From d840f36617fe3789bfe8c79a08d61dd8e4367a7c Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 23 Jul 2024 01:31:41 +0300 Subject: [PATCH 001/289] [Xtensa] Implement lowering Mul/Div/Shift/ROT/CTTZ/CTLZ/CTPOP operations. Implement lowering of the Mul/Div operations and also shift parts operations. Implement lowering of the bit manipulations, like ROT/SWAP/CTPOP/CTTZ/CTLZ. --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 252 ++++++- llvm/lib/Target/Xtensa/XtensaISelLowering.h | 14 + llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 24 + .../Target/Xtensa/XtensaMachineFunctionInfo.h | 53 ++ llvm/lib/Target/Xtensa/XtensaOperators.td | 8 + .../lib/Target/Xtensa/XtensaTargetMachine.cpp | 7 + llvm/lib/Target/Xtensa/XtensaTargetMachine.h | 4 + llvm/test/CodeGen/Xtensa/bswap.ll | 413 ++++++++++++ llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll | 531 +++++++++++++++ llvm/test/CodeGen/Xtensa/div.ll | 491 ++++++++++++++ llvm/test/CodeGen/Xtensa/mul.ll | 636 ++++++++++++++++++ llvm/test/CodeGen/Xtensa/rotl-rotr.ll | 500 ++++++++++++++ llvm/test/CodeGen/Xtensa/shift.ll | 72 ++ 13 files changed, 3001 insertions(+), 4 deletions(-) create mode 100644 llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h create mode 100644 llvm/test/CodeGen/Xtensa/bswap.ll create mode 100644 llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll create mode 100644 llvm/test/CodeGen/Xtensa/div.ll create mode 100644 llvm/test/CodeGen/Xtensa/mul.ll create mode 100644 llvm/test/CodeGen/Xtensa/rotl-rotr.ll create mode 100644 llvm/test/CodeGen/Xtensa/shift.ll diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 80d01d662a221..8c30dbbad821e 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -13,6 +13,7 @@ #include "XtensaISelLowering.h" #include "XtensaConstantPoolValue.h" +#include "XtensaMachineFunctionInfo.h" #include "XtensaSubtarget.h" #include "XtensaTargetMachine.h" #include 
"llvm/CodeGen/CallingConvLower.h" @@ -21,6 +22,7 @@ #include "llvm/CodeGen/MachineJumpTableInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" +#include "llvm/IR/GlobalVariable.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" @@ -98,6 +100,32 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setCondCodeAction(ISD::SETUGT, MVT::i32, Expand); setCondCodeAction(ISD::SETULE, MVT::i32, Expand); + setOperationAction(ISD::MUL, MVT::i32, Custom); + setOperationAction(ISD::MULHU, MVT::i32, Expand); + setOperationAction(ISD::MULHS, MVT::i32, Expand); + setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); + setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); + + setOperationAction(ISD::SDIV, MVT::i32, Expand); + setOperationAction(ISD::UDIV, MVT::i32, Expand); + setOperationAction(ISD::SREM, MVT::i32, Expand); + setOperationAction(ISD::UREM, MVT::i32, Expand); + setOperationAction(ISD::SDIVREM, MVT::i32, Expand); + setOperationAction(ISD::UDIVREM, MVT::i32, Expand); + + setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); + setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); + setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); + + setOperationAction(ISD::BSWAP, MVT::i32, Expand); + setOperationAction(ISD::ROTL, MVT::i32, Expand); + setOperationAction(ISD::ROTR, MVT::i32, Expand); + setOperationAction(ISD::CTPOP, MVT::i32, Expand); + setOperationAction(ISD::CTTZ, MVT::i32, Expand); + setOperationAction(ISD::CTLZ, MVT::i32, Expand); + setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); + setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand); + // Implement custom stack allocations setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom); // Implement custom stack save and restore @@ -665,12 +693,30 @@ SDValue XtensaTargetLowering::getAddrPCRel(SDValue Op, SDValue 
XtensaTargetLowering::LowerConstantPool(ConstantPoolSDNode *CP, SelectionDAG &DAG) const { EVT PtrVT = getPointerTy(DAG.getDataLayout()); + auto C = const_cast(CP->getConstVal()); + auto T = const_cast(CP->getType()); SDValue Result; - if (!CP->isMachineConstantPoolEntry()) { - Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign(), - CP->getOffset()); + + // Do not use constant pool for aggregate or vector constant types, + // in such cases create global variable, for example to store table + // when we lower CTTZ operation. + if (T->isAggregateType() || T->isVectorTy()) { + auto AFI = DAG.getMachineFunction().getInfo(); + auto M = const_cast( + DAG.getMachineFunction().getFunction().getParent()); + auto GV = new GlobalVariable( + *M, T, /*isConstant=*/true, GlobalVariable::InternalLinkage, C, + Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" + + Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" + + Twine(AFI->createLabelUId())); + Result = DAG.getTargetConstantPool(GV, PtrVT, Align(4)); } else { - report_fatal_error("This constantpool type is not supported yet"); + if (!CP->isMachineConstantPoolEntry()) { + Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, + CP->getAlign(), CP->getOffset()); + } else { + report_fatal_error("This constantpool type is not supported yet"); + } } return getAddrPCRel(Result, DAG); @@ -713,6 +759,131 @@ SDValue XtensaTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, return DAG.getMergeValues(Ops, DL); } +SDValue XtensaTargetLowering::LowerShiftLeftParts(SDValue Op, + SelectionDAG &DAG) const { + SDLoc DL(Op); + MVT VT = MVT::i32; + SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1); + SDValue Shamt = Op.getOperand(2); + + // if Shamt - register size < 0: // Shamt < register size + // Lo = Lo << Shamt + // Hi = (Hi << Shamt) | (Lo >>u (register size - Shamt)) + // else: + // Lo = 0 + // Hi = Lo << (Shamt - register size) + + SDValue MinusRegisterSize = DAG.getConstant(-32, DL, VT); 
+ SDValue ShamtMinusRegisterSize = + DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusRegisterSize); + + SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt); + + SDValue HiTrue = DAG.getNode(XtensaISD::SRCL, DL, VT, Hi, Lo, Shamt); + + SDValue Zero = DAG.getConstant(0, DL, VT); + + SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusRegisterSize); + + SDValue Cond = DAG.getSetCC(DL, VT, ShamtMinusRegisterSize, Zero, ISD::SETLT); + + Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, LoTrue, Zero); + + Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, HiTrue, HiFalse); + + return DAG.getMergeValues({Lo, Hi}, DL); +} + +SDValue XtensaTargetLowering::LowerShiftRightParts(SDValue Op, + SelectionDAG &DAG, + bool IsSRA) const { + SDLoc DL(Op); + SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1); + SDValue Shamt = Op.getOperand(2); + MVT VT = MVT::i32; + + // SRA expansion: + // if Shamt - register size < 0: // Shamt < register size + // Lo = (Lo >>u Shamt) | (Hi << u (register size - Shamt)) + // Hi = Hi >>s Shamt + // else: + // Lo = Hi >>s (Shamt - register size); + // Hi = Hi >>s (register size - 1) + // + // SRL expansion: + // if Shamt - register size < 0: // Shamt < register size + // Lo = (Lo >>u Shamt) | (Hi << u (register size - Shamt)) + // Hi = Hi >>u Shamt + // else: + // Lo = Hi >>u (Shamt - register size); + // Hi = 0; + + unsigned ShiftRightOp = IsSRA ? 
ISD::SRA : ISD::SRL; + + SDValue MinusRegisterSize = DAG.getConstant(-32, DL, VT); + SDValue RegisterSizeMinus1 = DAG.getConstant(32 - 1, DL, VT); + SDValue ShamtMinusRegisterSize = + DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusRegisterSize); + + SDValue LoTrue = DAG.getNode(XtensaISD::SRCR, DL, VT, Hi, Lo, Shamt); + + SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt); + + SDValue Zero = DAG.getConstant(0, DL, VT); + + SDValue LoFalse = + DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusRegisterSize); + + SDValue HiFalse; + + if (IsSRA) { + HiFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, RegisterSizeMinus1); + } else { + HiFalse = Zero; + } + + SDValue Cond = DAG.getSetCC(DL, VT, ShamtMinusRegisterSize, Zero, ISD::SETLT); + + Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, LoTrue, LoFalse); + + Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, HiTrue, HiFalse); + + SDValue Ops[2] = {Lo, Hi}; + return DAG.getMergeValues(Ops, DL); +} + +SDValue XtensaTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { + EVT VT = Op->getValueType(0); + SDLoc DL(Op); + + if (VT != MVT::i32) + return SDValue(); + + ConstantSDNode *C = dyn_cast(Op->getOperand(1)); + if (!C) + return SDValue(); + + int64_t MulAmt = C->getSExtValue(); + unsigned ShiftAmt = 0; + + switch (MulAmt) { + case 2: + ShiftAmt = 1; + break; + case 4: + ShiftAmt = 2; + break; + case 8: + ShiftAmt = 3; + break; + default: + return SDValue(); + } + + return DAG.getNode(ISD::SHL, DL, VT, Op->getOperand(0), + DAG.getConstant(ShiftAmt, DL, VT)); +} + SDValue XtensaTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { switch (Op.getOpcode()) { @@ -728,6 +899,8 @@ SDValue XtensaTargetLowering::LowerOperation(SDValue Op, return LowerJumpTable(Op, DAG); case ISD::ConstantPool: return LowerConstantPool(cast(Op), DAG); + case ISD::MUL: + return LowerMUL(Op, DAG); case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); case ISD::STACKSAVE: @@ -736,6 +909,12 @@ SDValue 
XtensaTargetLowering::LowerOperation(SDValue Op, return LowerSTACKRESTORE(Op, DAG); case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); + case ISD::SHL_PARTS: + return LowerShiftLeftParts(Op, DAG); + case ISD::SRA_PARTS: + return LowerShiftRightParts(Op, DAG, true); + case ISD::SRL_PARTS: + return LowerShiftRightParts(Op, DAG, false); default: report_fatal_error("Unexpected node to lower"); } @@ -753,6 +932,10 @@ const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const { return "XtensaISD::RET"; case XtensaISD::SELECT_CC: return "XtensaISD::SELECT_CC"; + case XtensaISD::SRCL: + return "XtensaISD::SRCL"; + case XtensaISD::SRCR: + return "XtensaISD::SRCR"; } return nullptr; } @@ -827,9 +1010,70 @@ XtensaTargetLowering::emitSelectCC(MachineInstr &MI, MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( MachineInstr &MI, MachineBasicBlock *MBB) const { + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + DebugLoc DL = MI.getDebugLoc(); + switch (MI.getOpcode()) { case Xtensa::SELECT: return emitSelectCC(MI, MBB); + case Xtensa::SHL_P: { + MachineOperand &R = MI.getOperand(0); + MachineOperand &S = MI.getOperand(1); + MachineOperand &SA = MI.getOperand(2); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SSL)).addReg(SA.getReg()); + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SLL), R.getReg()).addReg(S.getReg()); + MI.eraseFromParent(); + return MBB; + } + case Xtensa::SRA_P: { + MachineOperand &R = MI.getOperand(0); + MachineOperand &T = MI.getOperand(1); + MachineOperand &SA = MI.getOperand(2); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SSR)).addReg(SA.getReg()); + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SRA), R.getReg()).addReg(T.getReg()); + MI.eraseFromParent(); + return MBB; + } + case Xtensa::SRL_P: { + MachineOperand &R = MI.getOperand(0); + MachineOperand &T = MI.getOperand(1); + MachineOperand &SA = MI.getOperand(2); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SSR)).addReg(SA.getReg()); + BuildMI(*MBB, MI, DL, 
TII.get(Xtensa::SRL), R.getReg()).addReg(T.getReg()); + MI.eraseFromParent(); + return MBB; + } + case Xtensa::SRCL_P: { + MachineOperand &R = MI.getOperand(0); + MachineOperand &HI = MI.getOperand(1); + MachineOperand &LO = MI.getOperand(2); + MachineOperand &SA = MI.getOperand(3); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SSL)).addReg(SA.getReg()); + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SRC), R.getReg()) + .addReg(HI.getReg()) + .addReg(LO.getReg()); + ; + MI.eraseFromParent(); + return MBB; + } + case Xtensa::SRCR_P: { + MachineOperand &R = MI.getOperand(0); + MachineOperand &HI = MI.getOperand(1); + MachineOperand &LO = MI.getOperand(2); + MachineOperand &SA = MI.getOperand(3); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SSR)).addReg(SA.getReg()); + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SRC), R.getReg()) + .addReg(HI.getReg()) + .addReg(LO.getReg()); + ; + MI.eraseFromParent(); + return MBB; + } default: llvm_unreachable("Unexpected instr type to insert"); } diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index dd811ae9f3a77..b4c4929922cbf 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -40,6 +40,10 @@ enum { // the lhs and rhs (ops #0 and #1) of a conditional expression with the // condition code in op #4 SELECT_CC, + + // Shift + SRCL, + SRCR, }; } @@ -50,6 +54,10 @@ class XtensaTargetLowering : public TargetLowering { explicit XtensaTargetLowering(const TargetMachine &TM, const XtensaSubtarget &STI); + MVT getScalarShiftAmountTy(const DataLayout &, EVT LHSTy) const override { + return LHSTy.getSizeInBits() <= 32 ? 
MVT::i32 : MVT::i64; + } + EVT getSetCCResultType(const DataLayout &, LLVMContext &, EVT VT) const override { if (!VT.isVector()) @@ -103,6 +111,8 @@ class XtensaTargetLowering : public TargetLowering { SDValue LowerConstantPool(ConstantPoolSDNode *CP, SelectionDAG &DAG) const; + SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const; SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const; @@ -111,6 +121,10 @@ class XtensaTargetLowering : public TargetLowering { SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const; + + SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const; + SDValue getAddrPCRel(SDValue Op, SelectionDAG &DAG) const; CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const; diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index fc134e794153b..18a31fef18446 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -189,6 +189,30 @@ def SSAI : RRR_Inst<0x00, 0x00, 0x04, (outs), (ins uimm5:$imm), let t{0} = imm{4}; } +// Shift Pseudo instructions: +// SSL/SSR + Shift combination +let usesCustomInserter = 1 in { + def SHL_P : Pseudo<(outs AR:$r), (ins AR:$s, AR:$sa), + "# SHL_P $r, $s, $sa", + [(set i32:$r, (shl i32:$s, i32:$sa))]>; + + def SRA_P : Pseudo<(outs AR:$r), (ins AR:$t, AR:$sa), + "# SRA_P $r, $t, $sa", + [(set i32:$r, (sra i32:$t, i32:$sa))]>; + + def SRL_P : Pseudo<(outs AR:$r), (ins AR:$t, AR:$sa), + "# SRL_P $r, $t, $sa", + [(set i32:$r, (srl i32:$t, i32:$sa))]>; + + def SRCL_P : Pseudo<(outs AR:$r), (ins AR:$hi, AR:$lo, AR:$sa), + "# SRCL_P $r, $hi, $lo, $sa", + [(set i32:$r, (Xtensa_srcl i32:$hi, i32:$lo, i32:$sa))]>; + + def SRCR_P : Pseudo<(outs AR:$r), (ins AR:$hi, AR:$lo, AR:$sa), + "# SRCR_P $r, $hi, $lo, $sa", + [(set i32:$r, (Xtensa_srcr 
i32:$hi, i32:$lo, i32:$sa))]>; +} + //===----------------------------------------------------------------------===// // Load and store instructions //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h b/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h new file mode 100644 index 0000000000000..86ee81128c34c --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h @@ -0,0 +1,53 @@ +//==- XtensaMachineFunctionInfo.h - Xtensa machine function info --*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares Xtensa-specific per-machine-function information. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_XTENSA_XTENSAMACHINEFUNCTIONINFO_H +#define LLVM_LIB_TARGET_XTENSA_XTENSAMACHINEFUNCTIONINFO_H + +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/Target/TargetMachine.h" + +namespace llvm { + +class XtensaFunctionInfo : public MachineFunctionInfo { + unsigned VarArgsFirstGPR; + int VarArgsStackOffset; + unsigned VarArgsFrameIndex; + bool SaveFrameRegister = false; + unsigned LabelUId = 0; + +public: + explicit XtensaFunctionInfo(const Function &F, const TargetSubtargetInfo *STI) + : VarArgsFirstGPR(0), VarArgsStackOffset(0), VarArgsFrameIndex(0) {} + + unsigned getVarArgsFirstGPR() const { return VarArgsFirstGPR; } + void setVarArgsFirstGPR(unsigned GPR) { VarArgsFirstGPR = GPR; } + + int getVarArgsStackOffset() const { return VarArgsStackOffset; } + void setVarArgsStackOffset(int Offset) { VarArgsStackOffset = Offset; } + + // Get and 
set the frame index of the first stack vararg. + unsigned getVarArgsFrameIndex() const { return VarArgsFrameIndex; } + void setVarArgsFrameIndex(unsigned FI) { VarArgsFrameIndex = FI; } + + bool isSaveFrameRegister() const { return SaveFrameRegister; } + void setSaveFrameRegister() { SaveFrameRegister = true; } + + unsigned createLabelUId() { return LabelUId++; } +}; + +} // namespace llvm + +#endif /* LLVM_LIB_TARGET_XTENSA_XTENSAMACHINEFUNCTIONINFO_H */ diff --git a/llvm/lib/Target/Xtensa/XtensaOperators.td b/llvm/lib/Target/Xtensa/XtensaOperators.td index 93cd1c933dbde..c825359f3c5dd 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperators.td +++ b/llvm/lib/Target/Xtensa/XtensaOperators.td @@ -24,6 +24,10 @@ def SDT_XtensaSelectCC : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, SDTCisSameAs<2, 3>, SDTCisVT<5, i32>]>; + +def SDT_XtensaSRC : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, + SDTCisVT<2, i32>, SDTCisVT<3, i32>]>; + //===----------------------------------------------------------------------===// // Node definitions //===----------------------------------------------------------------------===// @@ -46,3 +50,7 @@ def Xtensa_brjt: SDNode<"XtensaISD::BR_JT", SDT_XtensaBrJT, [SDNPHasChain]>; def Xtensa_select_cc: SDNode<"XtensaISD::SELECT_CC", SDT_XtensaSelectCC, [SDNPInGlue]>; + +def Xtensa_srcl: SDNode<"XtensaISD::SRCL", SDT_XtensaSRC>; + +def Xtensa_srcr: SDNode<"XtensaISD::SRCR", SDT_XtensaSRC>; diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp index 49c7faf84df1d..eba169a2fe7a9 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp +++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp @@ -14,6 +14,7 @@ #include "XtensaTargetMachine.h" #include "TargetInfo/XtensaTargetInfo.h" +#include "XtensaMachineFunctionInfo.h" #include "llvm/CodeGen/Passes.h" #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" #include "llvm/CodeGen/TargetPassConfig.h" @@ -83,6 +84,12 @@ 
XtensaTargetMachine::getSubtargetImpl(const Function &F) const { return I.get(); } +MachineFunctionInfo *XtensaTargetMachine::createMachineFunctionInfo( + BumpPtrAllocator &Allocator, const Function &F, + const TargetSubtargetInfo *STI) const { + return XtensaFunctionInfo::create(Allocator, F, STI); +} + namespace { /// Xtensa Code Generator Pass Configuration Options. class XtensaPassConfig : public TargetPassConfig { diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.h b/llvm/lib/Target/Xtensa/XtensaTargetMachine.h index f371f22ed3d0e..6975076b5d699 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetMachine.h +++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.h @@ -45,6 +45,10 @@ class XtensaTargetMachine : public LLVMTargetMachine { return TLOF.get(); } + MachineFunctionInfo * + createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, + const TargetSubtargetInfo *STI) const override; + protected: mutable StringMap> SubtargetMap; }; diff --git a/llvm/test/CodeGen/Xtensa/bswap.ll b/llvm/test/CodeGen/Xtensa/bswap.ll new file mode 100644 index 0000000000000..e4458c7cf81c3 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/bswap.ll @@ -0,0 +1,413 @@ +; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=XTENSA %s + +declare i16 @llvm.bswap.i16(i16) +declare i32 @llvm.bswap.i32(i32) +declare i64 @llvm.bswap.i64(i64) +declare i8 @llvm.bitreverse.i8(i8) +declare i16 @llvm.bitreverse.i16(i16) +declare i32 @llvm.bitreverse.i32(i32) +declare i64 @llvm.bitreverse.i64(i64) + +define i16 @test_bswap_i16(i16 %a) nounwind { +; XTENSA-LABEL: test_bswap_i16: +; XTENSA: l32r a8, .LCPI0_0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: srli a8, a8, 8 +; XTENSA-NEXT: slli a9, a2, 8 +; XTENSA-NEXT: or a2, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i16 @llvm.bswap.i16(i16 %a) + ret i16 %tmp +} + +define i32 @test_bswap_i32(i32 %a) nounwind { +; XTENSA-LABEL: test_bswap_i32: +; XTENSA: movi a8, 24 +; XTENSA-NEXT: ssr a8 +; XTENSA-NEXT: srl 
a8, a2 +; XTENSA-NEXT: srli a9, a2, 8 +; XTENSA-NEXT: l32r a10, .LCPI1_0 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: and a9, a2, a10 +; XTENSA-NEXT: slli a9, a9, 8 +; XTENSA-NEXT: slli a10, a2, 24 +; XTENSA-NEXT: or a9, a10, a9 +; XTENSA-NEXT: or a2, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i32 @llvm.bswap.i32(i32 %a) + ret i32 %tmp +} + +define i64 @test_bswap_i64(i64 %a) nounwind { +; XTENSA-LABEL: test_bswap_i64: +; XTENSA: movi a9, 24 +; XTENSA-NEXT: ssr a9 +; XTENSA-NEXT: srl a8, a3 +; XTENSA-NEXT: srli a10, a3, 8 +; XTENSA-NEXT: l32r a11, .LCPI2_0 +; XTENSA-NEXT: and a10, a10, a11 +; XTENSA-NEXT: or a8, a10, a8 +; XTENSA-NEXT: and a10, a3, a11 +; XTENSA-NEXT: slli a10, a10, 8 +; XTENSA-NEXT: slli a7, a3, 24 +; XTENSA-NEXT: or a10, a7, a10 +; XTENSA-NEXT: or a8, a10, a8 +; XTENSA-NEXT: ssr a9 +; XTENSA-NEXT: srl a9, a2 +; XTENSA-NEXT: srli a10, a2, 8 +; XTENSA-NEXT: and a10, a10, a11 +; XTENSA-NEXT: or a9, a10, a9 +; XTENSA-NEXT: and a10, a2, a11 +; XTENSA-NEXT: slli a10, a10, 8 +; XTENSA-NEXT: slli a11, a2, 24 +; XTENSA-NEXT: or a10, a11, a10 +; XTENSA-NEXT: or a3, a10, a9 +; XTENSA-NEXT: or a2, a8, a8 +; XTENSA-NEXT: ret + %tmp = call i64 @llvm.bswap.i64(i64 %a) + ret i64 %tmp +} + +define i8 @test_bitreverse_i8(i8 %a) nounwind { +; XTENSA-LABEL: test_bitreverse_i8: +; XTENSA: movi a8, 15 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: slli a8, a8, 4 +; XTENSA-NEXT: movi a9, 240 +; XTENSA-NEXT: and a9, a2, a9 +; XTENSA-NEXT: srli a9, a9, 4 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: movi a10, 51 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: movi a10, 85 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a2, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i8 @llvm.bitreverse.i8(i8 %a) + ret i8 %tmp +} + +define i16 
@test_bitreverse_i16(i16 %a) nounwind { +; XTENSA-LABEL: test_bitreverse_i16: +; XTENSA: l32r a8, .LCPI4_0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: srli a8, a8, 8 +; XTENSA-NEXT: slli a9, a2, 8 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: l32r a10, .LCPI4_1 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 4 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: l32r a10, .LCPI4_2 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI4_3 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a2, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i16 @llvm.bitreverse.i16(i16 %a) + ret i16 %tmp +} + +define i32 @test_bitreverse_i32(i32 %a) nounwind { +; XTENSA-LABEL: test_bitreverse_i32: +; XTENSA: movi a8, 24 +; XTENSA-NEXT: ssr a8 +; XTENSA-NEXT: srl a8, a2 +; XTENSA-NEXT: srli a9, a2, 8 +; XTENSA-NEXT: l32r a10, .LCPI5_0 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: and a9, a2, a10 +; XTENSA-NEXT: slli a9, a9, 8 +; XTENSA-NEXT: slli a10, a2, 24 +; XTENSA-NEXT: or a9, a10, a9 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: l32r a10, .LCPI5_1 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 4 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: l32r a10, .LCPI5_2 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI5_3 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a2, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i32 @llvm.bitreverse.i32(i32 %a) + ret i32 %tmp +} + 
+define i64 @test_bitreverse_i64(i64 %a) nounwind { +; XTENSA-LABEL: test_bitreverse_i64: +; XTENSA: movi a10, 24 +; XTENSA-NEXT: ssr a10 +; XTENSA-NEXT: srl a8, a3 +; XTENSA-NEXT: srli a11, a3, 8 +; XTENSA-NEXT: l32r a9, .LCPI6_0 +; XTENSA-NEXT: and a11, a11, a9 +; XTENSA-NEXT: or a8, a11, a8 +; XTENSA-NEXT: and a11, a3, a9 +; XTENSA-NEXT: slli a11, a11, 8 +; XTENSA-NEXT: slli a7, a3, 24 +; XTENSA-NEXT: or a11, a7, a11 +; XTENSA-NEXT: or a8, a11, a8 +; XTENSA-NEXT: srli a7, a8, 4 +; XTENSA-NEXT: l32r a11, .LCPI6_1 +; XTENSA-NEXT: and a7, a7, a11 +; XTENSA-NEXT: and a8, a8, a11 +; XTENSA-NEXT: slli a8, a8, 4 +; XTENSA-NEXT: or a8, a7, a8 +; XTENSA-NEXT: srli a7, a8, 2 +; XTENSA-NEXT: l32r a6, .LCPI6_2 +; XTENSA-NEXT: and a7, a7, a6 +; XTENSA-NEXT: and a8, a8, a6 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a7, a8 +; XTENSA-NEXT: srli a7, a8, 1 +; XTENSA-NEXT: l32r a5, .LCPI6_3 +; XTENSA-NEXT: and a7, a7, a5 +; XTENSA-NEXT: and a8, a8, a5 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a8, a7, a8 +; XTENSA-NEXT: ssr a10 +; XTENSA-NEXT: srl a10, a2 +; XTENSA-NEXT: srli a7, a2, 8 +; XTENSA-NEXT: and a7, a7, a9 +; XTENSA-NEXT: or a10, a7, a10 +; XTENSA-NEXT: and a9, a2, a9 +; XTENSA-NEXT: slli a9, a9, 8 +; XTENSA-NEXT: slli a7, a2, 24 +; XTENSA-NEXT: or a9, a7, a9 +; XTENSA-NEXT: or a9, a9, a10 +; XTENSA-NEXT: srli a10, a9, 4 +; XTENSA-NEXT: and a10, a10, a11 +; XTENSA-NEXT: and a9, a9, a11 +; XTENSA-NEXT: slli a9, a9, 4 +; XTENSA-NEXT: or a9, a10, a9 +; XTENSA-NEXT: srli a10, a9, 2 +; XTENSA-NEXT: and a10, a10, a6 +; XTENSA-NEXT: and a9, a9, a6 +; XTENSA-NEXT: slli a9, a9, 2 +; XTENSA-NEXT: or a9, a10, a9 +; XTENSA-NEXT: srli a10, a9, 1 +; XTENSA-NEXT: and a10, a10, a5 +; XTENSA-NEXT: and a9, a9, a5 +; XTENSA-NEXT: slli a9, a9, 1 +; XTENSA-NEXT: or a3, a10, a9 +; XTENSA-NEXT: or a2, a8, a8 +; XTENSA-NEXT: ret + %tmp = call i64 @llvm.bitreverse.i64(i64 %a) + ret i64 %tmp +} + +define i16 @test_bswap_bitreverse_i16(i16 %a) nounwind { +; XTENSA-LABEL: 
test_bswap_bitreverse_i16: +; XTENSA: srli a8, a2, 4 +; XTENSA-NEXT: l32r a9, .LCPI7_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: and a9, a2, a9 +; XTENSA-NEXT: slli a9, a9, 4 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: l32r a10, .LCPI7_1 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI7_2 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a2, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i16 @llvm.bswap.i16(i16 %a) + %tmp2 = call i16 @llvm.bitreverse.i16(i16 %tmp) + ret i16 %tmp2 +} + +define i32 @test_bswap_bitreverse_i32(i32 %a) nounwind { +; XTENSA-LABEL: test_bswap_bitreverse_i32: +; XTENSA: srli a8, a2, 4 +; XTENSA-NEXT: l32r a9, .LCPI8_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: and a9, a2, a9 +; XTENSA-NEXT: slli a9, a9, 4 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: l32r a10, .LCPI8_1 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI8_2 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a2, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i32 @llvm.bswap.i32(i32 %a) + %tmp2 = call i32 @llvm.bitreverse.i32(i32 %tmp) + ret i32 %tmp2 +} + +define i64 @test_bswap_bitreverse_i64(i64 %a) nounwind { +; XTENSA-LABEL: test_bswap_bitreverse_i64: +; XTENSA: srli a8, a2, 4 +; XTENSA-NEXT: l32r a9, .LCPI9_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: and a10, a2, a9 +; XTENSA-NEXT: slli a10, a10, 4 +; XTENSA-NEXT: or a8, a8, a10 +; XTENSA-NEXT: srli a10, a8, 2 +; XTENSA-NEXT: l32r a11, .LCPI9_1 +; XTENSA-NEXT: and a10, a10, a11 +; XTENSA-NEXT: and a8, a8, a11 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or 
a8, a10, a8 +; XTENSA-NEXT: srli a10, a8, 1 +; XTENSA-NEXT: l32r a7, .LCPI9_2 +; XTENSA-NEXT: and a10, a10, a7 +; XTENSA-NEXT: and a8, a8, a7 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a2, a10, a8 +; XTENSA-NEXT: srli a8, a3, 4 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: and a9, a3, a9 +; XTENSA-NEXT: slli a9, a9, 4 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: and a9, a9, a11 +; XTENSA-NEXT: and a8, a8, a11 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: and a9, a9, a7 +; XTENSA-NEXT: and a8, a8, a7 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a3, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i64 @llvm.bswap.i64(i64 %a) + %tmp2 = call i64 @llvm.bitreverse.i64(i64 %tmp) + ret i64 %tmp2 +} + +define i16 @test_bitreverse_bswap_i16(i16 %a) nounwind { +; XTENSA-LABEL: test_bitreverse_bswap_i16: +; XTENSA: srli a8, a2, 4 +; XTENSA-NEXT: l32r a9, .LCPI10_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: and a9, a2, a9 +; XTENSA-NEXT: slli a9, a9, 4 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: l32r a10, .LCPI10_1 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI10_2 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a2, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i16 @llvm.bitreverse.i16(i16 %a) + %tmp2 = call i16 @llvm.bswap.i16(i16 %tmp) + ret i16 %tmp2 +} + +define i32 @test_bitreverse_bswap_i32(i32 %a) nounwind { +; XTENSA-LABEL: test_bitreverse_bswap_i32: +; XTENSA: srli a8, a2, 4 +; XTENSA-NEXT: l32r a9, .LCPI11_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: and a9, a2, a9 +; XTENSA-NEXT: slli a9, a9, 4 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: l32r a10, .LCPI11_1 +; XTENSA-NEXT: and a9, a9, a10 +; 
XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI11_2 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a2, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i32 @llvm.bitreverse.i32(i32 %a) + %tmp2 = call i32 @llvm.bswap.i32(i32 %tmp) + ret i32 %tmp2 +} + +define i64 @test_bitreverse_bswap_i64(i64 %a) nounwind { +; XTENSA-LABEL: test_bitreverse_bswap_i64: +; XTENSA: srli a8, a2, 4 +; XTENSA-NEXT: l32r a9, .LCPI12_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: and a10, a2, a9 +; XTENSA-NEXT: slli a10, a10, 4 +; XTENSA-NEXT: or a8, a8, a10 +; XTENSA-NEXT: srli a10, a8, 2 +; XTENSA-NEXT: l32r a11, .LCPI12_1 +; XTENSA-NEXT: and a10, a10, a11 +; XTENSA-NEXT: and a8, a8, a11 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a10, a8 +; XTENSA-NEXT: srli a10, a8, 1 +; XTENSA-NEXT: l32r a7, .LCPI12_2 +; XTENSA-NEXT: and a10, a10, a7 +; XTENSA-NEXT: and a8, a8, a7 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a2, a10, a8 +; XTENSA-NEXT: srli a8, a3, 4 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: and a9, a3, a9 +; XTENSA-NEXT: slli a9, a9, 4 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: and a9, a9, a11 +; XTENSA-NEXT: and a8, a8, a11 +; XTENSA-NEXT: slli a8, a8, 2 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: and a9, a9, a7 +; XTENSA-NEXT: and a8, a8, a7 +; XTENSA-NEXT: slli a8, a8, 1 +; XTENSA-NEXT: or a3, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i64 @llvm.bitreverse.i64(i64 %a) + %tmp2 = call i64 @llvm.bswap.i64(i64 %tmp) + ret i64 %tmp2 +} diff --git a/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll new file mode 100644 index 0000000000000..030f2a0fbfdc7 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll @@ -0,0 +1,531 @@ +; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ +; RUN: | 
FileCheck -check-prefix=XTENSA %s + +declare i8 @llvm.cttz.i8(i8, i1) +declare i16 @llvm.cttz.i16(i16, i1) +declare i32 @llvm.cttz.i32(i32, i1) +declare i64 @llvm.cttz.i64(i64, i1) +declare i8 @llvm.ctlz.i8(i8, i1) +declare i16 @llvm.ctlz.i16(i16, i1) +declare i32 @llvm.ctlz.i32(i32, i1) +declare i64 @llvm.ctlz.i64(i64, i1) +declare i8 @llvm.ctpop.i8(i8) +declare i16 @llvm.ctpop.i16(i16) +declare i32 @llvm.ctpop.i32(i32) +declare i64 @llvm.ctpop.i64(i64) + +define i8 @test_cttz_i8(i8 %a) nounwind { +; XTENSA-LABEL: test_cttz_i8: +; XTENSA: movi a8, 255 +; XTENSA-NEXT: and a9, a2, a8 +; XTENSA-NEXT: movi a8, 8 +; XTENSA-NEXT: beqz a9, .LBB0_2 +; XTENSA-NEXT: j .LBB0_1 +; XTENSA-NEXT: .LBB0_1: # %cond.false +; XTENSA-NEXT: movi a8, -1 +; XTENSA-NEXT: xor a8, a2, a8 +; XTENSA-NEXT: addi a9, a2, -1 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: movi a10, 85 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: sub a8, a8, a9 +; XTENSA-NEXT: movi a9, 51 +; XTENSA-NEXT: and a10, a8, a9 +; XTENSA-NEXT: srli a8, a8, 2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a10, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: movi a9, 15 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: .LBB0_2: # %cond.end +; XTENSA-NEXT: or a2, a8, a8 +; XTENSA-NEXT: ret + %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 false) + ret i8 %tmp +} + +define i16 @test_cttz_i16(i16 %a) nounwind { +; XTENSA-LABEL: test_cttz_i16: +; XTENSA: l32r a8, .LCPI1_0 +; XTENSA-NEXT: and a9, a2, a8 +; XTENSA-NEXT: movi a8, 16 +; XTENSA-NEXT: beqz a9, .LBB1_2 +; XTENSA-NEXT: j .LBB1_1 +; XTENSA-NEXT: .LBB1_1: # %cond.false +; XTENSA-NEXT: movi a8, -1 +; XTENSA-NEXT: xor a8, a2, a8 +; XTENSA-NEXT: addi a9, a2, -1 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI1_1 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: sub a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI1_2 +; XTENSA-NEXT: and a10, a8, a9 +; XTENSA-NEXT: srli a8, a8, 
2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a10, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: movi a9, 15 +; XTENSA-NEXT: and a9, a8, a9 +; XTENSA-NEXT: l32r a10, .LCPI1_3 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: srli a8, a8, 8 +; XTENSA-NEXT: add a8, a9, a8 +; XTENSA-NEXT: .LBB1_2: # %cond.end +; XTENSA-NEXT: or a2, a8, a8 +; XTENSA-NEXT: ret + %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 false) + ret i16 %tmp +} + +define i32 @test_cttz_i32(i32 %a) nounwind { +; XTENSA-LABEL: test_cttz_i32: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: or a8, a2, a2 +; XTENSA-NEXT: movi a2, 32 +; XTENSA-NEXT: beqz a8, .LBB2_2 +; XTENSA-NEXT: j .LBB2_1 +; XTENSA-NEXT: .LBB2_1: # %cond.false +; XTENSA-NEXT: neg a9, a8 +; XTENSA-NEXT: and a2, a8, a9 +; XTENSA-NEXT: l32r a3, .LCPI2_0 +; XTENSA-NEXT: l32r a8, .LCPI2_1 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: movi a8, 27 +; XTENSA-NEXT: ssr a8 +; XTENSA-NEXT: srl a8, a2 +; XTENSA-NEXT: l32r a9, .LCPI2_2 +; XTENSA-NEXT: add a8, a9, a8 +; XTENSA-NEXT: l8ui a2, a8, 0 +; XTENSA-NEXT: .LBB2_2: # %cond.end +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 false) + ret i32 %tmp +} + +define i8 @test_cttz_i8_zero_undef(i8 %a) nounwind { +; XTENSA-LABEL: test_cttz_i8_zero_undef: +; XTENSA: movi a8, -1 +; XTENSA-NEXT: xor a8, a2, a8 +; XTENSA-NEXT: addi a9, a2, -1 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: movi a10, 85 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: sub a8, a8, a9 +; XTENSA-NEXT: movi a9, 51 +; XTENSA-NEXT: and a10, a8, a9 +; XTENSA-NEXT: srli a8, a8, 2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a10, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: movi a9, 15 +; XTENSA-NEXT: and a2, a8, a9 
+; XTENSA-NEXT: ret + %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 true) + ret i8 %tmp +} + +define i16 @test_cttz_i16_zero_undef(i16 %a) nounwind { +; XTENSA-LABEL: test_cttz_i16_zero_undef: +; XTENSA: movi a8, -1 +; XTENSA-NEXT: xor a8, a2, a8 +; XTENSA-NEXT: addi a9, a2, -1 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI4_0 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: sub a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI4_1 +; XTENSA-NEXT: and a10, a8, a9 +; XTENSA-NEXT: srli a8, a8, 2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a10, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: movi a9, 15 +; XTENSA-NEXT: and a9, a8, a9 +; XTENSA-NEXT: l32r a10, .LCPI4_2 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: srli a8, a8, 8 +; XTENSA-NEXT: add a2, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 true) + ret i16 %tmp +} + +define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind { +; XTENSA-LABEL: test_cttz_i32_zero_undef: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: neg a8, a2 +; XTENSA-NEXT: and a2, a2, a8 +; XTENSA-NEXT: l32r a3, .LCPI5_0 +; XTENSA-NEXT: l32r a8, .LCPI5_1 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: movi a8, 27 +; XTENSA-NEXT: ssr a8 +; XTENSA-NEXT: srl a8, a2 +; XTENSA-NEXT: l32r a9, .LCPI5_2 +; XTENSA-NEXT: add a8, a9, a8 +; XTENSA-NEXT: l8ui a2, a8, 0 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 true) + ret i32 %tmp +} + +define i8 @test_ctlz_i8(i8 %a) nounwind { +; XTENSA-LABEL: test_ctlz_i8: +; XTENSA: movi a8, 255 +; XTENSA-NEXT: and a9, a2, a8 +; XTENSA-NEXT: movi a8, 8 +; XTENSA-NEXT: beqz a9, .LBB6_2 +; XTENSA-NEXT: j .LBB6_1 +; XTENSA-NEXT: .LBB6_1: # %cond.false +; XTENSA-NEXT: movi a8, 254 +; XTENSA-NEXT: and a8, a2, a8 +; 
XTENSA-NEXT: srli a8, a8, 1 +; XTENSA-NEXT: or a8, a2, a8 +; XTENSA-NEXT: movi a9, 252 +; XTENSA-NEXT: and a9, a8, a9 +; XTENSA-NEXT: srli a9, a9, 2 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: movi a9, 240 +; XTENSA-NEXT: and a9, a8, a9 +; XTENSA-NEXT: srli a9, a9, 4 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: movi a9, -1 +; XTENSA-NEXT: xor a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: movi a10, 85 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: sub a8, a8, a9 +; XTENSA-NEXT: movi a9, 51 +; XTENSA-NEXT: and a10, a8, a9 +; XTENSA-NEXT: srli a8, a8, 2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a10, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: movi a9, 15 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: .LBB6_2: # %cond.end +; XTENSA-NEXT: or a2, a8, a8 +; XTENSA-NEXT: ret + %tmp = call i8 @llvm.ctlz.i8(i8 %a, i1 false) + ret i8 %tmp +} + +define i16 @test_ctlz_i16(i16 %a) nounwind { +; XTENSA-LABEL: test_ctlz_i16: +; XTENSA: l32r a8, .LCPI7_0 +; XTENSA-NEXT: and a9, a2, a8 +; XTENSA-NEXT: movi a8, 16 +; XTENSA-NEXT: beqz a9, .LBB7_2 +; XTENSA-NEXT: j .LBB7_1 +; XTENSA-NEXT: .LBB7_1: # %cond.false +; XTENSA-NEXT: l32r a8, .LCPI7_1 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: srli a8, a8, 1 +; XTENSA-NEXT: or a8, a2, a8 +; XTENSA-NEXT: l32r a9, .LCPI7_2 +; XTENSA-NEXT: and a9, a8, a9 +; XTENSA-NEXT: srli a9, a9, 2 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI7_3 +; XTENSA-NEXT: and a9, a8, a9 +; XTENSA-NEXT: srli a9, a9, 4 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI7_4 +; XTENSA-NEXT: and a9, a8, a9 +; XTENSA-NEXT: srli a9, a9, 8 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: movi a9, -1 +; XTENSA-NEXT: xor a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI7_5 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: sub a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI7_6 +; XTENSA-NEXT: and a10, a8, a9 +; XTENSA-NEXT: srli a8, a8, 2 +; XTENSA-NEXT: and a8, a8, a9 +; 
XTENSA-NEXT: add a8, a10, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: movi a9, 15 +; XTENSA-NEXT: and a9, a8, a9 +; XTENSA-NEXT: l32r a10, .LCPI7_7 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: srli a8, a8, 8 +; XTENSA-NEXT: add a8, a9, a8 +; XTENSA-NEXT: .LBB7_2: # %cond.end +; XTENSA-NEXT: or a2, a8, a8 +; XTENSA-NEXT: ret + %tmp = call i16 @llvm.ctlz.i16(i16 %a, i1 false) + ret i16 %tmp +} + +define i32 @test_ctlz_i32(i32 %a) nounwind { +; XTENSA-LABEL: test_ctlz_i32: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: or a8, a2, a2 +; XTENSA-NEXT: movi a2, 32 +; XTENSA-NEXT: beqz a8, .LBB8_2 +; XTENSA-NEXT: j .LBB8_1 +; XTENSA-NEXT: .LBB8_1: # %cond.false +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: movi a9, 16 +; XTENSA-NEXT: ssr a9 +; XTENSA-NEXT: srl a9, a8 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: movi a9, -1 +; XTENSA-NEXT: xor a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI8_0 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: sub a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI8_1 +; XTENSA-NEXT: and a10, a8, a9 +; XTENSA-NEXT: srli a8, a8, 2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a10, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI8_2 +; XTENSA-NEXT: and a2, a8, a9 +; XTENSA-NEXT: l32r a3, .LCPI8_3 +; XTENSA-NEXT: l32r a8, .LCPI8_4 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: movi a8, 24 +; XTENSA-NEXT: ssr a8 +; XTENSA-NEXT: srl a2, a2 +; XTENSA-NEXT: .LBB8_2: # %cond.end +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 
false) + ret i32 %tmp +} + +define i8 @test_ctlz_i8_zero_undef(i8 %a) nounwind { +; XTENSA-LABEL: test_ctlz_i8_zero_undef: +; XTENSA: movi a8, 254 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: srli a8, a8, 1 +; XTENSA-NEXT: or a8, a2, a8 +; XTENSA-NEXT: movi a9, 252 +; XTENSA-NEXT: and a9, a8, a9 +; XTENSA-NEXT: srli a9, a9, 2 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: movi a9, 240 +; XTENSA-NEXT: and a9, a8, a9 +; XTENSA-NEXT: srli a9, a9, 4 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: movi a9, -1 +; XTENSA-NEXT: xor a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: movi a10, 85 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: sub a8, a8, a9 +; XTENSA-NEXT: movi a9, 51 +; XTENSA-NEXT: and a10, a8, a9 +; XTENSA-NEXT: srli a8, a8, 2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a10, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: movi a9, 15 +; XTENSA-NEXT: and a2, a8, a9 +; XTENSA-NEXT: ret + %tmp = call i8 @llvm.ctlz.i8(i8 %a, i1 true) + ret i8 %tmp +} + +define i16 @test_ctlz_i16_zero_undef(i16 %a) nounwind { +; XTENSA-LABEL: test_ctlz_i16_zero_undef: +; XTENSA: l32r a8, .LCPI10_0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: srli a8, a8, 1 +; XTENSA-NEXT: or a8, a2, a8 +; XTENSA-NEXT: l32r a9, .LCPI10_1 +; XTENSA-NEXT: and a9, a8, a9 +; XTENSA-NEXT: srli a9, a9, 2 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI10_2 +; XTENSA-NEXT: and a9, a8, a9 +; XTENSA-NEXT: srli a9, a9, 4 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI10_3 +; XTENSA-NEXT: and a9, a8, a9 +; XTENSA-NEXT: srli a9, a9, 8 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: movi a9, -1 +; XTENSA-NEXT: xor a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI10_4 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: sub a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI10_5 +; XTENSA-NEXT: and a10, a8, a9 +; XTENSA-NEXT: srli a8, a8, 2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a10, a8 +; XTENSA-NEXT: srli 
a9, a8, 4 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: movi a9, 15 +; XTENSA-NEXT: and a9, a8, a9 +; XTENSA-NEXT: l32r a10, .LCPI10_6 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: srli a8, a8, 8 +; XTENSA-NEXT: add a2, a9, a8 +; XTENSA-NEXT: ret + %tmp = call i16 @llvm.ctlz.i16(i16 %a, i1 true) + ret i16 %tmp +} + +define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind { +; XTENSA-LABEL: test_ctlz_i32_zero_undef: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: srli a8, a2, 1 +; XTENSA-NEXT: or a8, a2, a8 +; XTENSA-NEXT: srli a9, a8, 2 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: movi a9, 16 +; XTENSA-NEXT: ssr a9 +; XTENSA-NEXT: srl a9, a8 +; XTENSA-NEXT: or a8, a8, a9 +; XTENSA-NEXT: movi a9, -1 +; XTENSA-NEXT: xor a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI11_0 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: sub a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI11_1 +; XTENSA-NEXT: and a10, a8, a9 +; XTENSA-NEXT: srli a8, a8, 2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a10, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI11_2 +; XTENSA-NEXT: and a2, a8, a9 +; XTENSA-NEXT: l32r a3, .LCPI11_3 +; XTENSA-NEXT: l32r a8, .LCPI11_4 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: movi a8, 24 +; XTENSA-NEXT: ssr a8 +; XTENSA-NEXT: srl a2, a2 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 true) + ret i32 %tmp +} + +define i8 @test_ctpop_i8(i8 %a) nounwind { +; XTENSA-LABEL: test_ctpop_i8: +; XTENSA: srli a8, a2, 1 +; XTENSA-NEXT: movi a9, 85 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: sub a8, a2, a8 +; XTENSA-NEXT: movi a9, 51 +; XTENSA-NEXT: and a10, a8, a9 +; 
XTENSA-NEXT: srli a8, a8, 2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a10, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: movi a9, 15 +; XTENSA-NEXT: and a2, a8, a9 +; XTENSA-NEXT: ret + %1 = call i8 @llvm.ctpop.i8(i8 %a) + ret i8 %1 +} + +define i16 @test_ctpop_i16(i16 %a) nounwind { +; XTENSA-LABEL: test_ctpop_i16: +; XTENSA: srli a8, a2, 1 +; XTENSA-NEXT: l32r a9, .LCPI13_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: sub a8, a2, a8 +; XTENSA-NEXT: l32r a9, .LCPI13_1 +; XTENSA-NEXT: and a10, a8, a9 +; XTENSA-NEXT: srli a8, a8, 2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a10, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: movi a9, 15 +; XTENSA-NEXT: and a9, a8, a9 +; XTENSA-NEXT: l32r a10, .LCPI13_2 +; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: srli a8, a8, 8 +; XTENSA-NEXT: add a2, a9, a8 +; XTENSA-NEXT: ret + %1 = call i16 @llvm.ctpop.i16(i16 %a) + ret i16 %1 +} + +define i32 @test_ctpop_i32(i32 %a) nounwind { +; XTENSA-LABEL: test_ctpop_i32: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: srli a8, a2, 1 +; XTENSA-NEXT: l32r a9, .LCPI14_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: sub a8, a2, a8 +; XTENSA-NEXT: l32r a9, .LCPI14_1 +; XTENSA-NEXT: and a10, a8, a9 +; XTENSA-NEXT: srli a8, a8, 2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a10, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI14_2 +; XTENSA-NEXT: and a2, a8, a9 +; XTENSA-NEXT: l32r a3, .LCPI14_3 +; XTENSA-NEXT: l32r a8, .LCPI14_4 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: movi a8, 24 +; XTENSA-NEXT: ssr a8 +; XTENSA-NEXT: srl a2, a2 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = call i32 @llvm.ctpop.i32(i32 %a) + ret i32 %1 +} diff --git a/llvm/test/CodeGen/Xtensa/div.ll 
b/llvm/test/CodeGen/Xtensa/div.ll new file mode 100644 index 0000000000000..fcb58eb5bff53 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/div.ll @@ -0,0 +1,491 @@ +; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=XTENSA %s + +define i32 @udiv(i32 %a, i32 %b) nounwind { +; XTENSA-LABEL: udiv: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a8, .LCPI0_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i32 %a, %b + ret i32 %1 +} + +define i32 @udiv_constant(i32 %a) nounwind { +; XTENSA-LABEL: udiv_constant: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a3, 5 +; XTENSA-NEXT: l32r a8, .LCPI1_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i32 %a, 5 + ret i32 %1 +} + +define i32 @udiv_pow2(i32 %a) nounwind { +; XTENSA-LABEL: udiv_pow2: +; XTENSA: srli a2, a2, 3 +; XTENSA-NEXT: ret + %1 = udiv i32 %a, 8 + ret i32 %1 +} + +define i32 @udiv_constant_lhs(i32 %a) nounwind { +; XTENSA-LABEL: udiv_constant_lhs: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: or a3, a2, a2 +; XTENSA-NEXT: movi a2, 10 +; XTENSA-NEXT: l32r a8, .LCPI3_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i32 10, %a + ret i32 %1 +} + +define i64 @udiv64(i64 %a, i64 %b) nounwind { +; XTENSA-LABEL: udiv64: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a8, .LCPI4_0 +; 
XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i64 %a, %b + ret i64 %1 +} + +define i64 @udiv64_constant(i64 %a) nounwind { +; XTENSA-LABEL: udiv64_constant: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a4, 5 +; XTENSA-NEXT: movi a5, 0 +; XTENSA-NEXT: l32r a8, .LCPI5_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i64 %a, 5 + ret i64 %1 +} + +define i64 @udiv64_constant_lhs(i64 %a) nounwind { +; XTENSA-LABEL: udiv64_constant_lhs: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: or a5, a3, a3 +; XTENSA-NEXT: or a4, a2, a2 +; XTENSA-NEXT: movi a2, 10 +; XTENSA-NEXT: movi a3, 0 +; XTENSA-NEXT: l32r a8, .LCPI6_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i64 10, %a + ret i64 %1 +} + +define i8 @udiv8(i8 %a, i8 %b) nounwind { +; XTENSA-LABEL: udiv8: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a8, 255 +; XTENSA-NEXT: and a2, a2, a8 +; XTENSA-NEXT: and a3, a3, a8 +; XTENSA-NEXT: l32r a8, .LCPI7_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i8 %a, %b + ret i8 %1 +} + +define i8 @udiv8_constant(i8 %a) nounwind { +; XTENSA-LABEL: udiv8_constant: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a8, 255 +; XTENSA-NEXT: and a2, a2, a8 +; 
XTENSA-NEXT: movi a3, 5 +; XTENSA-NEXT: l32r a8, .LCPI8_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i8 %a, 5 + ret i8 %1 +} + +define i8 @udiv8_pow2(i8 %a) nounwind { +; XTENSA-LABEL: udiv8_pow2: +; XTENSA: movi a8, 248 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: srli a2, a8, 3 +; XTENSA-NEXT: ret + %1 = udiv i8 %a, 8 + ret i8 %1 +} + +define i8 @udiv8_constant_lhs(i8 %a) nounwind { +; XTENSA-LABEL: udiv8_constant_lhs: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a8, 255 +; XTENSA-NEXT: and a3, a2, a8 +; XTENSA-NEXT: movi a2, 10 +; XTENSA-NEXT: l32r a8, .LCPI10_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i8 10, %a + ret i8 %1 +} + +define i16 @udiv16(i16 %a, i16 %b) nounwind { +; XTENSA-LABEL: udiv16: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a8, .LCPI11_0 +; XTENSA-NEXT: and a2, a2, a8 +; XTENSA-NEXT: and a3, a3, a8 +; XTENSA-NEXT: l32r a8, .LCPI11_1 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = udiv i16 %a, %b + ret i16 %1 +} + +define i16 @udiv16_constant(i16 %a) nounwind { +; XTENSA-LABEL: udiv16_constant: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a8, .LCPI12_0 +; XTENSA-NEXT: and a2, a2, a8 +; XTENSA-NEXT: movi a3, 5 +; XTENSA-NEXT: l32r a8, .LCPI12_1 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 
= udiv i16 %a, 5 + ret i16 %1 +} + +define i16 @udiv16_pow2(i16 %a) nounwind { +; XTENSA-LABEL: udiv16_pow2: +; XTENSA: l32r a8, .LCPI13_0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: srli a2, a8, 3 +; XTENSA-NEXT: ret + %1 = udiv i16 %a, 8 + ret i16 %1 +} + +define i32 @sdiv(i32 %a, i32 %b) nounwind { +; XTENSA-LABEL: sdiv: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a8, .LCPI14_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i32 %a, %b + ret i32 %1 +} + +define i32 @sdiv_constant_lhs(i32 %a) nounwind { +; XTENSA-LABEL: sdiv_constant_lhs: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: or a3, a2, a2 +; XTENSA-NEXT: movi a2, -10 +; XTENSA-NEXT: l32r a8, .LCPI15_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i32 -10, %a + ret i32 %1 +} + +define i64 @sdiv64(i64 %a, i64 %b) nounwind { +; XTENSA-LABEL: sdiv64: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a8, .LCPI16_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i64 %a, %b + ret i64 %1 +} + +define i64 @sdiv64_constant(i64 %a) nounwind { +; XTENSA-LABEL: sdiv64_constant: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a4, 5 +; XTENSA-NEXT: movi a5, 0 +; XTENSA-NEXT: l32r a8, .LCPI17_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; 
XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i64 %a, 5 + ret i64 %1 +} + +define i64 @sdiv64_constant_lhs(i64 %a) nounwind { +; XTENSA-LABEL: sdiv64_constant_lhs: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: or a5, a3, a3 +; XTENSA-NEXT: or a4, a2, a2 +; XTENSA-NEXT: movi a2, 10 +; XTENSA-NEXT: movi a3, 0 +; XTENSA-NEXT: l32r a8, .LCPI18_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i64 10, %a + ret i64 %1 +} + + +define i64 @sdiv64_sext_operands(i32 %a, i32 %b) nounwind { +; XTENSA-LABEL: sdiv64_sext_operands: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: or a4, a3, a3 +; XTENSA-NEXT: srai a3, a2, 31 +; XTENSA-NEXT: srai a5, a4, 31 +; XTENSA-NEXT: l32r a8, .LCPI19_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sext i32 %a to i64 + %2 = sext i32 %b to i64 + %3 = sdiv i64 %1, %2 + ret i64 %3 +} + +define i8 @sdiv8(i8 %a, i8 %b) nounwind { +; XTENSA-LABEL: sdiv8: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a2, a8, 24 +; XTENSA-NEXT: slli a8, a3, 24 +; XTENSA-NEXT: srai a3, a8, 24 +; XTENSA-NEXT: l32r a8, .LCPI20_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i8 %a, %b + ret i8 %1 +} + +define i8 @sdiv8_constant(i8 %a) nounwind { +; XTENSA-LABEL: sdiv8_constant: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a2, 
24 +; XTENSA-NEXT: srai a2, a8, 24 +; XTENSA-NEXT: movi a3, 5 +; XTENSA-NEXT: l32r a8, .LCPI21_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i8 %a, 5 + ret i8 %1 +} + +define i8 @sdiv8_pow2(i8 %a) nounwind { +; XTENSA-LABEL: sdiv8_pow2: +; XTENSA: slli a8, a2, 24 +; XTENSA-NEXT: srai a8, a8, 24 +; XTENSA-NEXT: srli a8, a8, 12 +; XTENSA-NEXT: movi a9, 7 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a2, a8 +; XTENSA-NEXT: slli a8, a8, 24 +; XTENSA-NEXT: srai a2, a8, 27 +; XTENSA-NEXT: ret + %1 = sdiv i8 %a, 8 + ret i8 %1 +} + +define i8 @sdiv8_constant_lhs(i8 %a) nounwind { +; XTENSA-LABEL: sdiv8_constant_lhs: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a2, 24 +; XTENSA-NEXT: srai a3, a8, 24 +; XTENSA-NEXT: movi a2, -10 +; XTENSA-NEXT: l32r a8, .LCPI23_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i8 -10, %a + ret i8 %1 +} + +define i16 @sdiv16(i16 %a, i16 %b) nounwind { +; XTENSA-LABEL: sdiv16: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a2, a8, 16 +; XTENSA-NEXT: slli a8, a3, 16 +; XTENSA-NEXT: srai a3, a8, 16 +; XTENSA-NEXT: l32r a8, .LCPI24_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i16 %a, %b + ret i16 %1 +} + +define i16 @sdiv16_constant(i16 %a) nounwind { +; XTENSA-LABEL: sdiv16_constant: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a2, a8, 
16 +; XTENSA-NEXT: movi a3, 5 +; XTENSA-NEXT: l32r a8, .LCPI25_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i16 %a, 5 + ret i16 %1 +} + +define i16 @sdiv16_constant_lhs(i16 %a) nounwind { +; XTENSA-LABEL: sdiv16_constant_lhs: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: slli a8, a2, 16 +; XTENSA-NEXT: srai a3, a8, 16 +; XTENSA-NEXT: movi a2, -10 +; XTENSA-NEXT: l32r a8, .LCPI26_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sdiv i16 -10, %a + ret i16 %1 +} + +define i32 @sdiv_pow2(i32 %a) nounwind { +; XTENSA-LABEL: sdiv_pow2: +; XTENSA: srai a8, a2, 31 +; XTENSA-NEXT: movi a9, 29 +; XTENSA-NEXT: ssr a9 +; XTENSA-NEXT: srl a8, a8 +; XTENSA-NEXT: add a8, a2, a8 +; XTENSA-NEXT: srai a2, a8, 3 +; XTENSA-NEXT: ret + %1 = sdiv i32 %a, 8 + ret i32 %1 +} + +define i32 @sdiv_pow2_2(i32 %a) nounwind { +; XTENSA-LABEL: sdiv_pow2_2: +; XTENSA: srai a8, a2, 31 +; XTENSA-NEXT: movi a9, 16 +; XTENSA-NEXT: ssr a9 +; XTENSA-NEXT: srl a8, a8 +; XTENSA-NEXT: add a8, a2, a8 +; XTENSA-NEXT: srai a2, a8, 16 +; XTENSA-NEXT: ret + %1 = sdiv i32 %a, 65536 + ret i32 %1 +} + +define i16 @sdiv16_pow2(i16 %a) nounwind { +; XTENSA-LABEL: sdiv16_pow2: +; XTENSA: slli a8, a2, 16 +; XTENSA-NEXT: srai a8, a8, 16 +; XTENSA-NEXT: movi a9, 28 +; XTENSA-NEXT: ssr a9 +; XTENSA-NEXT: srl a8, a8 +; XTENSA-NEXT: movi a9, 7 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a2, a8 +; XTENSA-NEXT: slli a8, a8, 16 +; XTENSA-NEXT: srai a2, a8, 19 +; XTENSA-NEXT: ret + %1 = sdiv i16 %a, 8 + ret i16 %1 +} diff --git a/llvm/test/CodeGen/Xtensa/mul.ll b/llvm/test/CodeGen/Xtensa/mul.ll new file mode 100644 index 0000000000000..0be2885458163 --- /dev/null +++ 
b/llvm/test/CodeGen/Xtensa/mul.ll @@ -0,0 +1,636 @@ +; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=XTENSA %s + +define signext i32 @square(i32 %a) nounwind { +; XTENSA-LABEL: square: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a8, .LCPI0_0 +; XTENSA-NEXT: or a3, a2, a2 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i32 %a, %a + ret i32 %1 +} + +define signext i32 @mul(i32 %a, i32 %b) nounwind { +; XTENSA-LABEL: mul: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a8, .LCPI1_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i32 %a, %b + ret i32 %1 +} + +define signext i32 @mul_constant(i32 %a) nounwind { +; XTENSA-LABEL: mul_constant: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a3, 5 +; XTENSA-NEXT: l32r a8, .LCPI2_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i32 %a, 5 + ret i32 %1 +} + +define i32 @mul_pow2(i32 %a) nounwind { +; XTENSA-LABEL: mul_pow2: +; XTENSA: slli a2, a2, 3 +; XTENSA-NEXT: ret + %1 = mul i32 %a, 8 + ret i32 %1 +} + +define i64 @mul64(i64 %a, i64 %b) nounwind { +; XTENSA-LABEL: mul64: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a8, .LCPI4_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; 
XTENSA-NEXT: ret + %1 = mul i64 %a, %b + ret i64 %1 +} + +define i64 @mul64_constant(i64 %a) nounwind { +; XTENSA-LABEL: mul64_constant: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a4, 5 +; XTENSA-NEXT: movi a5, 0 +; XTENSA-NEXT: l32r a8, .LCPI5_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i64 %a, 5 + ret i64 %1 +} + +define i32 @mulhs(i32 %a, i32 %b) nounwind { +; XTENSA-LABEL: mulhs: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: or a4, a3, a3 +; XTENSA-NEXT: srai a3, a2, 31 +; XTENSA-NEXT: srai a5, a4, 31 +; XTENSA-NEXT: l32r a8, .LCPI6_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: or a2, a3, a3 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sext i32 %a to i64 + %2 = sext i32 %b to i64 + %3 = mul i64 %1, %2 + %4 = lshr i64 %3, 32 + %5 = trunc i64 %4 to i32 + ret i32 %5 +} + +define i32 @mulhs_positive_constant(i32 %a) nounwind { +; XTENSA-LABEL: mulhs_positive_constant: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: srai a3, a2, 31 +; XTENSA-NEXT: movi a4, 5 +; XTENSA-NEXT: movi a5, 0 +; XTENSA-NEXT: l32r a8, .LCPI7_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: or a2, a3, a3 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sext i32 %a to i64 + %2 = mul i64 %1, 5 + %3 = lshr i64 %2, 32 + %4 = trunc i64 %3 to i32 + ret i32 %4 +} + +define i32 @mulhs_negative_constant(i32 %a) nounwind { +; XTENSA-LABEL: mulhs_negative_constant: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, 
a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: srai a3, a2, 31 +; XTENSA-NEXT: movi a4, -5 +; XTENSA-NEXT: movi a5, -1 +; XTENSA-NEXT: l32r a8, .LCPI8_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: or a2, a3, a3 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = sext i32 %a to i64 + %2 = mul i64 %1, -5 + %3 = lshr i64 %2, 32 + %4 = trunc i64 %3 to i32 + ret i32 %4 +} + +define zeroext i32 @mulhu(i32 zeroext %a, i32 zeroext %b) nounwind { +; XTENSA-LABEL: mulhu: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: or a4, a3, a3 +; XTENSA-NEXT: movi a3, 0 +; XTENSA-NEXT: l32r a8, .LCPI9_0 +; XTENSA-NEXT: or a5, a3, a3 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: or a2, a3, a3 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = zext i32 %a to i64 + %2 = zext i32 %b to i64 + %3 = mul i64 %1, %2 + %4 = lshr i64 %3, 32 + %5 = trunc i64 %4 to i32 + ret i32 %5 +} + +define i32 @mulhsu(i32 %a, i32 %b) nounwind { +; XTENSA-LABEL: mulhsu: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: or a4, a3, a3 +; XTENSA-NEXT: srai a5, a4, 31 +; XTENSA-NEXT: movi a3, 0 +; XTENSA-NEXT: l32r a8, .LCPI10_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: or a2, a3, a3 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = zext i32 %a to i64 + %2 = sext i32 %b to i64 + %3 = mul i64 %1, %2 + %4 = lshr i64 %3, 32 + %5 = trunc i64 %4 to i32 + ret i32 %5 +} + +define i32 @mulhu_constant(i32 %a) nounwind { +; XTENSA-LABEL: mulhu_constant: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a4, 5 +; XTENSA-NEXT: movi 
a3, 0 +; XTENSA-NEXT: l32r a8, .LCPI11_0 +; XTENSA-NEXT: or a5, a3, a3 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: or a2, a3, a3 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = zext i32 %a to i64 + %2 = mul i64 %1, 5 + %3 = lshr i64 %2, 32 + %4 = trunc i64 %3 to i32 + ret i32 %4 +} + +define i32 @muli32_p65(i32 %a) nounwind { +; XTENSA-LABEL: muli32_p65: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a3, 65 +; XTENSA-NEXT: l32r a8, .LCPI12_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i32 %a, 65 + ret i32 %1 +} + +define i32 @muli32_p63(i32 %a) nounwind { +; XTENSA-LABEL: muli32_p63: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a3, 63 +; XTENSA-NEXT: l32r a8, .LCPI13_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i32 %a, 63 + ret i32 %1 +} + +define i64 @muli64_p65(i64 %a) nounwind { +; XTENSA-LABEL: muli64_p65: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a4, 65 +; XTENSA-NEXT: movi a5, 0 +; XTENSA-NEXT: l32r a8, .LCPI14_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i64 %a, 65 + ret i64 %1 +} + +define i64 @muli64_p63(i64 %a) nounwind { +; XTENSA-LABEL: muli64_p63: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a4, 63 +; XTENSA-NEXT: movi a5, 0 +; 
XTENSA-NEXT: l32r a8, .LCPI15_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i64 %a, 63 + ret i64 %1 +} + +define i32 @muli32_m63(i32 %a) nounwind { +; XTENSA-LABEL: muli32_m63: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a3, -63 +; XTENSA-NEXT: l32r a8, .LCPI16_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i32 %a, -63 + ret i32 %1 +} + +define i32 @muli32_m65(i32 %a) nounwind { +; XTENSA-LABEL: muli32_m65: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a3, -65 +; XTENSA-NEXT: l32r a8, .LCPI17_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i32 %a, -65 + ret i32 %1 +} + +define i64 @muli64_m63(i64 %a) nounwind { +; XTENSA-LABEL: muli64_m63: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a4, -63 +; XTENSA-NEXT: movi a5, -1 +; XTENSA-NEXT: l32r a8, .LCPI18_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i64 %a, -63 + ret i64 %1 +} + +define i64 @muli64_m65(i64 %a) nounwind { +; XTENSA-LABEL: muli64_m65: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a4, -65 +; XTENSA-NEXT: movi a5, -1 +; XTENSA-NEXT: l32r a8, .LCPI19_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 
+; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i64 %a, -65 + ret i64 %1 +} + +define i32 @muli32_p384(i32 %a) nounwind { +; XTENSA-LABEL: muli32_p384: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: movi a3, 384 +; XTENSA-NEXT: l32r a8, .LCPI20_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i32 %a, 384 + ret i32 %1 +} + +define i32 @muli32_p12288(i32 %a) nounwind { +; XTENSA-LABEL: muli32_p12288: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a3, .LCPI21_0 +; XTENSA-NEXT: l32r a8, .LCPI21_1 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i32 %a, 12288 + ret i32 %1 +} + +define i32 @muli32_p4352(i32 %a) nounwind { +; XTENSA-LABEL: muli32_p4352: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a3, .LCPI22_0 +; XTENSA-NEXT: l32r a8, .LCPI22_1 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i32 %a, 4352 + ret i32 %1 +} + +define i32 @muli32_p3840(i32 %a) nounwind { +; XTENSA-LABEL: muli32_p3840: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a3, .LCPI23_0 +; XTENSA-NEXT: l32r a8, .LCPI23_1 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i32 %a, 3840 + ret i32 %1 +} + +define i32 @muli32_m3840(i32 %a) nounwind { +; XTENSA-LABEL: 
muli32_m3840: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a3, .LCPI24_0 +; XTENSA-NEXT: l32r a8, .LCPI24_1 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i32 %a, -3840 + ret i32 %1 +} + +define i32 @muli32_m4352(i32 %a) nounwind { +; XTENSA-LABEL: muli32_m4352: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a3, .LCPI25_0 +; XTENSA-NEXT: l32r a8, .LCPI25_1 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i32 %a, -4352 + ret i32 %1 +} + +define i64 @muli64_p4352(i64 %a) nounwind { +; XTENSA-LABEL: muli64_p4352: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a4, .LCPI26_0 +; XTENSA-NEXT: movi a5, 0 +; XTENSA-NEXT: l32r a8, .LCPI26_1 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i64 %a, 4352 + ret i64 %1 +} + +define i64 @muli64_p3840(i64 %a) nounwind { +; XTENSA-LABEL: muli64_p3840: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a4, .LCPI27_0 +; XTENSA-NEXT: movi a5, 0 +; XTENSA-NEXT: l32r a8, .LCPI27_1 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i64 %a, 3840 + ret i64 %1 +} + +define i64 @muli64_m4352(i64 %a) nounwind { +; XTENSA-LABEL: muli64_m4352: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, 
a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a4, .LCPI28_0 +; XTENSA-NEXT: movi a5, -1 +; XTENSA-NEXT: l32r a8, .LCPI28_1 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i64 %a, -4352 + ret i64 %1 +} + +define i64 @muli64_m3840(i64 %a) nounwind { +; XTENSA-LABEL: muli64_m3840: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: l32r a4, .LCPI29_0 +; XTENSA-NEXT: movi a5, -1 +; XTENSA-NEXT: l32r a8, .LCPI29_1 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i64 %a, -3840 + ret i64 %1 +} + +define i128 @muli128_m3840(i128 %a) nounwind { +; XTENSA-LABEL: muli128_m3840: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 8 # 4-byte Folded Spill +; XTENSA-NEXT: movi a7, -1 +; XTENSA-NEXT: s32i a7, a1, 4 +; XTENSA-NEXT: s32i a7, a1, 0 +; XTENSA-NEXT: l32r a6, .LCPI30_0 +; XTENSA-NEXT: l32r a8, .LCPI30_1 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 8 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i128 %a, -3840 + ret i128 %1 +} + +define i128 @muli128_m63(i128 %a) nounwind { +; XTENSA-LABEL: muli128_m63: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 8 # 4-byte Folded Spill +; XTENSA-NEXT: movi a7, -1 +; XTENSA-NEXT: s32i a7, a1, 4 +; XTENSA-NEXT: s32i a7, a1, 0 +; XTENSA-NEXT: movi a6, -63 +; XTENSA-NEXT: l32r a8, .LCPI31_0 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: l32i a0, a1, 8 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = mul i128 %a, -63 + ret i128 %1 +} + +define i64 @mulhsu_i64(i64 %a, i64 %b) nounwind { +; XTENSA-LABEL: 
mulhsu_i64: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 8 # 4-byte Folded Spill +; XTENSA-NEXT: or a7, a5, a5 +; XTENSA-NEXT: or a6, a4, a4 +; XTENSA-NEXT: srai a8, a7, 31 +; XTENSA-NEXT: s32i a8, a1, 4 +; XTENSA-NEXT: s32i a8, a1, 0 +; XTENSA-NEXT: movi a4, 0 +; XTENSA-NEXT: l32r a8, .LCPI32_0 +; XTENSA-NEXT: or a5, a4, a4 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: or a2, a4, a4 +; XTENSA-NEXT: or a3, a5, a5 +; XTENSA-NEXT: l32i a0, a1, 8 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %1 = zext i64 %a to i128 + %2 = sext i64 %b to i128 + %3 = mul i128 %1, %2 + %4 = lshr i128 %3, 64 + %5 = trunc i128 %4 to i64 + ret i64 %5 +} + +define i8 @muladd_demand(i8 %x, i8 %y) nounwind { +; XTENSA-LABEL: muladd_demand: +; XTENSA: slli a8, a2, 1 +; XTENSA-NEXT: sub a8, a3, a8 +; XTENSA-NEXT: movi a9, 15 +; XTENSA-NEXT: and a2, a8, a9 +; XTENSA-NEXT: ret + %m = mul i8 %x, 14 + %a = add i8 %y, %m + %r = and i8 %a, 15 + ret i8 %r +} + +define i8 @mulsub_demand(i8 %x, i8 %y) nounwind { +; XTENSA-LABEL: mulsub_demand: +; XTENSA: addx2 a8, a2, a3 +; XTENSA-NEXT: movi a9, 15 +; XTENSA-NEXT: and a2, a8, a9 +; XTENSA-NEXT: ret + %m = mul i8 %x, 14 + %a = sub i8 %y, %m + %r = and i8 %a, 15 + ret i8 %r +} + +define i8 @muladd_demand_2(i8 %x, i8 %y) nounwind { +; XTENSA-LABEL: muladd_demand_2: +; XTENSA: slli a8, a2, 1 +; XTENSA-NEXT: sub a8, a3, a8 +; XTENSA-NEXT: movi a9, -16 +; XTENSA-NEXT: or a2, a8, a9 +; XTENSA-NEXT: ret + %m = mul i8 %x, 14 + %a = add i8 %y, %m + %r = or i8 %a, 240 + ret i8 %r +} + +define i8 @mulsub_demand_2(i8 %x, i8 %y) nounwind { +; XTENSA-LABEL: mulsub_demand_2: +; XTENSA: addx2 a8, a2, a3 +; XTENSA-NEXT: movi a9, -16 +; XTENSA-NEXT: or a2, a8, a9 +; XTENSA-NEXT: ret + %m = mul i8 %x, 14 + %a = sub i8 %y, %m + %r = or i8 %a, 240 + ret i8 %r +} diff --git a/llvm/test/CodeGen/Xtensa/rotl-rotr.ll b/llvm/test/CodeGen/Xtensa/rotl-rotr.ll new file mode 100644 index 
0000000000000..1dc52fbc94b41 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/rotl-rotr.ll @@ -0,0 +1,500 @@ +; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=XTENSA %s + +define i32 @rotl_32(i32 %x, i32 %y) nounwind { +; XTENSA-LABEL: rotl_32: +; XTENSA: ssl a3 +; XTENSA-NEXT: sll a8, a2 +; XTENSA-NEXT: movi a9, 32 +; XTENSA-NEXT: sub a9, a9, a3 +; XTENSA-NEXT: ssr a9 +; XTENSA-NEXT: srl a9, a2 +; XTENSA-NEXT: or a2, a8, a9 +; XTENSA-NEXT: ret + %z = sub i32 32, %y + %b = shl i32 %x, %y + %c = lshr i32 %x, %z + %d = or i32 %b, %c + ret i32 %d +} + +define i32 @rotr_32(i32 %x, i32 %y) nounwind { +; XTENSA-LABEL: rotr_32: +; XTENSA: ssr a3 +; XTENSA-NEXT: srl a8, a2 +; XTENSA-NEXT: movi a9, 32 +; XTENSA-NEXT: sub a9, a9, a3 +; XTENSA-NEXT: ssl a9 +; XTENSA-NEXT: sll a9, a2 +; XTENSA-NEXT: or a2, a8, a9 +; XTENSA-NEXT: ret + %z = sub i32 32, %y + %b = lshr i32 %x, %y + %c = shl i32 %x, %z + %d = or i32 %b, %c + ret i32 %d +} + +define i64 @rotl_64(i64 %x, i64 %y) nounwind { +; XTENSA-LABEL: rotl_64: +; XTENSA: movi a8, 64 +; XTENSA-NEXT: sub a8, a8, a4 +; XTENSA-NEXT: ssr a8 +; XTENSA-NEXT: src a11, a3, a2 +; XTENSA-NEXT: movi a9, 32 +; XTENSA-NEXT: sub a9, a9, a4 +; XTENSA-NEXT: ssr a9 +; XTENSA-NEXT: srl a7, a3 +; XTENSA-NEXT: movi a10, 0 +; XTENSA-NEXT: blt a9, a10, .LBB2_2 +; XTENSA-NEXT: # %bb.1: +; XTENSA-NEXT: or a11, a7, a7 +; XTENSA-NEXT: .LBB2_2: +; XTENSA-NEXT: ssl a4 +; XTENSA-NEXT: sll a7, a2 +; XTENSA-NEXT: addi a5, a4, -32 +; XTENSA-NEXT: blt a5, a10, .LBB2_4 +; XTENSA-NEXT: # %bb.3: +; XTENSA-NEXT: or a7, a10, a10 +; XTENSA-NEXT: .LBB2_4: +; XTENSA-NEXT: ssl a4 +; XTENSA-NEXT: src a6, a3, a2 +; XTENSA-NEXT: ssl a5 +; XTENSA-NEXT: sll a4, a2 +; XTENSA-NEXT: blt a5, a10, .LBB2_6 +; XTENSA-NEXT: # %bb.5: +; XTENSA-NEXT: or a6, a4, a4 +; XTENSA-NEXT: .LBB2_6: +; XTENSA-NEXT: or a2, a7, a11 +; XTENSA-NEXT: ssr a8 +; XTENSA-NEXT: srl a8, a3 +; XTENSA-NEXT: blt a9, a10, .LBB2_8 +; XTENSA-NEXT: # %bb.7: +; XTENSA-NEXT: or 
a8, a10, a10 +; XTENSA-NEXT: .LBB2_8: +; XTENSA-NEXT: or a3, a6, a8 +; XTENSA-NEXT: ret + %z = sub i64 64, %y + %b = shl i64 %x, %y + %c = lshr i64 %x, %z + %d = or i64 %b, %c + ret i64 %d +} + +define i64 @rotr_64(i64 %x, i64 %y) nounwind { +; XTENSA-LABEL: rotr_64: +; XTENSA: ssr a4 +; XTENSA-NEXT: src a10, a3, a2 +; XTENSA-NEXT: addi a8, a4, -32 +; XTENSA-NEXT: ssr a8 +; XTENSA-NEXT: srl a11, a3 +; XTENSA-NEXT: movi a9, 0 +; XTENSA-NEXT: blt a8, a9, .LBB3_2 +; XTENSA-NEXT: # %bb.1: +; XTENSA-NEXT: or a10, a11, a11 +; XTENSA-NEXT: .LBB3_2: +; XTENSA-NEXT: movi a11, 32 +; XTENSA-NEXT: sub a7, a11, a4 +; XTENSA-NEXT: movi a11, 64 +; XTENSA-NEXT: sub a11, a11, a4 +; XTENSA-NEXT: ssl a11 +; XTENSA-NEXT: sll a6, a2 +; XTENSA-NEXT: blt a7, a9, .LBB3_4 +; XTENSA-NEXT: # %bb.3: +; XTENSA-NEXT: or a6, a9, a9 +; XTENSA-NEXT: .LBB3_4: +; XTENSA-NEXT: ssl a11 +; XTENSA-NEXT: src a11, a3, a2 +; XTENSA-NEXT: ssl a7 +; XTENSA-NEXT: sll a5, a2 +; XTENSA-NEXT: blt a7, a9, .LBB3_6 +; XTENSA-NEXT: # %bb.5: +; XTENSA-NEXT: or a11, a5, a5 +; XTENSA-NEXT: .LBB3_6: +; XTENSA-NEXT: or a2, a10, a6 +; XTENSA-NEXT: ssr a4 +; XTENSA-NEXT: srl a10, a3 +; XTENSA-NEXT: blt a8, a9, .LBB3_8 +; XTENSA-NEXT: # %bb.7: +; XTENSA-NEXT: or a10, a9, a9 +; XTENSA-NEXT: .LBB3_8: +; XTENSA-NEXT: or a3, a10, a11 +; XTENSA-NEXT: ret + %z = sub i64 64, %y + %b = lshr i64 %x, %y + %c = shl i64 %x, %z + %d = or i64 %b, %c + ret i64 %d +} + +define i32 @rotl_32_mask(i32 %x, i32 %y) nounwind { +; XTENSA-LABEL: rotl_32_mask: +; XTENSA: ssl a3 +; XTENSA-NEXT: sll a8, a2 +; XTENSA-NEXT: neg a9, a3 +; XTENSA-NEXT: movi a10, 31 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: ssr a9 +; XTENSA-NEXT: srl a9, a2 +; XTENSA-NEXT: or a2, a8, a9 +; XTENSA-NEXT: ret + %z = sub i32 0, %y + %and = and i32 %z, 31 + %b = shl i32 %x, %y + %c = lshr i32 %x, %and + %d = or i32 %b, %c + ret i32 %d +} + +define i32 @rotl_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind { +; XTENSA-LABEL: rotl_32_mask_and_63_and_31: +; XTENSA: movi a8, 
63 +; XTENSA-NEXT: and a8, a3, a8 +; XTENSA-NEXT: ssl a8 +; XTENSA-NEXT: sll a8, a2 +; XTENSA-NEXT: neg a9, a3 +; XTENSA-NEXT: movi a10, 31 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: ssr a9 +; XTENSA-NEXT: srl a9, a2 +; XTENSA-NEXT: or a2, a8, a9 +; XTENSA-NEXT: ret + %a = and i32 %y, 63 + %b = shl i32 %x, %a + %c = sub i32 0, %y + %d = and i32 %c, 31 + %e = lshr i32 %x, %d + %f = or i32 %b, %e + ret i32 %f +} + +define i32 @rotr_32_mask(i32 %x, i32 %y) nounwind { +; XTENSA-LABEL: rotr_32_mask: +; XTENSA: ssr a3 +; XTENSA-NEXT: srl a8, a2 +; XTENSA-NEXT: neg a9, a3 +; XTENSA-NEXT: movi a10, 31 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: ssl a9 +; XTENSA-NEXT: sll a9, a2 +; XTENSA-NEXT: or a2, a8, a9 +; XTENSA-NEXT: ret + %z = sub i32 0, %y + %and = and i32 %z, 31 + %b = lshr i32 %x, %y + %c = shl i32 %x, %and + %d = or i32 %b, %c + ret i32 %d +} + +define i32 @rotr_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind { +; XTENSA-LABEL: rotr_32_mask_and_63_and_31: +; XTENSA: movi a8, 63 +; XTENSA-NEXT: and a8, a3, a8 +; XTENSA-NEXT: ssr a8 +; XTENSA-NEXT: srl a8, a2 +; XTENSA-NEXT: neg a9, a3 +; XTENSA-NEXT: movi a10, 31 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: ssl a9 +; XTENSA-NEXT: sll a9, a2 +; XTENSA-NEXT: or a2, a8, a9 +; XTENSA-NEXT: ret + %a = and i32 %y, 63 + %b = lshr i32 %x, %a + %c = sub i32 0, %y + %d = and i32 %c, 31 + %e = shl i32 %x, %d + %f = or i32 %b, %e + ret i32 %f +} + +define i64 @rotl_64_mask(i64 %x, i64 %y) nounwind { +; XTENSA-LABEL: rotl_64_mask: +; XTENSA: ssl a4 +; XTENSA-NEXT: src a10, a3, a2 +; XTENSA-NEXT: addi a8, a4, -32 +; XTENSA-NEXT: ssl a8 +; XTENSA-NEXT: sll a11, a2 +; XTENSA-NEXT: movi a9, 0 +; XTENSA-NEXT: blt a8, a9, .LBB8_2 +; XTENSA-NEXT: # %bb.1: +; XTENSA-NEXT: or a10, a11, a11 +; XTENSA-NEXT: .LBB8_2: +; XTENSA-NEXT: neg a11, a4 +; XTENSA-NEXT: movi a7, 63 +; XTENSA-NEXT: and a7, a11, a7 +; XTENSA-NEXT: ssr a7 +; XTENSA-NEXT: srl a11, a3 +; XTENSA-NEXT: addi a6, a7, -32 +; XTENSA-NEXT: blt a6, a9, .LBB8_4 +; 
XTENSA-NEXT: # %bb.3: +; XTENSA-NEXT: or a11, a9, a9 +; XTENSA-NEXT: .LBB8_4: +; XTENSA-NEXT: ssr a7 +; XTENSA-NEXT: src a7, a3, a2 +; XTENSA-NEXT: ssr a6 +; XTENSA-NEXT: srl a5, a3 +; XTENSA-NEXT: blt a6, a9, .LBB8_6 +; XTENSA-NEXT: # %bb.5: +; XTENSA-NEXT: or a7, a5, a5 +; XTENSA-NEXT: .LBB8_6: +; XTENSA-NEXT: or a3, a10, a11 +; XTENSA-NEXT: ssl a4 +; XTENSA-NEXT: sll a10, a2 +; XTENSA-NEXT: blt a8, a9, .LBB8_8 +; XTENSA-NEXT: # %bb.7: +; XTENSA-NEXT: or a10, a9, a9 +; XTENSA-NEXT: .LBB8_8: +; XTENSA-NEXT: or a2, a10, a7 +; XTENSA-NEXT: ret + %z = sub i64 0, %y + %and = and i64 %z, 63 + %b = shl i64 %x, %y + %c = lshr i64 %x, %and + %d = or i64 %b, %c + ret i64 %d +} + +define i64 @rotl_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind { +; XTENSA-LABEL: rotl_64_mask_and_127_and_63: +; XTENSA: movi a8, 127 +; XTENSA-NEXT: and a8, a4, a8 +; XTENSA-NEXT: ssl a8 +; XTENSA-NEXT: src a11, a3, a2 +; XTENSA-NEXT: addi a9, a8, -32 +; XTENSA-NEXT: ssl a9 +; XTENSA-NEXT: sll a7, a2 +; XTENSA-NEXT: movi a10, 0 +; XTENSA-NEXT: blt a9, a10, .LBB9_2 +; XTENSA-NEXT: # %bb.1: +; XTENSA-NEXT: or a11, a7, a7 +; XTENSA-NEXT: .LBB9_2: +; XTENSA-NEXT: neg a7, a4 +; XTENSA-NEXT: movi a6, 63 +; XTENSA-NEXT: and a6, a7, a6 +; XTENSA-NEXT: ssr a6 +; XTENSA-NEXT: srl a7, a3 +; XTENSA-NEXT: addi a5, a6, -32 +; XTENSA-NEXT: blt a5, a10, .LBB9_4 +; XTENSA-NEXT: # %bb.3: +; XTENSA-NEXT: or a7, a10, a10 +; XTENSA-NEXT: .LBB9_4: +; XTENSA-NEXT: ssr a6 +; XTENSA-NEXT: src a6, a3, a2 +; XTENSA-NEXT: ssr a5 +; XTENSA-NEXT: srl a4, a3 +; XTENSA-NEXT: blt a5, a10, .LBB9_6 +; XTENSA-NEXT: # %bb.5: +; XTENSA-NEXT: or a6, a4, a4 +; XTENSA-NEXT: .LBB9_6: +; XTENSA-NEXT: or a3, a11, a7 +; XTENSA-NEXT: ssl a8 +; XTENSA-NEXT: sll a8, a2 +; XTENSA-NEXT: blt a9, a10, .LBB9_8 +; XTENSA-NEXT: # %bb.7: +; XTENSA-NEXT: or a8, a10, a10 +; XTENSA-NEXT: .LBB9_8: +; XTENSA-NEXT: or a2, a8, a6 +; XTENSA-NEXT: ret + %a = and i64 %y, 127 + %b = shl i64 %x, %a + %c = sub i64 0, %y + %d = and i64 %c, 63 + %e = lshr i64 %x, 
%d + %f = or i64 %b, %e + ret i64 %f +} + +define i64 @rotr_64_mask(i64 %x, i64 %y) nounwind { +; XTENSA-LABEL: rotr_64_mask: +; XTENSA: ssr a4 +; XTENSA-NEXT: src a10, a3, a2 +; XTENSA-NEXT: addi a8, a4, -32 +; XTENSA-NEXT: ssr a8 +; XTENSA-NEXT: srl a11, a3 +; XTENSA-NEXT: movi a9, 0 +; XTENSA-NEXT: blt a8, a9, .LBB10_2 +; XTENSA-NEXT: # %bb.1: +; XTENSA-NEXT: or a10, a11, a11 +; XTENSA-NEXT: .LBB10_2: +; XTENSA-NEXT: neg a11, a4 +; XTENSA-NEXT: movi a7, 63 +; XTENSA-NEXT: and a7, a11, a7 +; XTENSA-NEXT: ssl a7 +; XTENSA-NEXT: sll a11, a2 +; XTENSA-NEXT: addi a6, a7, -32 +; XTENSA-NEXT: blt a6, a9, .LBB10_4 +; XTENSA-NEXT: # %bb.3: +; XTENSA-NEXT: or a11, a9, a9 +; XTENSA-NEXT: .LBB10_4: +; XTENSA-NEXT: ssl a7 +; XTENSA-NEXT: src a7, a3, a2 +; XTENSA-NEXT: ssl a6 +; XTENSA-NEXT: sll a5, a2 +; XTENSA-NEXT: blt a6, a9, .LBB10_6 +; XTENSA-NEXT: # %bb.5: +; XTENSA-NEXT: or a7, a5, a5 +; XTENSA-NEXT: .LBB10_6: +; XTENSA-NEXT: or a2, a10, a11 +; XTENSA-NEXT: ssr a4 +; XTENSA-NEXT: srl a10, a3 +; XTENSA-NEXT: blt a8, a9, .LBB10_8 +; XTENSA-NEXT: # %bb.7: +; XTENSA-NEXT: or a10, a9, a9 +; XTENSA-NEXT: .LBB10_8: +; XTENSA-NEXT: or a3, a10, a7 +; XTENSA-NEXT: ret + %z = sub i64 0, %y + %and = and i64 %z, 63 + %b = lshr i64 %x, %y + %c = shl i64 %x, %and + %d = or i64 %b, %c + ret i64 %d +} + +define i64 @rotr_64_mask_and_127_and_63(i64 %x, i64 %y) nounwind { +; XTENSA-LABEL: rotr_64_mask_and_127_and_63: +; XTENSA: movi a8, 127 +; XTENSA-NEXT: and a8, a4, a8 +; XTENSA-NEXT: ssr a8 +; XTENSA-NEXT: src a11, a3, a2 +; XTENSA-NEXT: addi a9, a8, -32 +; XTENSA-NEXT: ssr a9 +; XTENSA-NEXT: srl a7, a3 +; XTENSA-NEXT: movi a10, 0 +; XTENSA-NEXT: blt a9, a10, .LBB11_2 +; XTENSA-NEXT: # %bb.1: +; XTENSA-NEXT: or a11, a7, a7 +; XTENSA-NEXT: .LBB11_2: +; XTENSA-NEXT: neg a7, a4 +; XTENSA-NEXT: movi a6, 63 +; XTENSA-NEXT: and a6, a7, a6 +; XTENSA-NEXT: ssl a6 +; XTENSA-NEXT: sll a7, a2 +; XTENSA-NEXT: addi a5, a6, -32 +; XTENSA-NEXT: blt a5, a10, .LBB11_4 +; XTENSA-NEXT: # %bb.3: +; 
XTENSA-NEXT: or a7, a10, a10 +; XTENSA-NEXT: .LBB11_4: +; XTENSA-NEXT: ssl a6 +; XTENSA-NEXT: src a6, a3, a2 +; XTENSA-NEXT: ssl a5 +; XTENSA-NEXT: sll a4, a2 +; XTENSA-NEXT: blt a5, a10, .LBB11_6 +; XTENSA-NEXT: # %bb.5: +; XTENSA-NEXT: or a6, a4, a4 +; XTENSA-NEXT: .LBB11_6: +; XTENSA-NEXT: or a2, a11, a7 +; XTENSA-NEXT: ssr a8 +; XTENSA-NEXT: srl a8, a3 +; XTENSA-NEXT: blt a9, a10, .LBB11_8 +; XTENSA-NEXT: # %bb.7: +; XTENSA-NEXT: or a8, a10, a10 +; XTENSA-NEXT: .LBB11_8: +; XTENSA-NEXT: or a3, a8, a6 +; XTENSA-NEXT: ret + %a = and i64 %y, 127 + %b = lshr i64 %x, %a + %c = sub i64 0, %y + %d = and i64 %c, 63 + %e = shl i64 %x, %d + %f = or i64 %b, %e + ret i64 %f +} + +define signext i32 @rotl_32_mask_shared(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind { +; XTENSA-LABEL: rotl_32_mask_shared: +; XTENSA: movi a8, 31 +; XTENSA-NEXT: and a9, a4, a8 +; XTENSA-NEXT: ssl a9 +; XTENSA-NEXT: sll a10, a2 +; XTENSA-NEXT: neg a11, a4 +; XTENSA-NEXT: and a8, a11, a8 +; XTENSA-NEXT: ssr a8 +; XTENSA-NEXT: srl a8, a2 +; XTENSA-NEXT: or a8, a10, a8 +; XTENSA-NEXT: ssl a9 +; XTENSA-NEXT: sll a9, a3 +; XTENSA-NEXT: add a2, a8, a9 +; XTENSA-NEXT: ret + %maskedamt = and i32 %amt, 31 + %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %maskedamt) + %2 = shl i32 %b, %maskedamt + %3 = add i32 %1, %2 + ret i32 %3 +} +declare i32 @llvm.fshl.i32(i32, i32, i32) + +define signext i32 @rotr_32_mask_shared(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind { +; XTENSA-LABEL: rotr_32_mask_shared: +; XTENSA: movi a8, 31 +; XTENSA-NEXT: and a9, a4, a8 +; XTENSA-NEXT: ssr a9 +; XTENSA-NEXT: srl a10, a2 +; XTENSA-NEXT: neg a11, a4 +; XTENSA-NEXT: and a8, a11, a8 +; XTENSA-NEXT: ssl a8 +; XTENSA-NEXT: sll a8, a2 +; XTENSA-NEXT: or a8, a10, a8 +; XTENSA-NEXT: ssl a9 +; XTENSA-NEXT: sll a9, a3 +; XTENSA-NEXT: add a2, a8, a9 +; XTENSA-NEXT: ret + %maskedamt = and i32 %amt, 31 + %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %maskedamt) + %2 = shl i32 %b, %maskedamt + 
%3 = add i32 %1, %2 + ret i32 %3 +} +declare i32 @llvm.fshr.i32(i32, i32, i32) + +define signext i32 @rotl_32_mask_multiple(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind { +; XTENSA-LABEL: rotl_32_mask_multiple: +; XTENSA: movi a8, 31 +; XTENSA-NEXT: and a9, a4, a8 +; XTENSA-NEXT: ssl a9 +; XTENSA-NEXT: sll a10, a3 +; XTENSA-NEXT: neg a11, a4 +; XTENSA-NEXT: and a8, a11, a8 +; XTENSA-NEXT: ssr a8 +; XTENSA-NEXT: srl a11, a3 +; XTENSA-NEXT: or a10, a10, a11 +; XTENSA-NEXT: ssl a9 +; XTENSA-NEXT: sll a9, a2 +; XTENSA-NEXT: ssr a8 +; XTENSA-NEXT: srl a8, a2 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: add a2, a8, a10 +; XTENSA-NEXT: ret + %maskedamt = and i32 %amt, 31 + %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %maskedamt) + %2 = tail call i32 @llvm.fshl.i32(i32 %b, i32 %b, i32 %maskedamt) + %3 = add i32 %1, %2 + ret i32 %3 +} + +define signext i32 @rotr_32_mask_multiple(i32 signext %a, i32 signext %b, i32 signext %amt) nounwind { +; XTENSA-LABEL: rotr_32_mask_multiple: +; XTENSA: movi a8, 31 +; XTENSA-NEXT: and a9, a4, a8 +; XTENSA-NEXT: ssr a9 +; XTENSA-NEXT: srl a10, a3 +; XTENSA-NEXT: neg a11, a4 +; XTENSA-NEXT: and a8, a11, a8 +; XTENSA-NEXT: ssl a8 +; XTENSA-NEXT: sll a11, a3 +; XTENSA-NEXT: or a10, a10, a11 +; XTENSA-NEXT: ssr a9 +; XTENSA-NEXT: srl a9, a2 +; XTENSA-NEXT: ssl a8 +; XTENSA-NEXT: sll a8, a2 +; XTENSA-NEXT: or a8, a9, a8 +; XTENSA-NEXT: add a2, a8, a10 +; XTENSA-NEXT: ret + %maskedamt = and i32 %amt, 31 + %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %maskedamt) + %2 = tail call i32 @llvm.fshr.i32(i32 %b, i32 %b, i32 %maskedamt) + %3 = add i32 %1, %2 + ret i32 %3 +} diff --git a/llvm/test/CodeGen/Xtensa/shift.ll b/llvm/test/CodeGen/Xtensa/shift.ll new file mode 100644 index 0000000000000..acca8551fa621 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/shift.ll @@ -0,0 +1,72 @@ +; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ +; RUN: | FileCheck %s + +define i64 @lshl_64(i64 %x, i64 %y) nounwind { +; 
CHECK-LABEL: lshl_64: +; CHECK: ssl a4 +; CHECK-NEXT: src a3, a3, a2 +; CHECK-NEXT: addi a8, a4, -32 +; CHECK-NEXT: ssl a8 +; CHECK-NEXT: sll a10, a2 +; CHECK-NEXT: movi a9, 0 +; CHECK-NEXT: blt a8, a9, .LBB0_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: or a3, a10, a10 +; CHECK-NEXT: .LBB0_2: +; CHECK-NEXT: ssl a4 +; CHECK-NEXT: sll a2, a2 +; CHECK-NEXT: blt a8, a9, .LBB0_4 +; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: or a2, a9, a9 +; CHECK-NEXT: .LBB0_4: +; CHECK-NEXT: ret + %c = shl i64 %x, %y + ret i64 %c +} + +define i64 @lshr_64(i64 %x, i64 %y) nounwind { +; CHECK-LABEL: lshr_64: +; CHECK: ssr a4 +; CHECK-NEXT: src a2, a3, a2 +; CHECK-NEXT: addi a8, a4, -32 +; CHECK-NEXT: ssr a8 +; CHECK-NEXT: srl a10, a3 +; CHECK-NEXT: movi a9, 0 +; CHECK-NEXT: blt a8, a9, .LBB1_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: or a2, a10, a10 +; CHECK-NEXT: .LBB1_2: +; CHECK-NEXT: ssr a4 +; CHECK-NEXT: srl a3, a3 +; CHECK-NEXT: blt a8, a9, .LBB1_4 +; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: or a3, a9, a9 +; CHECK-NEXT: .LBB1_4: +; CHECK-NEXT: ret + %c = lshr i64 %x, %y + ret i64 %c +} + +define i64 @ashr_64(i64 %x, i64 %y) nounwind { +; CHECK-LABEL: ashr_64: +; CHECK: ssr a4 +; CHECK-NEXT: src a2, a3, a2 +; CHECK-NEXT: addi a9, a4, -32 +; CHECK-NEXT: ssr a9 +; CHECK-NEXT: sra a8, a3 +; CHECK-NEXT: movi a10, 0 +; CHECK-NEXT: blt a9, a10, .LBB2_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: or a2, a8, a8 +; CHECK-NEXT: .LBB2_2: +; CHECK-NEXT: ssr a4 +; CHECK-NEXT: sra a8, a3 +; CHECK-NEXT: blt a9, a10, .LBB2_4 +; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: srai a8, a3, 31 +; CHECK-NEXT: .LBB2_4: +; CHECK-NEXT: or a3, a8, a8 +; CHECK-NEXT: ret + %c = ashr i64 %x, %y + ret i64 %c +} From 19f6236165a407bcf8ec371b5fd725dcf88b89de Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Fri, 26 Jul 2024 18:03:37 +0300 Subject: [PATCH 002/289] [Xtensa] Minor fixes in constant pool lowering. 
--- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 22 ++++----- llvm/lib/Target/Xtensa/XtensaISelLowering.h | 2 +- .../CodeGen/Xtensa/constantpool-aggregate.ll | 45 +++++++++++++++++++ 3 files changed, 57 insertions(+), 12 deletions(-) create mode 100644 llvm/test/CodeGen/Xtensa/constantpool-aggregate.ll diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 8c30dbbad821e..2253d18c7ff81 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -690,25 +690,25 @@ SDValue XtensaTargetLowering::getAddrPCRel(SDValue Op, return DAG.getNode(XtensaISD::PCREL_WRAPPER, DL, Ty, Op); } -SDValue XtensaTargetLowering::LowerConstantPool(ConstantPoolSDNode *CP, +SDValue XtensaTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { - EVT PtrVT = getPointerTy(DAG.getDataLayout()); - auto C = const_cast(CP->getConstVal()); - auto T = const_cast(CP->getType()); + EVT PtrVT = Op.getValueType(); + ConstantPoolSDNode *CP = cast(Op); + auto C = const_cast(CP->getConstVal()); + auto T = CP->getType(); SDValue Result; // Do not use constant pool for aggregate or vector constant types, // in such cases create global variable, for example to store tabel // when we lower CTTZ operation. 
- if (T->isAggregateType() || T->isVectorTy()) { - auto AFI = DAG.getMachineFunction().getInfo(); - auto M = const_cast( - DAG.getMachineFunction().getFunction().getParent()); + if (T->isAggregateType()) { + MachineFunction &MF = DAG.getMachineFunction(); + auto AFI = MF.getInfo(); + auto M = const_cast(MF.getFunction().getParent()); auto GV = new GlobalVariable( *M, T, /*isConstant=*/true, GlobalVariable::InternalLinkage, C, Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" + - Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" + - Twine(AFI->createLabelUId())); + Twine(MF.getFunctionNumber()) + "_" + Twine(AFI->createLabelUId())); Result = DAG.getTargetConstantPool(GV, PtrVT, Align(4)); } else { if (!CP->isMachineConstantPoolEntry()) { @@ -898,7 +898,7 @@ SDValue XtensaTargetLowering::LowerOperation(SDValue Op, case ISD::JumpTable: return LowerJumpTable(Op, DAG); case ISD::ConstantPool: - return LowerConstantPool(cast(Op), DAG); + return LowerConstantPool(Op, DAG); case ISD::MUL: return LowerMUL(Op, DAG); case ISD::SELECT_CC: diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index b4c4929922cbf..8e18b50f211da 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -109,7 +109,7 @@ class XtensaTargetLowering : public TargetLowering { SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerConstantPool(ConstantPoolSDNode *CP, SelectionDAG &DAG) const; + SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const; diff --git a/llvm/test/CodeGen/Xtensa/constantpool-aggregate.ll b/llvm/test/CodeGen/Xtensa/constantpool-aggregate.ll new file mode 100644 index 0000000000000..3ace3a6d604b3 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/constantpool-aggregate.ll @@ -0,0 +1,45 @@ +; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ +; RUN: | FileCheck %s + +; Check that we 
place const array (CTTZ lookup table) in global variable, +; instead of constant pool and place label to this table in constant pool. + +; CHECK: .literal_position +; CHECK-NEXT: .literal .LCPI0_0, 125613361 +; CHECK-NEXT: .literal .LCPI0_1, __mulsi3 +; CHECK-NEXT: .literal .LCPI0_2, .LCP0_0 +; CHECK-NEXT: .global test_cttz_i32 + +define i32 @test_cttz_i32(i32 %a) nounwind { +; XTENSA-LABEL: test_cttz_i32: +; XTENSA: addi a8, a1, -16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill +; XTENSA-NEXT: or a8, a2, a2 +; XTENSA-NEXT: movi a2, 32 +; XTENSA-NEXT: beqz a8, .LBB0_2 +; XTENSA-NEXT: j .LBB2_1 +; XTENSA-NEXT: .LBB2_1: # %cond.false +; XTENSA-NEXT: neg a9, a8 +; XTENSA-NEXT: and a2, a8, a9 +; XTENSA-NEXT: l32r a3, .LCPI0_0 +; XTENSA-NEXT: l32r a8, .LCPI0_1 +; XTENSA-NEXT: callx0 a8 +; XTENSA-NEXT: movi a8, 27 +; XTENSA-NEXT: ssr a8 +; XTENSA-NEXT: srl a8, a2 +; XTENSA-NEXT: l32r a9, .LCPI0_2 +; XTENSA-NEXT: add a8, a9, a8 +; XTENSA-NEXT: l8ui a2, a8, 0 +; XTENSA-NEXT: .LBB2_2: # %cond.end +; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: ret + %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 false) + ret i32 %tmp +} + +; CHECK: .LCP0_0: +; CHECK-NEXT: .ascii "\000\001\034\002\035\016\030\003\036\026\024\017\031\021\004\b\037\033\r\027\025\023\020\007\032\f\022\006\013\005\n\t" +; CHECK-NEXT: .size .LCP0_0, 32 From dc2f7e653cb8445f0c27438ce7ba6626343cd1a9 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Fri, 26 Jul 2024 20:49:19 +0300 Subject: [PATCH 003/289] [Xtensa] Minor code formatting. 
--- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 2253d18c7ff81..229bbc25cb28d 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -694,7 +694,7 @@ SDValue XtensaTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { EVT PtrVT = Op.getValueType(); ConstantPoolSDNode *CP = cast(Op); - auto C = const_cast(CP->getConstVal()); + auto C = const_cast(CP->getConstVal()); auto T = CP->getType(); SDValue Result; From 562c940013658ba8b98d7d009492e9671d83b944 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Aug 2024 12:52:50 +0300 Subject: [PATCH 004/289] [Xtensa] Transform multipy by constant. Implement decomposeMulByConstant function and remove lowering Mul operation. Minor fixes in lowering constant pool. --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 53 +++----- llvm/lib/Target/Xtensa/XtensaISelLowering.h | 9 +- llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll | 69 ++++------- llvm/test/CodeGen/Xtensa/mul.ll | 24 ++++ llvm/test/CodeGen/Xtensa/shift.ll | 115 ++++++++++++++++-- 5 files changed, 180 insertions(+), 90 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 229bbc25cb28d..b87c081a62c96 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -100,7 +100,7 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setCondCodeAction(ISD::SETUGT, MVT::i32, Expand); setCondCodeAction(ISD::SETULE, MVT::i32, Expand); - setOperationAction(ISD::MUL, MVT::i32, Custom); + setOperationAction(ISD::MUL, MVT::i32, Expand); setOperationAction(ISD::MULHU, MVT::i32, Expand); setOperationAction(ISD::MULHS, MVT::i32, Expand); setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); @@ -655,10 
+655,12 @@ SDValue XtensaTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32); const DataLayout &TD = DAG.getDataLayout(); EVT PtrVT = Table.getValueType(); - unsigned EntrySize = MJTI->getEntrySize(TD); - Index = DAG.getNode(ISD::MUL, DL, Index.getValueType(), Index, - DAG.getConstant(EntrySize, DL, Index.getValueType())); + assert((MJTI->getEntrySize(TD) == 4) && "Unsupported jump-table entry size"); + + Index = DAG.getNode(ISD::SHL, DL, Index.getValueType(), Index, + DAG.getConstant(2, DL, Index.getValueType())); + SDValue Addr = DAG.getNode(ISD::ADD, DL, Index.getValueType(), Index, Table); SDValue LD = DAG.getLoad(PtrVT, DL, Chain, Addr, @@ -852,36 +854,23 @@ SDValue XtensaTargetLowering::LowerShiftRightParts(SDValue Op, return DAG.getMergeValues(Ops, DL); } -SDValue XtensaTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { - EVT VT = Op->getValueType(0); - SDLoc DL(Op); +bool XtensaTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT, + SDValue C) const { + if (!VT.isScalarInteger()) + return false; - if (VT != MVT::i32) - return SDValue(); - - ConstantSDNode *C = dyn_cast(Op->getOperand(1)); - if (!C) - return SDValue(); - - int64_t MulAmt = C->getSExtValue(); - unsigned ShiftAmt = 0; - - switch (MulAmt) { - case 2: - ShiftAmt = 1; - break; - case 4: - ShiftAmt = 2; - break; - case 8: - ShiftAmt = 3; - break; - default: - return SDValue(); + // Omit if data size exceeds. + if (VT.getSizeInBits() > 32) + return false; + + if (auto *ConstNode = dyn_cast(C.getNode())) { + const APInt &Imm = ConstNode->getAPIntValue(); + // Convert MULT to LSL. 
+ if (Imm.isPowerOf2() && Imm.isIntN(5)) + return true; } - return DAG.getNode(ISD::SHL, DL, VT, Op->getOperand(0), - DAG.getConstant(ShiftAmt, DL, VT)); + return false; } SDValue XtensaTargetLowering::LowerOperation(SDValue Op, @@ -899,8 +888,6 @@ SDValue XtensaTargetLowering::LowerOperation(SDValue Op, return LowerJumpTable(Op, DAG); case ISD::ConstantPool: return LowerConstantPool(Op, DAG); - case ISD::MUL: - return LowerMUL(Op, DAG); case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); case ISD::STACKSAVE: diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index 8e18b50f211da..861c3c58847a1 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -41,8 +41,10 @@ enum { // condition code in op #4 SELECT_CC, - // Shift + // SRCL(R) performs shift left(right) of the concatenation of 2 registers + // and returns high(low) 32-bit part of 64-bit result SRCL, + // Shift Right Combined SRCR, }; } @@ -90,6 +92,9 @@ class XtensaTargetLowering : public TargetLowering { const SmallVectorImpl &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override; + bool decomposeMulByConstant(LLVMContext &Context, EVT VT, + SDValue C) const override; + const XtensaSubtarget &getSubtarget() const { return Subtarget; } MachineBasicBlock * @@ -111,8 +116,6 @@ class XtensaTargetLowering : public TargetLowering { SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const; SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const; diff --git a/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll index 030f2a0fbfdc7..81e4a04c6d23e 100644 --- a/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll +++ b/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll @@ -282,10 +282,7 @@ define i16 @test_ctlz_i16(i16 %a) nounwind { define i32 
@test_ctlz_i32(i32 %a) nounwind { ; XTENSA-LABEL: test_ctlz_i32: -; XTENSA: addi a8, a1, -16 -; XTENSA-NEXT: or a1, a8, a8 -; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill -; XTENSA-NEXT: or a8, a2, a2 +; XTENSA: or a8, a2, a2 ; XTENSA-NEXT: movi a2, 32 ; XTENSA-NEXT: beqz a8, .LBB8_2 ; XTENSA-NEXT: j .LBB8_1 @@ -316,17 +313,15 @@ define i32 @test_ctlz_i32(i32 %a) nounwind { ; XTENSA-NEXT: srli a9, a8, 4 ; XTENSA-NEXT: add a8, a8, a9 ; XTENSA-NEXT: l32r a9, .LCPI8_2 -; XTENSA-NEXT: and a2, a8, a9 -; XTENSA-NEXT: l32r a3, .LCPI8_3 -; XTENSA-NEXT: l32r a8, .LCPI8_4 -; XTENSA-NEXT: callx0 a8 -; XTENSA-NEXT: movi a8, 24 -; XTENSA-NEXT: ssr a8 -; XTENSA-NEXT: srl a2, a2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: slli a9, a8, 8 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: slli a9, a8, 16 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: movi a9, 24 +; XTENSA-NEXT: ssr a9 +; XTENSA-NEXT: srl a2, a8 ; XTENSA-NEXT: .LBB8_2: # %cond.end -; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload -; XTENSA-NEXT: addi a8, a1, 16 -; XTENSA-NEXT: or a1, a8, a8 ; XTENSA-NEXT: ret %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 false) ret i32 %tmp @@ -410,10 +405,7 @@ define i16 @test_ctlz_i16_zero_undef(i16 %a) nounwind { define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind { ; XTENSA-LABEL: test_ctlz_i32_zero_undef: -; XTENSA: addi a8, a1, -16 -; XTENSA-NEXT: or a1, a8, a8 -; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill -; XTENSA-NEXT: srli a8, a2, 1 +; XTENSA: srli a8, a2, 1 ; XTENSA-NEXT: or a8, a2, a8 ; XTENSA-NEXT: srli a9, a8, 2 ; XTENSA-NEXT: or a8, a8, a9 @@ -439,16 +431,14 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind { ; XTENSA-NEXT: srli a9, a8, 4 ; XTENSA-NEXT: add a8, a8, a9 ; XTENSA-NEXT: l32r a9, .LCPI11_2 -; XTENSA-NEXT: and a2, a8, a9 -; XTENSA-NEXT: l32r a3, .LCPI11_3 -; XTENSA-NEXT: l32r a8, .LCPI11_4 -; XTENSA-NEXT: callx0 a8 -; XTENSA-NEXT: movi a8, 24 -; XTENSA-NEXT: ssr a8 -; XTENSA-NEXT: srl a2, a2 -; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded 
Reload -; XTENSA-NEXT: addi a8, a1, 16 -; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: slli a9, a8, 8 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: slli a9, a8, 16 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: movi a9, 24 +; XTENSA-NEXT: ssr a9 +; XTENSA-NEXT: srl a2, a8 ; XTENSA-NEXT: ret %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 true) ret i32 %tmp @@ -500,10 +490,7 @@ define i16 @test_ctpop_i16(i16 %a) nounwind { define i32 @test_ctpop_i32(i32 %a) nounwind { ; XTENSA-LABEL: test_ctpop_i32: -; XTENSA: addi a8, a1, -16 -; XTENSA-NEXT: or a1, a8, a8 -; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill -; XTENSA-NEXT: srli a8, a2, 1 +; XTENSA: srli a8, a2, 1 ; XTENSA-NEXT: l32r a9, .LCPI14_0 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: sub a8, a2, a8 @@ -515,16 +502,14 @@ define i32 @test_ctpop_i32(i32 %a) nounwind { ; XTENSA-NEXT: srli a9, a8, 4 ; XTENSA-NEXT: add a8, a8, a9 ; XTENSA-NEXT: l32r a9, .LCPI14_2 -; XTENSA-NEXT: and a2, a8, a9 -; XTENSA-NEXT: l32r a3, .LCPI14_3 -; XTENSA-NEXT: l32r a8, .LCPI14_4 -; XTENSA-NEXT: callx0 a8 -; XTENSA-NEXT: movi a8, 24 -; XTENSA-NEXT: ssr a8 -; XTENSA-NEXT: srl a2, a2 -; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload -; XTENSA-NEXT: addi a8, a1, 16 -; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: slli a9, a8, 8 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: slli a9, a8, 16 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: movi a9, 24 +; XTENSA-NEXT: ssr a9 +; XTENSA-NEXT: srl a2, a8 ; XTENSA-NEXT: ret %1 = call i32 @llvm.ctpop.i32(i32 %a) ret i32 %1 diff --git a/llvm/test/CodeGen/Xtensa/mul.ll b/llvm/test/CodeGen/Xtensa/mul.ll index 0be2885458163..2a96f6ce52690 100644 --- a/llvm/test/CodeGen/Xtensa/mul.ll +++ b/llvm/test/CodeGen/Xtensa/mul.ll @@ -634,3 +634,27 @@ define i8 @mulsub_demand_2(i8 %x, i8 %y) nounwind { %r = or i8 %a, 240 ret i8 %r } + +define signext i32 @mul_imm_2(i32 %a) nounwind { +; XTENSA-LABEL: mul_imm_2: +; XTENSA: slli a2, a2, 1 +; XTENSA-NEXT: 
ret + %1 = mul i32 %a, 2 + ret i32 %1 +} + +define signext i32 @mul_imm_1024(i32 %a) nounwind { +; XTENSA-LABEL: mul_imm_1024: +; XTENSA: slli a2, a2, 10 +; XTENSA-NEXT: ret + %1 = mul i32 %a, 1024 + ret i32 %1 +} + +define signext i32 @mul_imm_16384(i32 %a) nounwind { +; XTENSA-LABEL: mul_imm_16384: +; XTENSA: slli a2, a2, 14 +; XTENSA-NEXT: ret + %1 = mul i32 %a, 16384 + ret i32 %1 +} diff --git a/llvm/test/CodeGen/Xtensa/shift.ll b/llvm/test/CodeGen/Xtensa/shift.ll index acca8551fa621..85973e26c2ef4 100644 --- a/llvm/test/CodeGen/Xtensa/shift.ll +++ b/llvm/test/CodeGen/Xtensa/shift.ll @@ -1,6 +1,97 @@ ; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ ; RUN: | FileCheck %s +define i32 @lshl(i32 %x, i32 %y) nounwind { +; CHECK-LABEL: lshl: +; CHECK: ssl a3 +; CHECK-NEXT: sll a2, a2 +; CHECK-NEXT: ret + %c = shl i32 %x, %y + ret i32 %c +} + +define i32 @lshl_imm_1(i32 %x) nounwind { +; CHECK-LABEL: lshl_imm_1: +; CHECK: slli a2, a2, 1 +; CHECK-NEXT: ret + %c = shl i32 %x, 1 + ret i32 %c +} + +define i32 @lshl_imm_10(i32 %x) nounwind { +; CHECK-LABEL: lshl_imm_10: +; CHECK: slli a2, a2, 10 +; CHECK-NEXT: ret + %c = shl i32 %x, 10 + ret i32 %c +} + +define i32 @lshl_imm_31(i32 %x) nounwind { +; CHECK-LABEL: lshl_imm_31: +; CHECK: slli a2, a2, 31 +; CHECK-NEXT: ret + %c = shl i32 %x, 31 + ret i32 %c +} + +define i32 @lshr(i32 %x, i32 %y) nounwind { +; CHECK-LABEL: lshr: +; CHECK: ssr a3 +; CHECK-NEXT: srl a2, a2 +; CHECK-NEXT: ret + %c = lshr i32 %x, %y + ret i32 %c +} + +define i32 @lshr_imm_1(i32 %x, i32 %y) nounwind { +; CHECK-LABEL: lshr_imm_1: +; CHECK: srli a2, a2, 1 +; CHECK-NEXT: ret + %c = lshr i32 %x, 1 + ret i32 %c +} + +define i32 @lshr_imm_15(i32 %x, i32 %y) nounwind { +; CHECK-LABEL: lshr_imm_15: +; CHECK: srli a2, a2, 15 +; CHECK-NEXT: ret + %c = lshr i32 %x, 15 + ret i32 %c +} + +define i32 @ashr(i32 %x, i32 %y) nounwind { +; CHECK-LABEL: ashr: +; CHECK: ssr a3 +; CHECK-NEXT: sra a2, a2 +; CHECK-NEXT: ret + %c = ashr i32 %x, %y + ret i32 %c +} + 
+define i32 @ashr_imm_1(i32 %x, i32 %y) nounwind { +; CHECK-LABEL: ashr_imm_1: +; CHECK: srai a2, a2, 1 +; CHECK-NEXT: ret + %c = ashr i32 %x, 1 + ret i32 %c +} + +define i32 @ashr_imm_10(i32 %x, i32 %y) nounwind { +; CHECK-LABEL: ashr_imm_10: +; CHECK: srai a2, a2, 10 +; CHECK-NEXT: ret + %c = ashr i32 %x, 10 + ret i32 %c +} + +define i32 @ashr_imm_31(i32 %x, i32 %y) nounwind { +; CHECK-LABEL: ashr_imm_31: +; CHECK: srai a2, a2, 31 +; CHECK-NEXT: ret + %c = ashr i32 %x, 31 + ret i32 %c +} + define i64 @lshl_64(i64 %x, i64 %y) nounwind { ; CHECK-LABEL: lshl_64: ; CHECK: ssl a4 @@ -9,16 +100,16 @@ define i64 @lshl_64(i64 %x, i64 %y) nounwind { ; CHECK-NEXT: ssl a8 ; CHECK-NEXT: sll a10, a2 ; CHECK-NEXT: movi a9, 0 -; CHECK-NEXT: blt a8, a9, .LBB0_2 +; CHECK-NEXT: blt a8, a9, .LBB11_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: or a3, a10, a10 -; CHECK-NEXT: .LBB0_2: +; CHECK-NEXT: .LBB11_2: ; CHECK-NEXT: ssl a4 ; CHECK-NEXT: sll a2, a2 -; CHECK-NEXT: blt a8, a9, .LBB0_4 +; CHECK-NEXT: blt a8, a9, .LBB11_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: or a2, a9, a9 -; CHECK-NEXT: .LBB0_4: +; CHECK-NEXT: .LBB11_4: ; CHECK-NEXT: ret %c = shl i64 %x, %y ret i64 %c @@ -32,16 +123,16 @@ define i64 @lshr_64(i64 %x, i64 %y) nounwind { ; CHECK-NEXT: ssr a8 ; CHECK-NEXT: srl a10, a3 ; CHECK-NEXT: movi a9, 0 -; CHECK-NEXT: blt a8, a9, .LBB1_2 +; CHECK-NEXT: blt a8, a9, .LBB12_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: or a2, a10, a10 -; CHECK-NEXT: .LBB1_2: +; CHECK-NEXT: .LBB12_2: ; CHECK-NEXT: ssr a4 ; CHECK-NEXT: srl a3, a3 -; CHECK-NEXT: blt a8, a9, .LBB1_4 +; CHECK-NEXT: blt a8, a9, .LBB12_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: or a3, a9, a9 -; CHECK-NEXT: .LBB1_4: +; CHECK-NEXT: .LBB12_4: ; CHECK-NEXT: ret %c = lshr i64 %x, %y ret i64 %c @@ -55,16 +146,16 @@ define i64 @ashr_64(i64 %x, i64 %y) nounwind { ; CHECK-NEXT: ssr a9 ; CHECK-NEXT: sra a8, a3 ; CHECK-NEXT: movi a10, 0 -; CHECK-NEXT: blt a9, a10, .LBB2_2 +; CHECK-NEXT: blt a9, a10, .LBB13_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: or a2, 
a8, a8 -; CHECK-NEXT: .LBB2_2: +; CHECK-NEXT: .LBB13_2: ; CHECK-NEXT: ssr a4 ; CHECK-NEXT: sra a8, a3 -; CHECK-NEXT: blt a9, a10, .LBB2_4 +; CHECK-NEXT: blt a9, a10, .LBB13_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: srai a8, a3, 31 -; CHECK-NEXT: .LBB2_4: +; CHECK-NEXT: .LBB13_4: ; CHECK-NEXT: or a3, a8, a8 ; CHECK-NEXT: ret %c = ashr i64 %x, %y From 37d16f908a4cda8a7c7c7933f40d38003854476c Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 6 Aug 2024 01:02:53 +0300 Subject: [PATCH 005/289] [Xtensa] Lower CTPOP operation. Implement lowering of the CTPOP operation. Also remove global variable creation code from lowerConstantPool functio. Add vector case handling to the decomposeMulByConstant function. --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 105 ++++-- llvm/lib/Target/Xtensa/XtensaISelLowering.h | 7 + llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 3 +- llvm/lib/Target/Xtensa/XtensaOperators.td | 5 + .../CodeGen/Xtensa/constantpool-aggregate.ll | 45 --- llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll | 346 ++++++++++-------- llvm/test/CodeGen/Xtensa/mul.ll | 11 + 7 files changed, 297 insertions(+), 225 deletions(-) delete mode 100644 llvm/test/CodeGen/Xtensa/constantpool-aggregate.ll diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index b87c081a62c96..5dbaa335f99fa 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -120,7 +120,7 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setOperationAction(ISD::BSWAP, MVT::i32, Expand); setOperationAction(ISD::ROTL, MVT::i32, Expand); setOperationAction(ISD::ROTR, MVT::i32, Expand); - setOperationAction(ISD::CTPOP, MVT::i32, Expand); + setOperationAction(ISD::CTPOP, MVT::i32, Custom); setOperationAction(ISD::CTTZ, MVT::i32, Expand); setOperationAction(ISD::CTLZ, MVT::i32, Expand); setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); @@ -655,11 +655,13 @@ SDValue 
XtensaTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32); const DataLayout &TD = DAG.getDataLayout(); EVT PtrVT = Table.getValueType(); + unsigned EntrySize = MJTI->getEntrySize(TD); assert((MJTI->getEntrySize(TD) == 4) && "Unsupported jump-table entry size"); - Index = DAG.getNode(ISD::SHL, DL, Index.getValueType(), Index, - DAG.getConstant(2, DL, Index.getValueType())); + Index = DAG.getNode( + ISD::SHL, DL, Index.getValueType(), Index, + DAG.getConstant(Log2_32(EntrySize), DL, Index.getValueType())); SDValue Addr = DAG.getNode(ISD::ADD, DL, Index.getValueType(), Index, Table); SDValue LD = @@ -696,29 +698,13 @@ SDValue XtensaTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { EVT PtrVT = Op.getValueType(); ConstantPoolSDNode *CP = cast(Op); - auto C = const_cast(CP->getConstVal()); - auto T = CP->getType(); SDValue Result; - // Do not use constant pool for aggregate or vector constant types, - // in such cases create global variable, for example to store tabel - // when we lower CTTZ operation. 
- if (T->isAggregateType()) { - MachineFunction &MF = DAG.getMachineFunction(); - auto AFI = MF.getInfo(); - auto M = const_cast(MF.getFunction().getParent()); - auto GV = new GlobalVariable( - *M, T, /*isConstant=*/true, GlobalVariable::InternalLinkage, C, - Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" + - Twine(MF.getFunctionNumber()) + "_" + Twine(AFI->createLabelUId())); - Result = DAG.getTargetConstantPool(GV, PtrVT, Align(4)); + if (!CP->isMachineConstantPoolEntry()) { + Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign(), + CP->getOffset()); } else { - if (!CP->isMachineConstantPoolEntry()) { - Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, - CP->getAlign(), CP->getOffset()); - } else { - report_fatal_error("This constantpool type is not supported yet"); - } + report_fatal_error("This constantpool type is not supported yet"); } return getAddrPCRel(Result, DAG); @@ -854,21 +840,74 @@ SDValue XtensaTargetLowering::LowerShiftRightParts(SDValue Op, return DAG.getMergeValues(Ops, DL); } +SDValue XtensaTargetLowering::LowerCTPOP(SDValue Op, SelectionDAG &DAG) const { + EVT VT = Op->getValueType(0); + SDValue Val = Op.getOperand(0); + SDLoc DL(Op); + + if (VT != MVT::i32) + return SDValue(); + + // CTPOP expansion: + // Val = (Val - (Val >> 1)) & 0x55555555 + // Val = ((Val >> 2) & 0x33333333) + (Val & 0x33333333) + // Val = ((Val >> 4) + Val) & 0x0f0f0f0f + // Val = (Val >> 8) + Val + // Val = (extract bits [16, 20] from Val) + Val + // Val = extract bits [0, 5] from Val + + SDValue Mask = DAG.getConstant(0x55555555, DL, VT); + SDValue Shift = + DAG.getNode(ISD::SRL, DL, VT, Val, DAG.getConstant(1, DL, VT)); + SDValue ShiftAndMask = DAG.getNode(ISD::AND, DL, VT, Shift, Mask); + Val = DAG.getNode(ISD::SUB, DL, VT, Val, ShiftAndMask); + + Mask = DAG.getConstant(0x33333333, DL, VT); + Shift = DAG.getNode(ISD::SRL, DL, VT, Val, DAG.getConstant(2, DL, VT)); + SDValue ValAndMask = DAG.getNode(ISD::AND, DL, VT, Val, 
Mask); + ShiftAndMask = DAG.getNode(ISD::AND, DL, VT, Shift, Mask); + Val = DAG.getNode(ISD::ADD, DL, VT, ValAndMask, ShiftAndMask); + + Mask = DAG.getConstant(0x0f0f0f0f, DL, VT); + Shift = DAG.getNode(ISD::SRL, DL, VT, Val, DAG.getConstant(4, DL, VT)); + Val = DAG.getNode(ISD::ADD, DL, VT, Val, Shift); + Val = DAG.getNode(ISD::AND, DL, VT, Val, Mask); + + Shift = DAG.getNode(ISD::SRL, DL, VT, Val, DAG.getConstant(8, DL, VT)); + Val = DAG.getNode(ISD::ADD, DL, VT, Val, Shift); + + Shift = DAG.getNode(XtensaISD::EXTUI, DL, VT, Val, + DAG.getConstant(16, DL, VT), DAG.getConstant(5, DL, VT)); + Val = DAG.getNode(ISD::ADD, DL, VT, Val, Shift); + + return DAG.getNode(XtensaISD::EXTUI, DL, VT, Val, DAG.getConstant(0, DL, VT), + DAG.getConstant(6, DL, VT)); +} + bool XtensaTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const { - if (!VT.isScalarInteger()) + APInt Imm; + unsigned EltSizeInBits; + + if (ISD::isConstantSplatVector(C.getNode(), Imm)) { + EltSizeInBits = VT.getScalarSizeInBits(); + } else if (VT.isScalarInteger()) { + EltSizeInBits = VT.getSizeInBits(); + if (auto *ConstNode = dyn_cast(C.getNode())) + Imm = ConstNode->getAPIntValue(); + else + return false; + } else { return false; + } // Omit if data size exceeds. - if (VT.getSizeInBits() > 32) + if (EltSizeInBits > 32) return false; - if (auto *ConstNode = dyn_cast(C.getNode())) { - const APInt &Imm = ConstNode->getAPIntValue(); - // Convert MULT to LSL. - if (Imm.isPowerOf2() && Imm.isIntN(5)) - return true; - } + // Convert MULT to LSL. 
+ if (Imm.isPowerOf2() && Imm.isIntN(5)) + return true; return false; } @@ -886,6 +925,8 @@ SDValue XtensaTargetLowering::LowerOperation(SDValue Op, return LowerBlockAddress(Op, DAG); case ISD::JumpTable: return LowerJumpTable(Op, DAG); + case ISD::CTPOP: + return LowerCTPOP(Op, DAG); case ISD::ConstantPool: return LowerConstantPool(Op, DAG); case ISD::SELECT_CC: @@ -913,6 +954,8 @@ const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const { return "XtensaISD::BR_JT"; case XtensaISD::CALL: return "XtensaISD::CALL"; + case XtensaISD::EXTUI: + return "XtensaISD::EXTUI"; case XtensaISD::PCREL_WRAPPER: return "XtensaISD::PCREL_WRAPPER"; case XtensaISD::RET: diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index 861c3c58847a1..8e7346b40dfe5 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -30,6 +30,11 @@ enum { // There is an optional glue operand at the end. CALL, + // Extract unsigned immediate. Operand 0 is value, operand 1 + // is bit position of the field [0..31], operand 2 is bit size + // of the field [1..16] + EXTUI, + // Wraps a TargetGlobalAddress that should be loaded using PC-relative // accesses. Operand 0 is the address. 
PCREL_WRAPPER, @@ -116,6 +121,8 @@ class XtensaTargetLowering : public TargetLowering { SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const; SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const; diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 18a31fef18446..a4c6d62f85769 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -138,7 +138,8 @@ let Defs = [SAR] in { } def EXTUI : RRR_Inst<0x00, 0x04, 0x00, (outs AR:$r), (ins AR:$t, uimm5:$imm1, imm1_16:$imm2), - "extui\t$r, $t, $imm1, $imm2", []> { + "extui\t$r, $t, $imm1, $imm2", + [(set AR:$r, (Xtensa_extui AR:$t, uimm5:$imm1, imm1_16:$imm2))]> { bits<5> imm1; bits<4> imm2; diff --git a/llvm/lib/Target/Xtensa/XtensaOperators.td b/llvm/lib/Target/Xtensa/XtensaOperators.td index c825359f3c5dd..3dd73b44f336a 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperators.td +++ b/llvm/lib/Target/Xtensa/XtensaOperators.td @@ -28,6 +28,9 @@ def SDT_XtensaSelectCC : SDTypeProfile<1, 5, def SDT_XtensaSRC : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, SDTCisVT<2, i32>, SDTCisVT<3, i32>]>; +def SDT_XtensaEXTUI : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, + SDTCisVT<2, i32>, SDTCisVT<3, i32>]>; + //===----------------------------------------------------------------------===// // Node definitions //===----------------------------------------------------------------------===// @@ -54,3 +57,5 @@ def Xtensa_select_cc: SDNode<"XtensaISD::SELECT_CC", SDT_XtensaSelectCC, def Xtensa_srcl: SDNode<"XtensaISD::SRCL", SDT_XtensaSRC>; def Xtensa_srcr: SDNode<"XtensaISD::SRCR", SDT_XtensaSRC>; + +def Xtensa_extui: SDNode<"XtensaISD::EXTUI", SDT_XtensaEXTUI>; diff --git a/llvm/test/CodeGen/Xtensa/constantpool-aggregate.ll b/llvm/test/CodeGen/Xtensa/constantpool-aggregate.ll 
deleted file mode 100644 index 3ace3a6d604b3..0000000000000 --- a/llvm/test/CodeGen/Xtensa/constantpool-aggregate.ll +++ /dev/null @@ -1,45 +0,0 @@ -; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ -; RUN: | FileCheck %s - -; Check that we place const array (CTTZ lookup table) in global variable, -; instead of constant pool and place label to this table in constant pool. - -; CHECK: .literal_position -; CHECK-NEXT: .literal .LCPI0_0, 125613361 -; CHECK-NEXT: .literal .LCPI0_1, __mulsi3 -; CHECK-NEXT: .literal .LCPI0_2, .LCP0_0 -; CHECK-NEXT: .global test_cttz_i32 - -define i32 @test_cttz_i32(i32 %a) nounwind { -; XTENSA-LABEL: test_cttz_i32: -; XTENSA: addi a8, a1, -16 -; XTENSA-NEXT: or a1, a8, a8 -; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill -; XTENSA-NEXT: or a8, a2, a2 -; XTENSA-NEXT: movi a2, 32 -; XTENSA-NEXT: beqz a8, .LBB0_2 -; XTENSA-NEXT: j .LBB2_1 -; XTENSA-NEXT: .LBB2_1: # %cond.false -; XTENSA-NEXT: neg a9, a8 -; XTENSA-NEXT: and a2, a8, a9 -; XTENSA-NEXT: l32r a3, .LCPI0_0 -; XTENSA-NEXT: l32r a8, .LCPI0_1 -; XTENSA-NEXT: callx0 a8 -; XTENSA-NEXT: movi a8, 27 -; XTENSA-NEXT: ssr a8 -; XTENSA-NEXT: srl a8, a2 -; XTENSA-NEXT: l32r a9, .LCPI0_2 -; XTENSA-NEXT: add a8, a9, a8 -; XTENSA-NEXT: l8ui a2, a8, 0 -; XTENSA-NEXT: .LBB2_2: # %cond.end -; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload -; XTENSA-NEXT: addi a8, a1, 16 -; XTENSA-NEXT: or a1, a8, a8 -; XTENSA-NEXT: ret - %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 false) - ret i32 %tmp -} - -; CHECK: .LCP0_0: -; CHECK-NEXT: .ascii "\000\001\034\002\035\016\030\003\036\026\024\017\031\021\004\b\037\033\r\027\025\023\020\007\032\f\022\006\013\005\n\t" -; CHECK-NEXT: .size .LCP0_0, 32 diff --git a/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll index 81e4a04c6d23e..c1e590484e717 100644 --- a/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll +++ b/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll @@ -16,29 +16,35 @@ declare i64 @llvm.ctpop.i64(i64) define i8 
@test_cttz_i8(i8 %a) nounwind { ; XTENSA-LABEL: test_cttz_i8: -; XTENSA: movi a8, 255 -; XTENSA-NEXT: and a9, a2, a8 +; XTENSA: movi a9, 255 +; XTENSA-NEXT: and a10, a2, a9 ; XTENSA-NEXT: movi a8, 8 -; XTENSA-NEXT: beqz a9, .LBB0_2 +; XTENSA-NEXT: beqz a10, .LBB0_2 ; XTENSA-NEXT: j .LBB0_1 ; XTENSA-NEXT: .LBB0_1: # %cond.false ; XTENSA-NEXT: movi a8, -1 ; XTENSA-NEXT: xor a8, a2, a8 -; XTENSA-NEXT: addi a9, a2, -1 +; XTENSA-NEXT: addi a10, a2, -1 +; XTENSA-NEXT: and a8, a8, a10 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: srli a9, a8, 1 -; XTENSA-NEXT: movi a10, 85 +; XTENSA-NEXT: l32r a10, .LCPI0_0 ; XTENSA-NEXT: and a9, a9, a10 ; XTENSA-NEXT: sub a8, a8, a9 -; XTENSA-NEXT: movi a9, 51 +; XTENSA-NEXT: l32r a9, .LCPI0_1 ; XTENSA-NEXT: and a10, a8, a9 ; XTENSA-NEXT: srli a8, a8, 2 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: add a8, a10, a8 ; XTENSA-NEXT: srli a9, a8, 4 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: movi a9, 15 +; XTENSA-NEXT: l32r a9, .LCPI0_2 ; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a9, a8, 16, 5 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a8, a8, 0, 6 ; XTENSA-NEXT: .LBB0_2: # %cond.end ; XTENSA-NEXT: or a2, a8, a8 ; XTENSA-NEXT: ret @@ -48,15 +54,16 @@ define i8 @test_cttz_i8(i8 %a) nounwind { define i16 @test_cttz_i16(i16 %a) nounwind { ; XTENSA-LABEL: test_cttz_i16: -; XTENSA: l32r a8, .LCPI1_0 -; XTENSA-NEXT: and a9, a2, a8 +; XTENSA: l32r a9, .LCPI1_0 +; XTENSA-NEXT: and a10, a2, a9 ; XTENSA-NEXT: movi a8, 16 -; XTENSA-NEXT: beqz a9, .LBB1_2 +; XTENSA-NEXT: beqz a10, .LBB1_2 ; XTENSA-NEXT: j .LBB1_1 ; XTENSA-NEXT: .LBB1_1: # %cond.false ; XTENSA-NEXT: movi a8, -1 ; XTENSA-NEXT: xor a8, a2, a8 -; XTENSA-NEXT: addi a9, a2, -1 +; XTENSA-NEXT: addi a10, a2, -1 +; XTENSA-NEXT: and a8, a8, a10 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: srli a9, a8, 1 ; XTENSA-NEXT: l32r a10, .LCPI1_1 @@ -69,12 +76,13 @@ define i16 @test_cttz_i16(i16 %a) nounwind { ; XTENSA-NEXT: add 
a8, a10, a8 ; XTENSA-NEXT: srli a9, a8, 4 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: movi a9, 15 -; XTENSA-NEXT: and a9, a8, a9 -; XTENSA-NEXT: l32r a10, .LCPI1_3 -; XTENSA-NEXT: and a8, a8, a10 -; XTENSA-NEXT: srli a8, a8, 8 -; XTENSA-NEXT: add a8, a9, a8 +; XTENSA-NEXT: l32r a9, .LCPI1_3 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a9, a8, 16, 5 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a8, a8, 0, 6 ; XTENSA-NEXT: .LBB1_2: # %cond.end ; XTENSA-NEXT: or a2, a8, a8 ; XTENSA-NEXT: ret @@ -84,29 +92,34 @@ define i16 @test_cttz_i16(i16 %a) nounwind { define i32 @test_cttz_i32(i32 %a) nounwind { ; XTENSA-LABEL: test_cttz_i32: -; XTENSA: addi a8, a1, -16 -; XTENSA-NEXT: or a1, a8, a8 -; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill -; XTENSA-NEXT: or a8, a2, a2 -; XTENSA-NEXT: movi a2, 32 -; XTENSA-NEXT: beqz a8, .LBB2_2 +; XTENSA: movi a8, 32 +; XTENSA-NEXT: beqz a2, .LBB2_2 ; XTENSA-NEXT: j .LBB2_1 ; XTENSA-NEXT: .LBB2_1: # %cond.false -; XTENSA-NEXT: neg a9, a8 -; XTENSA-NEXT: and a2, a8, a9 -; XTENSA-NEXT: l32r a3, .LCPI2_0 -; XTENSA-NEXT: l32r a8, .LCPI2_1 -; XTENSA-NEXT: callx0 a8 -; XTENSA-NEXT: movi a8, 27 -; XTENSA-NEXT: ssr a8 -; XTENSA-NEXT: srl a8, a2 +; XTENSA-NEXT: movi a8, -1 +; XTENSA-NEXT: xor a8, a2, a8 +; XTENSA-NEXT: addi a9, a2, -1 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI2_0 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: sub a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI2_1 +; XTENSA-NEXT: and a10, a8, a9 +; XTENSA-NEXT: srli a8, a8, 2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a10, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: add a8, a8, a9 ; XTENSA-NEXT: l32r a9, .LCPI2_2 -; XTENSA-NEXT: add a8, a9, a8 -; XTENSA-NEXT: l8ui a2, a8, 0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a9, a8, 16, 5 +; XTENSA-NEXT: add a8, a8, a9 +; 
XTENSA-NEXT: extui a8, a8, 0, 6 ; XTENSA-NEXT: .LBB2_2: # %cond.end -; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload -; XTENSA-NEXT: addi a8, a1, 16 -; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: or a2, a8, a8 ; XTENSA-NEXT: ret %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 false) ret i32 %tmp @@ -118,19 +131,26 @@ define i8 @test_cttz_i8_zero_undef(i8 %a) nounwind { ; XTENSA-NEXT: xor a8, a2, a8 ; XTENSA-NEXT: addi a9, a2, -1 ; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: movi a9, 255 +; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: srli a9, a8, 1 -; XTENSA-NEXT: movi a10, 85 +; XTENSA-NEXT: l32r a10, .LCPI3_0 ; XTENSA-NEXT: and a9, a9, a10 ; XTENSA-NEXT: sub a8, a8, a9 -; XTENSA-NEXT: movi a9, 51 +; XTENSA-NEXT: l32r a9, .LCPI3_1 ; XTENSA-NEXT: and a10, a8, a9 ; XTENSA-NEXT: srli a8, a8, 2 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: add a8, a10, a8 ; XTENSA-NEXT: srli a9, a8, 4 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: movi a9, 15 -; XTENSA-NEXT: and a2, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI3_2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a9, a8, 16, 5 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a2, a8, 0, 6 ; XTENSA-NEXT: ret %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 true) ret i8 %tmp @@ -142,23 +162,26 @@ define i16 @test_cttz_i16_zero_undef(i16 %a) nounwind { ; XTENSA-NEXT: xor a8, a2, a8 ; XTENSA-NEXT: addi a9, a2, -1 ; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI4_0 +; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: srli a9, a8, 1 -; XTENSA-NEXT: l32r a10, .LCPI4_0 +; XTENSA-NEXT: l32r a10, .LCPI4_1 ; XTENSA-NEXT: and a9, a9, a10 ; XTENSA-NEXT: sub a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI4_1 +; XTENSA-NEXT: l32r a9, .LCPI4_2 ; XTENSA-NEXT: and a10, a8, a9 ; XTENSA-NEXT: srli a8, a8, 2 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: add a8, a10, a8 ; XTENSA-NEXT: srli a9, a8, 4 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: movi a9, 15 -; XTENSA-NEXT: and a9, a8, a9 -; 
XTENSA-NEXT: l32r a10, .LCPI4_2 -; XTENSA-NEXT: and a8, a8, a10 -; XTENSA-NEXT: srli a8, a8, 8 -; XTENSA-NEXT: add a2, a9, a8 +; XTENSA-NEXT: l32r a9, .LCPI4_3 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a9, a8, 16, 5 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a2, a8, 0, 6 ; XTENSA-NEXT: ret %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 true) ret i16 %tmp @@ -166,23 +189,28 @@ define i16 @test_cttz_i16_zero_undef(i16 %a) nounwind { define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind { ; XTENSA-LABEL: test_cttz_i32_zero_undef: -; XTENSA: addi a8, a1, -16 -; XTENSA-NEXT: or a1, a8, a8 -; XTENSA-NEXT: s32i a0, a1, 0 # 4-byte Folded Spill -; XTENSA-NEXT: neg a8, a2 -; XTENSA-NEXT: and a2, a2, a8 -; XTENSA-NEXT: l32r a3, .LCPI5_0 -; XTENSA-NEXT: l32r a8, .LCPI5_1 -; XTENSA-NEXT: callx0 a8 -; XTENSA-NEXT: movi a8, 27 -; XTENSA-NEXT: ssr a8 -; XTENSA-NEXT: srl a8, a2 +; XTENSA: movi a8, -1 +; XTENSA-NEXT: xor a8, a2, a8 +; XTENSA-NEXT: addi a9, a2, -1 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI5_0 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: sub a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI5_1 +; XTENSA-NEXT: and a10, a8, a9 +; XTENSA-NEXT: srli a8, a8, 2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: add a8, a10, a8 +; XTENSA-NEXT: srli a9, a8, 4 +; XTENSA-NEXT: add a8, a8, a9 ; XTENSA-NEXT: l32r a9, .LCPI5_2 -; XTENSA-NEXT: add a8, a9, a8 -; XTENSA-NEXT: l8ui a2, a8, 0 -; XTENSA-NEXT: l32i a0, a1, 0 # 4-byte Folded Reload -; XTENSA-NEXT: addi a8, a1, 16 -; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a9, a8, 16, 5 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a2, a8, 0, 6 ; XTENSA-NEXT: ret %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 true) ret i32 %tmp @@ -190,39 +218,45 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind { define i8 
@test_ctlz_i8(i8 %a) nounwind { ; XTENSA-LABEL: test_ctlz_i8: -; XTENSA: movi a8, 255 -; XTENSA-NEXT: and a9, a2, a8 +; XTENSA: movi a9, 255 +; XTENSA-NEXT: and a10, a2, a9 ; XTENSA-NEXT: movi a8, 8 -; XTENSA-NEXT: beqz a9, .LBB6_2 +; XTENSA-NEXT: beqz a10, .LBB6_2 ; XTENSA-NEXT: j .LBB6_1 ; XTENSA-NEXT: .LBB6_1: # %cond.false ; XTENSA-NEXT: movi a8, 254 ; XTENSA-NEXT: and a8, a2, a8 ; XTENSA-NEXT: srli a8, a8, 1 ; XTENSA-NEXT: or a8, a2, a8 -; XTENSA-NEXT: movi a9, 252 -; XTENSA-NEXT: and a9, a8, a9 -; XTENSA-NEXT: srli a9, a9, 2 -; XTENSA-NEXT: or a8, a8, a9 -; XTENSA-NEXT: movi a9, 240 -; XTENSA-NEXT: and a9, a8, a9 -; XTENSA-NEXT: srli a9, a9, 4 -; XTENSA-NEXT: or a8, a8, a9 -; XTENSA-NEXT: movi a9, -1 -; XTENSA-NEXT: xor a8, a8, a9 +; XTENSA-NEXT: movi a10, 252 +; XTENSA-NEXT: and a10, a8, a10 +; XTENSA-NEXT: srli a10, a10, 2 +; XTENSA-NEXT: or a8, a8, a10 +; XTENSA-NEXT: movi a10, 240 +; XTENSA-NEXT: and a10, a8, a10 +; XTENSA-NEXT: srli a10, a10, 4 +; XTENSA-NEXT: or a8, a8, a10 +; XTENSA-NEXT: movi a10, -1 +; XTENSA-NEXT: xor a8, a8, a10 +; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: srli a9, a8, 1 -; XTENSA-NEXT: movi a10, 85 +; XTENSA-NEXT: l32r a10, .LCPI6_0 ; XTENSA-NEXT: and a9, a9, a10 ; XTENSA-NEXT: sub a8, a8, a9 -; XTENSA-NEXT: movi a9, 51 +; XTENSA-NEXT: l32r a9, .LCPI6_1 ; XTENSA-NEXT: and a10, a8, a9 ; XTENSA-NEXT: srli a8, a8, 2 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: add a8, a10, a8 ; XTENSA-NEXT: srli a9, a8, 4 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: movi a9, 15 +; XTENSA-NEXT: l32r a9, .LCPI6_2 ; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a9, a8, 16, 5 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a8, a8, 0, 6 ; XTENSA-NEXT: .LBB6_2: # %cond.end ; XTENSA-NEXT: or a2, a8, a8 ; XTENSA-NEXT: ret @@ -232,30 +266,31 @@ define i8 @test_ctlz_i8(i8 %a) nounwind { define i16 @test_ctlz_i16(i16 %a) nounwind { ; XTENSA-LABEL: test_ctlz_i16: -; XTENSA: l32r a8, 
.LCPI7_0 -; XTENSA-NEXT: and a9, a2, a8 +; XTENSA: l32r a9, .LCPI7_0 +; XTENSA-NEXT: and a10, a2, a9 ; XTENSA-NEXT: movi a8, 16 -; XTENSA-NEXT: beqz a9, .LBB7_2 +; XTENSA-NEXT: beqz a10, .LBB7_2 ; XTENSA-NEXT: j .LBB7_1 ; XTENSA-NEXT: .LBB7_1: # %cond.false ; XTENSA-NEXT: l32r a8, .LCPI7_1 ; XTENSA-NEXT: and a8, a2, a8 ; XTENSA-NEXT: srli a8, a8, 1 ; XTENSA-NEXT: or a8, a2, a8 -; XTENSA-NEXT: l32r a9, .LCPI7_2 -; XTENSA-NEXT: and a9, a8, a9 -; XTENSA-NEXT: srli a9, a9, 2 -; XTENSA-NEXT: or a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI7_3 -; XTENSA-NEXT: and a9, a8, a9 -; XTENSA-NEXT: srli a9, a9, 4 -; XTENSA-NEXT: or a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI7_4 -; XTENSA-NEXT: and a9, a8, a9 -; XTENSA-NEXT: srli a9, a9, 8 -; XTENSA-NEXT: or a8, a8, a9 -; XTENSA-NEXT: movi a9, -1 -; XTENSA-NEXT: xor a8, a8, a9 +; XTENSA-NEXT: l32r a10, .LCPI7_2 +; XTENSA-NEXT: and a10, a8, a10 +; XTENSA-NEXT: srli a10, a10, 2 +; XTENSA-NEXT: or a8, a8, a10 +; XTENSA-NEXT: l32r a10, .LCPI7_3 +; XTENSA-NEXT: and a10, a8, a10 +; XTENSA-NEXT: srli a10, a10, 4 +; XTENSA-NEXT: or a8, a8, a10 +; XTENSA-NEXT: l32r a10, .LCPI7_4 +; XTENSA-NEXT: and a10, a8, a10 +; XTENSA-NEXT: srli a10, a10, 8 +; XTENSA-NEXT: or a8, a8, a10 +; XTENSA-NEXT: movi a10, -1 +; XTENSA-NEXT: xor a8, a8, a10 +; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: srli a9, a8, 1 ; XTENSA-NEXT: l32r a10, .LCPI7_5 ; XTENSA-NEXT: and a9, a9, a10 @@ -267,12 +302,13 @@ define i16 @test_ctlz_i16(i16 %a) nounwind { ; XTENSA-NEXT: add a8, a10, a8 ; XTENSA-NEXT: srli a9, a8, 4 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: movi a9, 15 -; XTENSA-NEXT: and a9, a8, a9 -; XTENSA-NEXT: l32r a10, .LCPI7_7 -; XTENSA-NEXT: and a8, a8, a10 -; XTENSA-NEXT: srli a8, a8, 8 -; XTENSA-NEXT: add a8, a9, a8 +; XTENSA-NEXT: l32r a9, .LCPI7_7 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a9, a8, 16, 5 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a8, a8, 0, 6 ; XTENSA-NEXT: .LBB7_2: 
# %cond.end ; XTENSA-NEXT: or a2, a8, a8 ; XTENSA-NEXT: ret @@ -314,13 +350,11 @@ define i32 @test_ctlz_i32(i32 %a) nounwind { ; XTENSA-NEXT: add a8, a8, a9 ; XTENSA-NEXT: l32r a9, .LCPI8_2 ; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: slli a9, a8, 8 +; XTENSA-NEXT: srli a9, a8, 8 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: slli a9, a8, 16 +; XTENSA-NEXT: extui a9, a8, 16, 5 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: movi a9, 24 -; XTENSA-NEXT: ssr a9 -; XTENSA-NEXT: srl a2, a8 +; XTENSA-NEXT: extui a2, a8, 0, 6 ; XTENSA-NEXT: .LBB8_2: # %cond.end ; XTENSA-NEXT: ret %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 false) @@ -343,19 +377,26 @@ define i8 @test_ctlz_i8_zero_undef(i8 %a) nounwind { ; XTENSA-NEXT: or a8, a8, a9 ; XTENSA-NEXT: movi a9, -1 ; XTENSA-NEXT: xor a8, a8, a9 +; XTENSA-NEXT: movi a9, 255 +; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: srli a9, a8, 1 -; XTENSA-NEXT: movi a10, 85 +; XTENSA-NEXT: l32r a10, .LCPI9_0 ; XTENSA-NEXT: and a9, a9, a10 ; XTENSA-NEXT: sub a8, a8, a9 -; XTENSA-NEXT: movi a9, 51 +; XTENSA-NEXT: l32r a9, .LCPI9_1 ; XTENSA-NEXT: and a10, a8, a9 ; XTENSA-NEXT: srli a8, a8, 2 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: add a8, a10, a8 ; XTENSA-NEXT: srli a9, a8, 4 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: movi a9, 15 -; XTENSA-NEXT: and a2, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI9_2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a9, a8, 16, 5 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a2, a8, 0, 6 ; XTENSA-NEXT: ret %tmp = call i8 @llvm.ctlz.i8(i8 %a, i1 true) ret i8 %tmp @@ -381,23 +422,26 @@ define i16 @test_ctlz_i16_zero_undef(i16 %a) nounwind { ; XTENSA-NEXT: or a8, a8, a9 ; XTENSA-NEXT: movi a9, -1 ; XTENSA-NEXT: xor a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI10_4 +; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: srli a9, a8, 1 -; XTENSA-NEXT: l32r a10, .LCPI10_4 +; XTENSA-NEXT: l32r a10, .LCPI10_5 ; XTENSA-NEXT: and a9, a9, a10 ; XTENSA-NEXT: sub a8, 
a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI10_5 +; XTENSA-NEXT: l32r a9, .LCPI10_6 ; XTENSA-NEXT: and a10, a8, a9 ; XTENSA-NEXT: srli a8, a8, 2 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: add a8, a10, a8 ; XTENSA-NEXT: srli a9, a8, 4 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: movi a9, 15 -; XTENSA-NEXT: and a9, a8, a9 -; XTENSA-NEXT: l32r a10, .LCPI10_6 -; XTENSA-NEXT: and a8, a8, a10 -; XTENSA-NEXT: srli a8, a8, 8 -; XTENSA-NEXT: add a2, a9, a8 +; XTENSA-NEXT: l32r a9, .LCPI10_7 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a9, a8, 16, 5 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a2, a8, 0, 6 ; XTENSA-NEXT: ret %tmp = call i16 @llvm.ctlz.i16(i16 %a, i1 true) ret i16 %tmp @@ -432,13 +476,11 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind { ; XTENSA-NEXT: add a8, a8, a9 ; XTENSA-NEXT: l32r a9, .LCPI11_2 ; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: slli a9, a8, 8 +; XTENSA-NEXT: srli a9, a8, 8 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: slli a9, a8, 16 +; XTENSA-NEXT: extui a9, a8, 16, 5 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: movi a9, 24 -; XTENSA-NEXT: ssr a9 -; XTENSA-NEXT: srl a2, a8 +; XTENSA-NEXT: extui a2, a8, 0, 6 ; XTENSA-NEXT: ret %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 true) ret i32 %tmp @@ -446,19 +488,26 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind { define i8 @test_ctpop_i8(i8 %a) nounwind { ; XTENSA-LABEL: test_ctpop_i8: -; XTENSA: srli a8, a2, 1 -; XTENSA-NEXT: movi a9, 85 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: sub a8, a2, a8 -; XTENSA-NEXT: movi a9, 51 +; XTENSA: movi a8, 255 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI12_0 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: sub a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI12_1 ; XTENSA-NEXT: and a10, a8, a9 ; XTENSA-NEXT: srli a8, a8, 2 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: add a8, a10, a8 ; XTENSA-NEXT: srli a9, a8, 4 ; XTENSA-NEXT: add 
a8, a8, a9 -; XTENSA-NEXT: movi a9, 15 -; XTENSA-NEXT: and a2, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI12_2 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a9, a8, 16, 5 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a2, a8, 0, 6 ; XTENSA-NEXT: ret %1 = call i8 @llvm.ctpop.i8(i8 %a) ret i8 %1 @@ -466,23 +515,26 @@ define i8 @test_ctpop_i8(i8 %a) nounwind { define i16 @test_ctpop_i16(i16 %a) nounwind { ; XTENSA-LABEL: test_ctpop_i16: -; XTENSA: srli a8, a2, 1 -; XTENSA-NEXT: l32r a9, .LCPI13_0 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: sub a8, a2, a8 -; XTENSA-NEXT: l32r a9, .LCPI13_1 +; XTENSA: l32r a8, .LCPI13_0 +; XTENSA-NEXT: and a8, a2, a8 +; XTENSA-NEXT: srli a9, a8, 1 +; XTENSA-NEXT: l32r a10, .LCPI13_1 +; XTENSA-NEXT: and a9, a9, a10 +; XTENSA-NEXT: sub a8, a8, a9 +; XTENSA-NEXT: l32r a9, .LCPI13_2 ; XTENSA-NEXT: and a10, a8, a9 ; XTENSA-NEXT: srli a8, a8, 2 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: add a8, a10, a8 ; XTENSA-NEXT: srli a9, a8, 4 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: movi a9, 15 -; XTENSA-NEXT: and a9, a8, a9 -; XTENSA-NEXT: l32r a10, .LCPI13_2 -; XTENSA-NEXT: and a8, a8, a10 -; XTENSA-NEXT: srli a8, a8, 8 -; XTENSA-NEXT: add a2, a9, a8 +; XTENSA-NEXT: l32r a9, .LCPI13_3 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a9, a8, 16, 5 +; XTENSA-NEXT: add a8, a8, a9 +; XTENSA-NEXT: extui a2, a8, 0, 6 ; XTENSA-NEXT: ret %1 = call i16 @llvm.ctpop.i16(i16 %a) ret i16 %1 @@ -503,13 +555,11 @@ define i32 @test_ctpop_i32(i32 %a) nounwind { ; XTENSA-NEXT: add a8, a8, a9 ; XTENSA-NEXT: l32r a9, .LCPI14_2 ; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: slli a9, a8, 8 +; XTENSA-NEXT: srli a9, a8, 8 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: slli a9, a8, 16 +; XTENSA-NEXT: extui a9, a8, 16, 5 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: movi a9, 24 -; XTENSA-NEXT: ssr a9 -; XTENSA-NEXT: srl a2, a8 +; 
XTENSA-NEXT: extui a2, a8, 0, 6 ; XTENSA-NEXT: ret %1 = call i32 @llvm.ctpop.i32(i32 %a) ret i32 %1 diff --git a/llvm/test/CodeGen/Xtensa/mul.ll b/llvm/test/CodeGen/Xtensa/mul.ll index 2a96f6ce52690..08b4b1f57166a 100644 --- a/llvm/test/CodeGen/Xtensa/mul.ll +++ b/llvm/test/CodeGen/Xtensa/mul.ll @@ -658,3 +658,14 @@ define signext i32 @mul_imm_16384(i32 %a) nounwind { %1 = mul i32 %a, 16384 ret i32 %1 } + +define <4 x i32> @mul_vec_splat_constant(<4 x i32> %a) { +; XTENSA-LABEL: mul_vec_splat_constant: +; XTENSA: slli a2, a2, 2 +; XTENSA-NEXT: slli a3, a3, 2 +; XTENSA-NEXT: slli a4, a4, 2 +; XTENSA-NEXT: slli a5, a5, 2 +; XTENSA-NEXT: ret + %mul = mul <4 x i32> %a, + ret <4 x i32> %mul +} From f810f7983ee2240f365bc1b41f638f98b56b594f Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Sun, 11 Aug 2024 13:01:30 +0300 Subject: [PATCH 006/289] [Xtensa] Move SHL/SRL/SRA/SRC lowering from ISelLowering to ISelDagToDag. Remove SHL/SRL/SRA/SRC pseudo operations. Remove redundant cttz/ctlz/ctpop tests. Remove Xtensa MachineFunctionInfo implementation. 
--- llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp | 63 +++ llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 60 --- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 24 - .../Target/Xtensa/XtensaMachineFunctionInfo.h | 53 --- .../lib/Target/Xtensa/XtensaTargetMachine.cpp | 7 - llvm/lib/Target/Xtensa/XtensaTargetMachine.h | 4 - llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll | 429 +----------------- 7 files changed, 84 insertions(+), 556 deletions(-) delete mode 100644 llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h diff --git a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp index 145f285036358..869db1c7fcd5a 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp @@ -125,6 +125,7 @@ FunctionPass *llvm::createXtensaISelDag(XtensaTargetMachine &TM, void XtensaDAGToDAGISel::Select(SDNode *Node) { SDLoc DL(Node); + EVT VT = Node->getValueType(0); // If we have a custom node, we already have selected! 
if (Node->isMachineOpcode()) { @@ -132,5 +133,67 @@ void XtensaDAGToDAGISel::Select(SDNode *Node) { return; } + switch (Node->getOpcode()) { + case ISD::SHL: { + SDValue N0 = Node->getOperand(0); + SDValue N1 = Node->getOperand(1); + if (!isa(N1)) { + SDNode *SSL = CurDAG->getMachineNode(Xtensa::SSL, DL, MVT::Glue, N1); + SDNode *SLL = + CurDAG->getMachineNode(Xtensa::SLL, DL, VT, N0, SDValue(SSL, 0)); + ReplaceNode(Node, SLL); + return; + } + break; + } + case ISD::SRL: { + SDValue N0 = Node->getOperand(0); + SDValue N1 = Node->getOperand(1); + auto *C = dyn_cast(N1); + // If C is constant in range [0..15] then we can generate SRLI + // instruction using pattern matching, otherwise generate SRL + if (!C || !isUInt<4>(C->getZExtValue())) { + SDNode *SSR = CurDAG->getMachineNode(Xtensa::SSR, DL, MVT::Glue, N1); + SDNode *SRL = + CurDAG->getMachineNode(Xtensa::SRL, DL, VT, N0, SDValue(SSR, 0)); + ReplaceNode(Node, SRL); + return; + } + break; + } + case ISD::SRA: { + SDValue N0 = Node->getOperand(0); + SDValue N1 = Node->getOperand(1); + if (!isa(N1)) { + SDNode *SSR = CurDAG->getMachineNode(Xtensa::SSR, DL, MVT::Glue, N1); + SDNode *SRA = + CurDAG->getMachineNode(Xtensa::SRA, DL, VT, N0, SDValue(SSR, 0)); + ReplaceNode(Node, SRA); + return; + } + break; + } + case XtensaISD::SRCL: { + SDValue N0 = Node->getOperand(0); + SDValue N1 = Node->getOperand(1); + SDValue N2 = Node->getOperand(2); + SDNode *SSL = CurDAG->getMachineNode(Xtensa::SSL, DL, MVT::Glue, N2); + SDNode *SRC = + CurDAG->getMachineNode(Xtensa::SRC, DL, VT, N0, N1, SDValue(SSL, 0)); + ReplaceNode(Node, SRC); + return; + } + case XtensaISD::SRCR: { + SDValue N0 = Node->getOperand(0); + SDValue N1 = Node->getOperand(1); + SDValue N2 = Node->getOperand(2); + SDNode *SSR = CurDAG->getMachineNode(Xtensa::SSR, DL, MVT::Glue, N2); + SDNode *SRC = + CurDAG->getMachineNode(Xtensa::SRC, DL, VT, N0, N1, SDValue(SSR, 0)); + ReplaceNode(Node, SRC); + return; + } + } + SelectCode(Node); } diff --git 
a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 5dbaa335f99fa..11827021db32f 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -13,7 +13,6 @@ #include "XtensaISelLowering.h" #include "XtensaConstantPoolValue.h" -#include "XtensaMachineFunctionInfo.h" #include "XtensaSubtarget.h" #include "XtensaTargetMachine.h" #include "llvm/CodeGen/CallingConvLower.h" @@ -1040,70 +1039,11 @@ XtensaTargetLowering::emitSelectCC(MachineInstr &MI, MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( MachineInstr &MI, MachineBasicBlock *MBB) const { - const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); DebugLoc DL = MI.getDebugLoc(); switch (MI.getOpcode()) { case Xtensa::SELECT: return emitSelectCC(MI, MBB); - case Xtensa::SHL_P: { - MachineOperand &R = MI.getOperand(0); - MachineOperand &S = MI.getOperand(1); - MachineOperand &SA = MI.getOperand(2); - - BuildMI(*MBB, MI, DL, TII.get(Xtensa::SSL)).addReg(SA.getReg()); - BuildMI(*MBB, MI, DL, TII.get(Xtensa::SLL), R.getReg()).addReg(S.getReg()); - MI.eraseFromParent(); - return MBB; - } - case Xtensa::SRA_P: { - MachineOperand &R = MI.getOperand(0); - MachineOperand &T = MI.getOperand(1); - MachineOperand &SA = MI.getOperand(2); - - BuildMI(*MBB, MI, DL, TII.get(Xtensa::SSR)).addReg(SA.getReg()); - BuildMI(*MBB, MI, DL, TII.get(Xtensa::SRA), R.getReg()).addReg(T.getReg()); - MI.eraseFromParent(); - return MBB; - } - case Xtensa::SRL_P: { - MachineOperand &R = MI.getOperand(0); - MachineOperand &T = MI.getOperand(1); - MachineOperand &SA = MI.getOperand(2); - - BuildMI(*MBB, MI, DL, TII.get(Xtensa::SSR)).addReg(SA.getReg()); - BuildMI(*MBB, MI, DL, TII.get(Xtensa::SRL), R.getReg()).addReg(T.getReg()); - MI.eraseFromParent(); - return MBB; - } - case Xtensa::SRCL_P: { - MachineOperand &R = MI.getOperand(0); - MachineOperand &HI = MI.getOperand(1); - MachineOperand &LO = MI.getOperand(2); - 
MachineOperand &SA = MI.getOperand(3); - - BuildMI(*MBB, MI, DL, TII.get(Xtensa::SSL)).addReg(SA.getReg()); - BuildMI(*MBB, MI, DL, TII.get(Xtensa::SRC), R.getReg()) - .addReg(HI.getReg()) - .addReg(LO.getReg()); - ; - MI.eraseFromParent(); - return MBB; - } - case Xtensa::SRCR_P: { - MachineOperand &R = MI.getOperand(0); - MachineOperand &HI = MI.getOperand(1); - MachineOperand &LO = MI.getOperand(2); - MachineOperand &SA = MI.getOperand(3); - - BuildMI(*MBB, MI, DL, TII.get(Xtensa::SSR)).addReg(SA.getReg()); - BuildMI(*MBB, MI, DL, TII.get(Xtensa::SRC), R.getReg()) - .addReg(HI.getReg()) - .addReg(LO.getReg()); - ; - MI.eraseFromParent(); - return MBB; - } default: llvm_unreachable("Unexpected instr type to insert"); } diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index a4c6d62f85769..0d01864b54bc3 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -190,30 +190,6 @@ def SSAI : RRR_Inst<0x00, 0x00, 0x04, (outs), (ins uimm5:$imm), let t{0} = imm{4}; } -// Shift Pseudo instructions: -// SSL/SSR + Shift combination -let usesCustomInserter = 1 in { - def SHL_P : Pseudo<(outs AR:$r), (ins AR:$s, AR:$sa), - "# SHL_P $r, $s, $sa", - [(set i32:$r, (shl i32:$s, i32:$sa))]>; - - def SRA_P : Pseudo<(outs AR:$r), (ins AR:$t, AR:$sa), - "# SRA_P $r, $t, $sa", - [(set i32:$r, (sra i32:$t, i32:$sa))]>; - - def SRL_P : Pseudo<(outs AR:$r), (ins AR:$t, AR:$sa), - "# SRL_P $r, $t, $sa", - [(set i32:$r, (srl i32:$t, i32:$sa))]>; - - def SRCL_P : Pseudo<(outs AR:$r), (ins AR:$hi, AR:$lo, AR:$sa), - "# SRCL_P $r, $hi, $lo, $sa", - [(set i32:$r, (Xtensa_srcl i32:$hi, i32:$lo, i32:$sa))]>; - - def SRCR_P : Pseudo<(outs AR:$r), (ins AR:$hi, AR:$lo, AR:$sa), - "# SRCR_P $r, $hi, $lo, $sa", - [(set i32:$r, (Xtensa_srcr i32:$hi, i32:$lo, i32:$sa))]>; -} - //===----------------------------------------------------------------------===// // Load and store instructions 
//===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h b/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h deleted file mode 100644 index 86ee81128c34c..0000000000000 --- a/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h +++ /dev/null @@ -1,53 +0,0 @@ -//==- XtensaMachineFunctionInfo.h - Xtensa machine function info --*- C++ -*-=// -// -// The LLVM Compiler Infrastructure -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file declares Xtensa-specific per-machine-function information. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_LIB_TARGET_XTENSA_XTENSAMACHINEFUNCTIONINFO_H -#define LLVM_LIB_TARGET_XTENSA_XTENSAMACHINEFUNCTIONINFO_H - -#include "llvm/CodeGen/MachineFrameInfo.h" -#include "llvm/CodeGen/MachineFunction.h" -#include "llvm/Target/TargetMachine.h" - -namespace llvm { - -class XtensaFunctionInfo : public MachineFunctionInfo { - unsigned VarArgsFirstGPR; - int VarArgsStackOffset; - unsigned VarArgsFrameIndex; - bool SaveFrameRegister = false; - unsigned LabelUId = 0; - -public: - explicit XtensaFunctionInfo(const Function &F, const TargetSubtargetInfo *STI) - : VarArgsFirstGPR(0), VarArgsStackOffset(0), VarArgsFrameIndex(0) {} - - unsigned getVarArgsFirstGPR() const { return VarArgsFirstGPR; } - void setVarArgsFirstGPR(unsigned GPR) { VarArgsFirstGPR = GPR; } - - int getVarArgsStackOffset() const { return VarArgsStackOffset; } - void setVarArgsStackOffset(int Offset) { VarArgsStackOffset = Offset; } - - // Get and set the frame index of the first stack vararg. 
- unsigned getVarArgsFrameIndex() const { return VarArgsFrameIndex; } - void setVarArgsFrameIndex(unsigned FI) { VarArgsFrameIndex = FI; } - - bool isSaveFrameRegister() const { return SaveFrameRegister; } - void setSaveFrameRegister() { SaveFrameRegister = true; } - - unsigned createLabelUId() { return LabelUId++; } -}; - -} // namespace llvm - -#endif /* LLVM_LIB_TARGET_XTENSA_XTENSAMACHINEFUNCTIONINFO_H */ diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp index eba169a2fe7a9..49c7faf84df1d 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp +++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp @@ -14,7 +14,6 @@ #include "XtensaTargetMachine.h" #include "TargetInfo/XtensaTargetInfo.h" -#include "XtensaMachineFunctionInfo.h" #include "llvm/CodeGen/Passes.h" #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" #include "llvm/CodeGen/TargetPassConfig.h" @@ -84,12 +83,6 @@ XtensaTargetMachine::getSubtargetImpl(const Function &F) const { return I.get(); } -MachineFunctionInfo *XtensaTargetMachine::createMachineFunctionInfo( - BumpPtrAllocator &Allocator, const Function &F, - const TargetSubtargetInfo *STI) const { - return XtensaFunctionInfo::create(Allocator, F, STI); -} - namespace { /// Xtensa Code Generator Pass Configuration Options. 
class XtensaPassConfig : public TargetPassConfig { diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.h b/llvm/lib/Target/Xtensa/XtensaTargetMachine.h index 6975076b5d699..f371f22ed3d0e 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetMachine.h +++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.h @@ -45,10 +45,6 @@ class XtensaTargetMachine : public LLVMTargetMachine { return TLOF.get(); } - MachineFunctionInfo * - createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, - const TargetSubtargetInfo *STI) const override; - protected: mutable StringMap> SubtargetMap; }; diff --git a/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll index c1e590484e717..83dbd1265a0fb 100644 --- a/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll +++ b/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll @@ -1,31 +1,19 @@ ; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=XTENSA %s -declare i8 @llvm.cttz.i8(i8, i1) -declare i16 @llvm.cttz.i16(i16, i1) declare i32 @llvm.cttz.i32(i32, i1) -declare i64 @llvm.cttz.i64(i64, i1) -declare i8 @llvm.ctlz.i8(i8, i1) -declare i16 @llvm.ctlz.i16(i16, i1) declare i32 @llvm.ctlz.i32(i32, i1) -declare i64 @llvm.ctlz.i64(i64, i1) -declare i8 @llvm.ctpop.i8(i8) -declare i16 @llvm.ctpop.i16(i16) declare i32 @llvm.ctpop.i32(i32) -declare i64 @llvm.ctpop.i64(i64) -define i8 @test_cttz_i8(i8 %a) nounwind { -; XTENSA-LABEL: test_cttz_i8: -; XTENSA: movi a9, 255 -; XTENSA-NEXT: and a10, a2, a9 -; XTENSA-NEXT: movi a8, 8 -; XTENSA-NEXT: beqz a10, .LBB0_2 +define i32 @test_cttz_i32(i32 %a) nounwind { +; XTENSA-LABEL: test_cttz_i32: +; XTENSA: movi a8, 32 +; XTENSA-NEXT: beqz a2, .LBB0_2 ; XTENSA-NEXT: j .LBB0_1 ; XTENSA-NEXT: .LBB0_1: # %cond.false ; XTENSA-NEXT: movi a8, -1 ; XTENSA-NEXT: xor a8, a2, a8 -; XTENSA-NEXT: addi a10, a2, -1 -; XTENSA-NEXT: and a8, a8, a10 +; XTENSA-NEXT: addi a9, a2, -1 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: srli a9, a8, 1 ; XTENSA-NEXT: l32r 
a10, .LCPI0_0 @@ -47,146 +35,11 @@ define i8 @test_cttz_i8(i8 %a) nounwind { ; XTENSA-NEXT: extui a8, a8, 0, 6 ; XTENSA-NEXT: .LBB0_2: # %cond.end ; XTENSA-NEXT: or a2, a8, a8 -; XTENSA-NEXT: ret - %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 false) - ret i8 %tmp -} - -define i16 @test_cttz_i16(i16 %a) nounwind { -; XTENSA-LABEL: test_cttz_i16: -; XTENSA: l32r a9, .LCPI1_0 -; XTENSA-NEXT: and a10, a2, a9 -; XTENSA-NEXT: movi a8, 16 -; XTENSA-NEXT: beqz a10, .LBB1_2 -; XTENSA-NEXT: j .LBB1_1 -; XTENSA-NEXT: .LBB1_1: # %cond.false -; XTENSA-NEXT: movi a8, -1 -; XTENSA-NEXT: xor a8, a2, a8 -; XTENSA-NEXT: addi a10, a2, -1 -; XTENSA-NEXT: and a8, a8, a10 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 1 -; XTENSA-NEXT: l32r a10, .LCPI1_1 -; XTENSA-NEXT: and a9, a9, a10 -; XTENSA-NEXT: sub a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI1_2 -; XTENSA-NEXT: and a10, a8, a9 -; XTENSA-NEXT: srli a8, a8, 2 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: add a8, a10, a8 -; XTENSA-NEXT: srli a9, a8, 4 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI1_3 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 8 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a9, a8, 16, 5 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a8, a8, 0, 6 -; XTENSA-NEXT: .LBB1_2: # %cond.end -; XTENSA-NEXT: or a2, a8, a8 -; XTENSA-NEXT: ret - %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 false) - ret i16 %tmp -} - -define i32 @test_cttz_i32(i32 %a) nounwind { -; XTENSA-LABEL: test_cttz_i32: -; XTENSA: movi a8, 32 -; XTENSA-NEXT: beqz a2, .LBB2_2 -; XTENSA-NEXT: j .LBB2_1 -; XTENSA-NEXT: .LBB2_1: # %cond.false -; XTENSA-NEXT: movi a8, -1 -; XTENSA-NEXT: xor a8, a2, a8 -; XTENSA-NEXT: addi a9, a2, -1 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 1 -; XTENSA-NEXT: l32r a10, .LCPI2_0 -; XTENSA-NEXT: and a9, a9, a10 -; XTENSA-NEXT: sub a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI2_1 -; XTENSA-NEXT: and a10, a8, a9 -; XTENSA-NEXT: srli a8, a8, 2 -; XTENSA-NEXT: and a8, a8, a9 
-; XTENSA-NEXT: add a8, a10, a8 -; XTENSA-NEXT: srli a9, a8, 4 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI2_2 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 8 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a9, a8, 16, 5 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a8, a8, 0, 6 -; XTENSA-NEXT: .LBB2_2: # %cond.end -; XTENSA-NEXT: or a2, a8, a8 ; XTENSA-NEXT: ret %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 false) ret i32 %tmp } -define i8 @test_cttz_i8_zero_undef(i8 %a) nounwind { -; XTENSA-LABEL: test_cttz_i8_zero_undef: -; XTENSA: movi a8, -1 -; XTENSA-NEXT: xor a8, a2, a8 -; XTENSA-NEXT: addi a9, a2, -1 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: movi a9, 255 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 1 -; XTENSA-NEXT: l32r a10, .LCPI3_0 -; XTENSA-NEXT: and a9, a9, a10 -; XTENSA-NEXT: sub a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI3_1 -; XTENSA-NEXT: and a10, a8, a9 -; XTENSA-NEXT: srli a8, a8, 2 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: add a8, a10, a8 -; XTENSA-NEXT: srli a9, a8, 4 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI3_2 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 8 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a9, a8, 16, 5 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a2, a8, 0, 6 -; XTENSA-NEXT: ret - %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 true) - ret i8 %tmp -} - -define i16 @test_cttz_i16_zero_undef(i16 %a) nounwind { -; XTENSA-LABEL: test_cttz_i16_zero_undef: -; XTENSA: movi a8, -1 -; XTENSA-NEXT: xor a8, a2, a8 -; XTENSA-NEXT: addi a9, a2, -1 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI4_0 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 1 -; XTENSA-NEXT: l32r a10, .LCPI4_1 -; XTENSA-NEXT: and a9, a9, a10 -; XTENSA-NEXT: sub a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI4_2 -; XTENSA-NEXT: and a10, a8, a9 -; XTENSA-NEXT: srli a8, a8, 2 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: add a8, a10, a8 -; XTENSA-NEXT: 
srli a9, a8, 4 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI4_3 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 8 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a9, a8, 16, 5 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a2, a8, 0, 6 -; XTENSA-NEXT: ret - %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 true) - ret i16 %tmp -} - define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind { ; XTENSA-LABEL: test_cttz_i32_zero_undef: ; XTENSA: movi a8, -1 @@ -194,17 +47,17 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind { ; XTENSA-NEXT: addi a9, a2, -1 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: srli a9, a8, 1 -; XTENSA-NEXT: l32r a10, .LCPI5_0 +; XTENSA-NEXT: l32r a10, .LCPI1_0 ; XTENSA-NEXT: and a9, a9, a10 ; XTENSA-NEXT: sub a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI5_1 +; XTENSA-NEXT: l32r a9, .LCPI1_1 ; XTENSA-NEXT: and a10, a8, a9 ; XTENSA-NEXT: srli a8, a8, 2 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: add a8, a10, a8 ; XTENSA-NEXT: srli a9, a8, 4 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI5_2 +; XTENSA-NEXT: l32r a9, .LCPI1_2 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: srli a9, a8, 8 ; XTENSA-NEXT: add a8, a8, a9 @@ -216,113 +69,13 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind { ret i32 %tmp } -define i8 @test_ctlz_i8(i8 %a) nounwind { -; XTENSA-LABEL: test_ctlz_i8: -; XTENSA: movi a9, 255 -; XTENSA-NEXT: and a10, a2, a9 -; XTENSA-NEXT: movi a8, 8 -; XTENSA-NEXT: beqz a10, .LBB6_2 -; XTENSA-NEXT: j .LBB6_1 -; XTENSA-NEXT: .LBB6_1: # %cond.false -; XTENSA-NEXT: movi a8, 254 -; XTENSA-NEXT: and a8, a2, a8 -; XTENSA-NEXT: srli a8, a8, 1 -; XTENSA-NEXT: or a8, a2, a8 -; XTENSA-NEXT: movi a10, 252 -; XTENSA-NEXT: and a10, a8, a10 -; XTENSA-NEXT: srli a10, a10, 2 -; XTENSA-NEXT: or a8, a8, a10 -; XTENSA-NEXT: movi a10, 240 -; XTENSA-NEXT: and a10, a8, a10 -; XTENSA-NEXT: srli a10, a10, 4 -; XTENSA-NEXT: or a8, a8, a10 -; XTENSA-NEXT: movi a10, -1 -; XTENSA-NEXT: xor a8, a8, a10 -; XTENSA-NEXT: and 
a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 1 -; XTENSA-NEXT: l32r a10, .LCPI6_0 -; XTENSA-NEXT: and a9, a9, a10 -; XTENSA-NEXT: sub a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI6_1 -; XTENSA-NEXT: and a10, a8, a9 -; XTENSA-NEXT: srli a8, a8, 2 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: add a8, a10, a8 -; XTENSA-NEXT: srli a9, a8, 4 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI6_2 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 8 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a9, a8, 16, 5 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a8, a8, 0, 6 -; XTENSA-NEXT: .LBB6_2: # %cond.end -; XTENSA-NEXT: or a2, a8, a8 -; XTENSA-NEXT: ret - %tmp = call i8 @llvm.ctlz.i8(i8 %a, i1 false) - ret i8 %tmp -} - -define i16 @test_ctlz_i16(i16 %a) nounwind { -; XTENSA-LABEL: test_ctlz_i16: -; XTENSA: l32r a9, .LCPI7_0 -; XTENSA-NEXT: and a10, a2, a9 -; XTENSA-NEXT: movi a8, 16 -; XTENSA-NEXT: beqz a10, .LBB7_2 -; XTENSA-NEXT: j .LBB7_1 -; XTENSA-NEXT: .LBB7_1: # %cond.false -; XTENSA-NEXT: l32r a8, .LCPI7_1 -; XTENSA-NEXT: and a8, a2, a8 -; XTENSA-NEXT: srli a8, a8, 1 -; XTENSA-NEXT: or a8, a2, a8 -; XTENSA-NEXT: l32r a10, .LCPI7_2 -; XTENSA-NEXT: and a10, a8, a10 -; XTENSA-NEXT: srli a10, a10, 2 -; XTENSA-NEXT: or a8, a8, a10 -; XTENSA-NEXT: l32r a10, .LCPI7_3 -; XTENSA-NEXT: and a10, a8, a10 -; XTENSA-NEXT: srli a10, a10, 4 -; XTENSA-NEXT: or a8, a8, a10 -; XTENSA-NEXT: l32r a10, .LCPI7_4 -; XTENSA-NEXT: and a10, a8, a10 -; XTENSA-NEXT: srli a10, a10, 8 -; XTENSA-NEXT: or a8, a8, a10 -; XTENSA-NEXT: movi a10, -1 -; XTENSA-NEXT: xor a8, a8, a10 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 1 -; XTENSA-NEXT: l32r a10, .LCPI7_5 -; XTENSA-NEXT: and a9, a9, a10 -; XTENSA-NEXT: sub a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI7_6 -; XTENSA-NEXT: and a10, a8, a9 -; XTENSA-NEXT: srli a8, a8, 2 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: add a8, a10, a8 -; XTENSA-NEXT: srli a9, a8, 4 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: l32r 
a9, .LCPI7_7 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 8 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a9, a8, 16, 5 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a8, a8, 0, 6 -; XTENSA-NEXT: .LBB7_2: # %cond.end -; XTENSA-NEXT: or a2, a8, a8 -; XTENSA-NEXT: ret - %tmp = call i16 @llvm.ctlz.i16(i16 %a, i1 false) - ret i16 %tmp -} - define i32 @test_ctlz_i32(i32 %a) nounwind { ; XTENSA-LABEL: test_ctlz_i32: ; XTENSA: or a8, a2, a2 ; XTENSA-NEXT: movi a2, 32 -; XTENSA-NEXT: beqz a8, .LBB8_2 -; XTENSA-NEXT: j .LBB8_1 -; XTENSA-NEXT: .LBB8_1: # %cond.false +; XTENSA-NEXT: beqz a8, .LBB2_2 +; XTENSA-NEXT: j .LBB2_1 +; XTENSA-NEXT: .LBB2_1: # %cond.false ; XTENSA-NEXT: srli a9, a8, 1 ; XTENSA-NEXT: or a8, a8, a9 ; XTENSA-NEXT: srli a9, a8, 2 @@ -338,115 +91,29 @@ define i32 @test_ctlz_i32(i32 %a) nounwind { ; XTENSA-NEXT: movi a9, -1 ; XTENSA-NEXT: xor a8, a8, a9 ; XTENSA-NEXT: srli a9, a8, 1 -; XTENSA-NEXT: l32r a10, .LCPI8_0 +; XTENSA-NEXT: l32r a10, .LCPI2_0 ; XTENSA-NEXT: and a9, a9, a10 ; XTENSA-NEXT: sub a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI8_1 +; XTENSA-NEXT: l32r a9, .LCPI2_1 ; XTENSA-NEXT: and a10, a8, a9 ; XTENSA-NEXT: srli a8, a8, 2 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: add a8, a10, a8 ; XTENSA-NEXT: srli a9, a8, 4 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI8_2 +; XTENSA-NEXT: l32r a9, .LCPI2_2 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: srli a9, a8, 8 ; XTENSA-NEXT: add a8, a8, a9 ; XTENSA-NEXT: extui a9, a8, 16, 5 ; XTENSA-NEXT: add a8, a8, a9 ; XTENSA-NEXT: extui a2, a8, 0, 6 -; XTENSA-NEXT: .LBB8_2: # %cond.end +; XTENSA-NEXT: .LBB2_2: # %cond.end ; XTENSA-NEXT: ret %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 false) ret i32 %tmp } -define i8 @test_ctlz_i8_zero_undef(i8 %a) nounwind { -; XTENSA-LABEL: test_ctlz_i8_zero_undef: -; XTENSA: movi a8, 254 -; XTENSA-NEXT: and a8, a2, a8 -; XTENSA-NEXT: srli a8, a8, 1 -; XTENSA-NEXT: or a8, a2, a8 -; XTENSA-NEXT: movi a9, 252 -; XTENSA-NEXT: and a9, a8, a9 
-; XTENSA-NEXT: srli a9, a9, 2 -; XTENSA-NEXT: or a8, a8, a9 -; XTENSA-NEXT: movi a9, 240 -; XTENSA-NEXT: and a9, a8, a9 -; XTENSA-NEXT: srli a9, a9, 4 -; XTENSA-NEXT: or a8, a8, a9 -; XTENSA-NEXT: movi a9, -1 -; XTENSA-NEXT: xor a8, a8, a9 -; XTENSA-NEXT: movi a9, 255 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 1 -; XTENSA-NEXT: l32r a10, .LCPI9_0 -; XTENSA-NEXT: and a9, a9, a10 -; XTENSA-NEXT: sub a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI9_1 -; XTENSA-NEXT: and a10, a8, a9 -; XTENSA-NEXT: srli a8, a8, 2 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: add a8, a10, a8 -; XTENSA-NEXT: srli a9, a8, 4 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI9_2 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 8 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a9, a8, 16, 5 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a2, a8, 0, 6 -; XTENSA-NEXT: ret - %tmp = call i8 @llvm.ctlz.i8(i8 %a, i1 true) - ret i8 %tmp -} - -define i16 @test_ctlz_i16_zero_undef(i16 %a) nounwind { -; XTENSA-LABEL: test_ctlz_i16_zero_undef: -; XTENSA: l32r a8, .LCPI10_0 -; XTENSA-NEXT: and a8, a2, a8 -; XTENSA-NEXT: srli a8, a8, 1 -; XTENSA-NEXT: or a8, a2, a8 -; XTENSA-NEXT: l32r a9, .LCPI10_1 -; XTENSA-NEXT: and a9, a8, a9 -; XTENSA-NEXT: srli a9, a9, 2 -; XTENSA-NEXT: or a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI10_2 -; XTENSA-NEXT: and a9, a8, a9 -; XTENSA-NEXT: srli a9, a9, 4 -; XTENSA-NEXT: or a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI10_3 -; XTENSA-NEXT: and a9, a8, a9 -; XTENSA-NEXT: srli a9, a9, 8 -; XTENSA-NEXT: or a8, a8, a9 -; XTENSA-NEXT: movi a9, -1 -; XTENSA-NEXT: xor a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI10_4 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 1 -; XTENSA-NEXT: l32r a10, .LCPI10_5 -; XTENSA-NEXT: and a9, a9, a10 -; XTENSA-NEXT: sub a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI10_6 -; XTENSA-NEXT: and a10, a8, a9 -; XTENSA-NEXT: srli a8, a8, 2 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: add a8, a10, a8 -; 
XTENSA-NEXT: srli a9, a8, 4 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI10_7 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 8 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a9, a8, 16, 5 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a2, a8, 0, 6 -; XTENSA-NEXT: ret - %tmp = call i16 @llvm.ctlz.i16(i16 %a, i1 true) - ret i16 %tmp -} - define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind { ; XTENSA-LABEL: test_ctlz_i32_zero_undef: ; XTENSA: srli a8, a2, 1 @@ -464,17 +131,17 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind { ; XTENSA-NEXT: movi a9, -1 ; XTENSA-NEXT: xor a8, a8, a9 ; XTENSA-NEXT: srli a9, a8, 1 -; XTENSA-NEXT: l32r a10, .LCPI11_0 +; XTENSA-NEXT: l32r a10, .LCPI3_0 ; XTENSA-NEXT: and a9, a9, a10 ; XTENSA-NEXT: sub a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI11_1 +; XTENSA-NEXT: l32r a9, .LCPI3_1 ; XTENSA-NEXT: and a10, a8, a9 ; XTENSA-NEXT: srli a8, a8, 2 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: add a8, a10, a8 ; XTENSA-NEXT: srli a9, a8, 4 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI11_2 +; XTENSA-NEXT: l32r a9, .LCPI3_2 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: srli a9, a8, 8 ; XTENSA-NEXT: add a8, a8, a9 @@ -486,74 +153,20 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind { ret i32 %tmp } -define i8 @test_ctpop_i8(i8 %a) nounwind { -; XTENSA-LABEL: test_ctpop_i8: -; XTENSA: movi a8, 255 -; XTENSA-NEXT: and a8, a2, a8 -; XTENSA-NEXT: srli a9, a8, 1 -; XTENSA-NEXT: l32r a10, .LCPI12_0 -; XTENSA-NEXT: and a9, a9, a10 -; XTENSA-NEXT: sub a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI12_1 -; XTENSA-NEXT: and a10, a8, a9 -; XTENSA-NEXT: srli a8, a8, 2 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: add a8, a10, a8 -; XTENSA-NEXT: srli a9, a8, 4 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI12_2 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 8 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a9, a8, 16, 5 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: 
extui a2, a8, 0, 6 -; XTENSA-NEXT: ret - %1 = call i8 @llvm.ctpop.i8(i8 %a) - ret i8 %1 -} - -define i16 @test_ctpop_i16(i16 %a) nounwind { -; XTENSA-LABEL: test_ctpop_i16: -; XTENSA: l32r a8, .LCPI13_0 -; XTENSA-NEXT: and a8, a2, a8 -; XTENSA-NEXT: srli a9, a8, 1 -; XTENSA-NEXT: l32r a10, .LCPI13_1 -; XTENSA-NEXT: and a9, a9, a10 -; XTENSA-NEXT: sub a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI13_2 -; XTENSA-NEXT: and a10, a8, a9 -; XTENSA-NEXT: srli a8, a8, 2 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: add a8, a10, a8 -; XTENSA-NEXT: srli a9, a8, 4 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI13_3 -; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 8 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a9, a8, 16, 5 -; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a2, a8, 0, 6 -; XTENSA-NEXT: ret - %1 = call i16 @llvm.ctpop.i16(i16 %a) - ret i16 %1 -} - define i32 @test_ctpop_i32(i32 %a) nounwind { ; XTENSA-LABEL: test_ctpop_i32: ; XTENSA: srli a8, a2, 1 -; XTENSA-NEXT: l32r a9, .LCPI14_0 +; XTENSA-NEXT: l32r a9, .LCPI4_0 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: sub a8, a2, a8 -; XTENSA-NEXT: l32r a9, .LCPI14_1 +; XTENSA-NEXT: l32r a9, .LCPI4_1 ; XTENSA-NEXT: and a10, a8, a9 ; XTENSA-NEXT: srli a8, a8, 2 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: add a8, a10, a8 ; XTENSA-NEXT: srli a9, a8, 4 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: l32r a9, .LCPI14_2 +; XTENSA-NEXT: l32r a9, .LCPI4_2 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: srli a9, a8, 8 ; XTENSA-NEXT: add a8, a8, a9 From 0414d44c08cfac0f5bae810d7e70657d38bdd1fd Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 12 Aug 2024 02:04:37 +0300 Subject: [PATCH 007/289] [Xtensa] Minor fixes --- llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp | 10 ++++++++-- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 15 +-------------- llvm/test/CodeGen/Xtensa/bswap.ll | 1 + llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll | 1 + llvm/test/CodeGen/Xtensa/div.ll | 1 + 
llvm/test/CodeGen/Xtensa/mul.ll | 1 + llvm/test/CodeGen/Xtensa/rotl-rotr.ll | 1 + llvm/test/CodeGen/Xtensa/shift.ll | 1 + 8 files changed, 15 insertions(+), 16 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp index 869db1c7fcd5a..9f9fac26272de 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp @@ -137,7 +137,10 @@ void XtensaDAGToDAGISel::Select(SDNode *Node) { case ISD::SHL: { SDValue N0 = Node->getOperand(0); SDValue N1 = Node->getOperand(1); - if (!isa(N1)) { + auto *C = dyn_cast(N1); + // If C is constant in range [1..31] then we can generate SLLI + // instruction using pattern matching, otherwise generate SLL + if (!C || !(isUInt<5>(C->getZExtValue()) && !C->isZero())) { SDNode *SSL = CurDAG->getMachineNode(Xtensa::SSL, DL, MVT::Glue, N1); SDNode *SLL = CurDAG->getMachineNode(Xtensa::SLL, DL, VT, N0, SDValue(SSL, 0)); @@ -164,7 +167,10 @@ void XtensaDAGToDAGISel::Select(SDNode *Node) { case ISD::SRA: { SDValue N0 = Node->getOperand(0); SDValue N1 = Node->getOperand(1); - if (!isa(N1)) { + auto *C = dyn_cast(N1); + // If C is constant in range [0..31] then we can generate SRAI + // instruction using pattern matching, otherwise generate SRA + if (!C || !isUInt<5>(C->getZExtValue())) { SDNode *SSR = CurDAG->getMachineNode(Xtensa::SSR, DL, MVT::Glue, N1); SDNode *SRA = CurDAG->getMachineNode(Xtensa::SRA, DL, VT, N0, SDValue(SSR, 0)); diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 11827021db32f..61ddfb3f2641e 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -765,17 +765,12 @@ SDValue XtensaTargetLowering::LowerShiftLeftParts(SDValue Op, DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusRegisterSize); SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt); - SDValue HiTrue = DAG.getNode(XtensaISD::SRCL, DL, VT, 
Hi, Lo, Shamt); - SDValue Zero = DAG.getConstant(0, DL, VT); - SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusRegisterSize); SDValue Cond = DAG.getSetCC(DL, VT, ShamtMinusRegisterSize, Zero, ISD::SETLT); - Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, LoTrue, Zero); - Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, HiTrue, HiFalse); return DAG.getMergeValues({Lo, Hi}, DL); @@ -806,21 +801,16 @@ SDValue XtensaTargetLowering::LowerShiftRightParts(SDValue Op, // Hi = 0; unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL; - SDValue MinusRegisterSize = DAG.getConstant(-32, DL, VT); SDValue RegisterSizeMinus1 = DAG.getConstant(32 - 1, DL, VT); SDValue ShamtMinusRegisterSize = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusRegisterSize); SDValue LoTrue = DAG.getNode(XtensaISD::SRCR, DL, VT, Hi, Lo, Shamt); - SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt); - SDValue Zero = DAG.getConstant(0, DL, VT); - SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusRegisterSize); - SDValue HiFalse; if (IsSRA) { @@ -830,13 +820,10 @@ SDValue XtensaTargetLowering::LowerShiftRightParts(SDValue Op, } SDValue Cond = DAG.getSetCC(DL, VT, ShamtMinusRegisterSize, Zero, ISD::SETLT); - Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, LoTrue, LoFalse); - Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, HiTrue, HiFalse); - SDValue Ops[2] = {Lo, Hi}; - return DAG.getMergeValues(Ops, DL); + return DAG.getMergeValues({Lo, Hi}, DL); } SDValue XtensaTargetLowering::LowerCTPOP(SDValue Op, SelectionDAG &DAG) const { diff --git a/llvm/test/CodeGen/Xtensa/bswap.ll b/llvm/test/CodeGen/Xtensa/bswap.ll index e4458c7cf81c3..9f52de87236a1 100644 --- a/llvm/test/CodeGen/Xtensa/bswap.ll +++ b/llvm/test/CodeGen/Xtensa/bswap.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=XTENSA %s diff --git 
a/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll index 83dbd1265a0fb..5494e5568906e 100644 --- a/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll +++ b/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=XTENSA %s diff --git a/llvm/test/CodeGen/Xtensa/div.ll b/llvm/test/CodeGen/Xtensa/div.ll index fcb58eb5bff53..883178acdf69a 100644 --- a/llvm/test/CodeGen/Xtensa/div.ll +++ b/llvm/test/CodeGen/Xtensa/div.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=XTENSA %s diff --git a/llvm/test/CodeGen/Xtensa/mul.ll b/llvm/test/CodeGen/Xtensa/mul.ll index 08b4b1f57166a..9b13897293dc1 100644 --- a/llvm/test/CodeGen/Xtensa/mul.ll +++ b/llvm/test/CodeGen/Xtensa/mul.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=XTENSA %s diff --git a/llvm/test/CodeGen/Xtensa/rotl-rotr.ll b/llvm/test/CodeGen/Xtensa/rotl-rotr.ll index 1dc52fbc94b41..350315e9aefda 100644 --- a/llvm/test/CodeGen/Xtensa/rotl-rotr.ll +++ b/llvm/test/CodeGen/Xtensa/rotl-rotr.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=XTENSA %s diff --git a/llvm/test/CodeGen/Xtensa/shift.ll b/llvm/test/CodeGen/Xtensa/shift.ll index 85973e26c2ef4..2b92b8032b626 100644 --- a/llvm/test/CodeGen/Xtensa/shift.ll +++ b/llvm/test/CodeGen/Xtensa/shift.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by 
utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ ; RUN: | FileCheck %s From f05cf16d51d6314d7c81b1c7166d426a3e6a8901 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 12 Aug 2024 18:10:15 +0300 Subject: [PATCH 008/289] [Xtensa] Imrpove CTPOP lowering. Minor fixes int SHL/SRC/SRA operations selection. --- llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp | 29 ++-- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 43 +----- llvm/test/CodeGen/Xtensa/bswap.ll | 128 ++++++++---------- llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll | 38 +++--- llvm/test/CodeGen/Xtensa/div.ll | 12 +- 5 files changed, 100 insertions(+), 150 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp index 9f9fac26272de..06a04ace59b0d 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp @@ -153,24 +153,33 @@ void XtensaDAGToDAGISel::Select(SDNode *Node) { SDValue N0 = Node->getOperand(0); SDValue N1 = Node->getOperand(1); auto *C = dyn_cast(N1); - // If C is constant in range [0..15] then we can generate SRLI - // instruction using pattern matching, otherwise generate SRL - if (!C || !isUInt<4>(C->getZExtValue())) { - SDNode *SSR = CurDAG->getMachineNode(Xtensa::SSR, DL, MVT::Glue, N1); - SDNode *SRL = - CurDAG->getMachineNode(Xtensa::SRL, DL, VT, N0, SDValue(SSR, 0)); - ReplaceNode(Node, SRL); + + // If C is constant then we can generate SRLI + // instruction using pattern matching or EXTUI, otherwise generate SRL + if (C) { + if (isUInt<4>(C->getZExtValue())) + break; + unsigned ShAmt = C->getZExtValue(); + SDNode *EXTUI = CurDAG->getMachineNode( + Xtensa::EXTUI, DL, VT, N0, CurDAG->getTargetConstant(ShAmt, DL, VT), + CurDAG->getTargetConstant(32 - ShAmt, DL, VT)); + ReplaceNode(Node, EXTUI); return; } - break; + + SDNode *SSR = CurDAG->getMachineNode(Xtensa::SSR, DL, MVT::Glue, N1); + SDNode *SRL = + 
CurDAG->getMachineNode(Xtensa::SRL, DL, VT, N0, SDValue(SSR, 0)); + ReplaceNode(Node, SRL); + return; } case ISD::SRA: { SDValue N0 = Node->getOperand(0); SDValue N1 = Node->getOperand(1); auto *C = dyn_cast(N1); - // If C is constant in range [0..31] then we can generate SRAI + // If C is constant then we can generate SRAI // instruction using pattern matching, otherwise generate SRA - if (!C || !isUInt<5>(C->getZExtValue())) { + if (!C) { SDNode *SSR = CurDAG->getMachineNode(Xtensa::SSR, DL, MVT::Glue, N1); SDNode *SRA = CurDAG->getMachineNode(Xtensa::SRA, DL, VT, N0, SDValue(SSR, 0)); diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 61ddfb3f2641e..c7675c2f50176 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -827,47 +827,8 @@ SDValue XtensaTargetLowering::LowerShiftRightParts(SDValue Op, } SDValue XtensaTargetLowering::LowerCTPOP(SDValue Op, SelectionDAG &DAG) const { - EVT VT = Op->getValueType(0); - SDValue Val = Op.getOperand(0); - SDLoc DL(Op); - - if (VT != MVT::i32) - return SDValue(); - - // CTPOP expansion: - // Val = (Val - (Val >> 1)) & 0x55555555 - // Val = ((Val >> 2) & 0x33333333) + (Val & 0x33333333) - // Val = ((Val >> 4) + Val) & 0x0f0f0f0f - // Val = (Val >> 8) + Val - // Val = (extract bits [16, 20] from Val) + Val - // Val = extract bits [0, 5] from Val - - SDValue Mask = DAG.getConstant(0x55555555, DL, VT); - SDValue Shift = - DAG.getNode(ISD::SRL, DL, VT, Val, DAG.getConstant(1, DL, VT)); - SDValue ShiftAndMask = DAG.getNode(ISD::AND, DL, VT, Shift, Mask); - Val = DAG.getNode(ISD::SUB, DL, VT, Val, ShiftAndMask); - - Mask = DAG.getConstant(0x33333333, DL, VT); - Shift = DAG.getNode(ISD::SRL, DL, VT, Val, DAG.getConstant(2, DL, VT)); - SDValue ValAndMask = DAG.getNode(ISD::AND, DL, VT, Val, Mask); - ShiftAndMask = DAG.getNode(ISD::AND, DL, VT, Shift, Mask); - Val = DAG.getNode(ISD::ADD, DL, VT, ValAndMask, 
ShiftAndMask); - - Mask = DAG.getConstant(0x0f0f0f0f, DL, VT); - Shift = DAG.getNode(ISD::SRL, DL, VT, Val, DAG.getConstant(4, DL, VT)); - Val = DAG.getNode(ISD::ADD, DL, VT, Val, Shift); - Val = DAG.getNode(ISD::AND, DL, VT, Val, Mask); - - Shift = DAG.getNode(ISD::SRL, DL, VT, Val, DAG.getConstant(8, DL, VT)); - Val = DAG.getNode(ISD::ADD, DL, VT, Val, Shift); - - Shift = DAG.getNode(XtensaISD::EXTUI, DL, VT, Val, - DAG.getConstant(16, DL, VT), DAG.getConstant(5, DL, VT)); - Val = DAG.getNode(ISD::ADD, DL, VT, Val, Shift); - - return DAG.getNode(XtensaISD::EXTUI, DL, VT, Val, DAG.getConstant(0, DL, VT), - DAG.getConstant(6, DL, VT)); + auto &TLI = DAG.getTargetLoweringInfo(); + return TLI.expandCTPOP(Op.getNode(), DAG); } bool XtensaTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT, diff --git a/llvm/test/CodeGen/Xtensa/bswap.ll b/llvm/test/CodeGen/Xtensa/bswap.ll index 9f52de87236a1..6a87aa84351cf 100644 --- a/llvm/test/CodeGen/Xtensa/bswap.ll +++ b/llvm/test/CodeGen/Xtensa/bswap.ll @@ -24,14 +24,12 @@ define i16 @test_bswap_i16(i16 %a) nounwind { define i32 @test_bswap_i32(i32 %a) nounwind { ; XTENSA-LABEL: test_bswap_i32: -; XTENSA: movi a8, 24 -; XTENSA-NEXT: ssr a8 -; XTENSA-NEXT: srl a8, a2 -; XTENSA-NEXT: srli a9, a2, 8 -; XTENSA-NEXT: l32r a10, .LCPI1_0 -; XTENSA-NEXT: and a9, a9, a10 -; XTENSA-NEXT: or a8, a9, a8 -; XTENSA-NEXT: and a9, a2, a10 +; XTENSA: srli a8, a2, 8 +; XTENSA-NEXT: l32r a9, .LCPI1_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: extui a10, a2, 24, 8 +; XTENSA-NEXT: or a8, a8, a10 +; XTENSA-NEXT: and a9, a2, a9 ; XTENSA-NEXT: slli a9, a9, 8 ; XTENSA-NEXT: slli a10, a2, 24 ; XTENSA-NEXT: or a9, a10, a9 @@ -43,28 +41,25 @@ define i32 @test_bswap_i32(i32 %a) nounwind { define i64 @test_bswap_i64(i64 %a) nounwind { ; XTENSA-LABEL: test_bswap_i64: -; XTENSA: movi a9, 24 -; XTENSA-NEXT: ssr a9 -; XTENSA-NEXT: srl a8, a3 -; XTENSA-NEXT: srli a10, a3, 8 -; XTENSA-NEXT: l32r a11, .LCPI2_0 -; XTENSA-NEXT: and a10, a10, a11 -; 
XTENSA-NEXT: or a8, a10, a8 -; XTENSA-NEXT: and a10, a3, a11 +; XTENSA: srli a8, a3, 8 +; XTENSA-NEXT: l32r a9, .LCPI2_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: extui a10, a3, 24, 8 +; XTENSA-NEXT: or a8, a8, a10 +; XTENSA-NEXT: and a10, a3, a9 ; XTENSA-NEXT: slli a10, a10, 8 -; XTENSA-NEXT: slli a7, a3, 24 -; XTENSA-NEXT: or a10, a7, a10 +; XTENSA-NEXT: slli a11, a3, 24 +; XTENSA-NEXT: or a10, a11, a10 ; XTENSA-NEXT: or a8, a10, a8 -; XTENSA-NEXT: ssr a9 -; XTENSA-NEXT: srl a9, a2 ; XTENSA-NEXT: srli a10, a2, 8 -; XTENSA-NEXT: and a10, a10, a11 -; XTENSA-NEXT: or a9, a10, a9 -; XTENSA-NEXT: and a10, a2, a11 -; XTENSA-NEXT: slli a10, a10, 8 +; XTENSA-NEXT: and a10, a10, a9 +; XTENSA-NEXT: extui a11, a2, 24, 8 +; XTENSA-NEXT: or a10, a10, a11 +; XTENSA-NEXT: and a9, a2, a9 +; XTENSA-NEXT: slli a9, a9, 8 ; XTENSA-NEXT: slli a11, a2, 24 -; XTENSA-NEXT: or a10, a11, a10 -; XTENSA-NEXT: or a3, a10, a9 +; XTENSA-NEXT: or a9, a11, a9 +; XTENSA-NEXT: or a3, a9, a10 ; XTENSA-NEXT: or a2, a8, a8 ; XTENSA-NEXT: ret %tmp = call i64 @llvm.bswap.i64(i64 %a) @@ -129,14 +124,12 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind { define i32 @test_bitreverse_i32(i32 %a) nounwind { ; XTENSA-LABEL: test_bitreverse_i32: -; XTENSA: movi a8, 24 -; XTENSA-NEXT: ssr a8 -; XTENSA-NEXT: srl a8, a2 -; XTENSA-NEXT: srli a9, a2, 8 -; XTENSA-NEXT: l32r a10, .LCPI5_0 -; XTENSA-NEXT: and a9, a9, a10 -; XTENSA-NEXT: or a8, a9, a8 -; XTENSA-NEXT: and a9, a2, a10 +; XTENSA: srli a8, a2, 8 +; XTENSA-NEXT: l32r a9, .LCPI5_0 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: extui a10, a2, 24, 8 +; XTENSA-NEXT: or a8, a8, a10 +; XTENSA-NEXT: and a9, a2, a9 ; XTENSA-NEXT: slli a9, a9, 8 ; XTENSA-NEXT: slli a10, a2, 24 ; XTENSA-NEXT: or a9, a10, a9 @@ -166,45 +159,42 @@ define i32 @test_bitreverse_i32(i32 %a) nounwind { define i64 @test_bitreverse_i64(i64 %a) nounwind { ; XTENSA-LABEL: test_bitreverse_i64: -; XTENSA: movi a10, 24 -; XTENSA-NEXT: ssr a10 -; XTENSA-NEXT: srl a8, a3 -; XTENSA-NEXT: srli 
a11, a3, 8 +; XTENSA: srli a8, a3, 8 ; XTENSA-NEXT: l32r a9, .LCPI6_0 -; XTENSA-NEXT: and a11, a11, a9 -; XTENSA-NEXT: or a8, a11, a8 -; XTENSA-NEXT: and a11, a3, a9 -; XTENSA-NEXT: slli a11, a11, 8 -; XTENSA-NEXT: slli a7, a3, 24 -; XTENSA-NEXT: or a11, a7, a11 -; XTENSA-NEXT: or a8, a11, a8 -; XTENSA-NEXT: srli a7, a8, 4 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: extui a10, a3, 24, 8 +; XTENSA-NEXT: or a8, a8, a10 +; XTENSA-NEXT: and a10, a3, a9 +; XTENSA-NEXT: slli a10, a10, 8 +; XTENSA-NEXT: slli a11, a3, 24 +; XTENSA-NEXT: or a10, a11, a10 +; XTENSA-NEXT: or a8, a10, a8 +; XTENSA-NEXT: srli a10, a8, 4 ; XTENSA-NEXT: l32r a11, .LCPI6_1 -; XTENSA-NEXT: and a7, a7, a11 +; XTENSA-NEXT: and a10, a10, a11 ; XTENSA-NEXT: and a8, a8, a11 ; XTENSA-NEXT: slli a8, a8, 4 -; XTENSA-NEXT: or a8, a7, a8 -; XTENSA-NEXT: srli a7, a8, 2 -; XTENSA-NEXT: l32r a6, .LCPI6_2 -; XTENSA-NEXT: and a7, a7, a6 -; XTENSA-NEXT: and a8, a8, a6 +; XTENSA-NEXT: or a8, a10, a8 +; XTENSA-NEXT: srli a10, a8, 2 +; XTENSA-NEXT: l32r a7, .LCPI6_2 +; XTENSA-NEXT: and a10, a10, a7 +; XTENSA-NEXT: and a8, a8, a7 ; XTENSA-NEXT: slli a8, a8, 2 -; XTENSA-NEXT: or a8, a7, a8 -; XTENSA-NEXT: srli a7, a8, 1 -; XTENSA-NEXT: l32r a5, .LCPI6_3 -; XTENSA-NEXT: and a7, a7, a5 -; XTENSA-NEXT: and a8, a8, a5 +; XTENSA-NEXT: or a8, a10, a8 +; XTENSA-NEXT: srli a10, a8, 1 +; XTENSA-NEXT: l32r a6, .LCPI6_3 +; XTENSA-NEXT: and a10, a10, a6 +; XTENSA-NEXT: and a8, a8, a6 ; XTENSA-NEXT: slli a8, a8, 1 -; XTENSA-NEXT: or a8, a7, a8 -; XTENSA-NEXT: ssr a10 -; XTENSA-NEXT: srl a10, a2 -; XTENSA-NEXT: srli a7, a2, 8 -; XTENSA-NEXT: and a7, a7, a9 -; XTENSA-NEXT: or a10, a7, a10 +; XTENSA-NEXT: or a8, a10, a8 +; XTENSA-NEXT: srli a10, a2, 8 +; XTENSA-NEXT: and a10, a10, a9 +; XTENSA-NEXT: extui a5, a2, 24, 8 +; XTENSA-NEXT: or a10, a10, a5 ; XTENSA-NEXT: and a9, a2, a9 ; XTENSA-NEXT: slli a9, a9, 8 -; XTENSA-NEXT: slli a7, a2, 24 -; XTENSA-NEXT: or a9, a7, a9 +; XTENSA-NEXT: slli a5, a2, 24 +; XTENSA-NEXT: or a9, a5, a9 
; XTENSA-NEXT: or a9, a9, a10 ; XTENSA-NEXT: srli a10, a9, 4 ; XTENSA-NEXT: and a10, a10, a11 @@ -212,13 +202,13 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind { ; XTENSA-NEXT: slli a9, a9, 4 ; XTENSA-NEXT: or a9, a10, a9 ; XTENSA-NEXT: srli a10, a9, 2 -; XTENSA-NEXT: and a10, a10, a6 -; XTENSA-NEXT: and a9, a9, a6 +; XTENSA-NEXT: and a10, a10, a7 +; XTENSA-NEXT: and a9, a9, a7 ; XTENSA-NEXT: slli a9, a9, 2 ; XTENSA-NEXT: or a9, a10, a9 ; XTENSA-NEXT: srli a10, a9, 1 -; XTENSA-NEXT: and a10, a10, a5 -; XTENSA-NEXT: and a9, a9, a5 +; XTENSA-NEXT: and a10, a10, a6 +; XTENSA-NEXT: and a9, a9, a6 ; XTENSA-NEXT: slli a9, a9, 1 ; XTENSA-NEXT: or a3, a10, a9 ; XTENSA-NEXT: or a2, a8, a8 diff --git a/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll index 5494e5568906e..f58bed19d4ee7 100644 --- a/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll +++ b/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll @@ -29,11 +29,11 @@ define i32 @test_cttz_i32(i32 %a) nounwind { ; XTENSA-NEXT: add a8, a8, a9 ; XTENSA-NEXT: l32r a9, .LCPI0_2 ; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: slli a9, a8, 8 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a9, a8, 16, 5 +; XTENSA-NEXT: slli a9, a8, 16 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a8, a8, 0, 6 +; XTENSA-NEXT: extui a8, a8, 24, 8 ; XTENSA-NEXT: .LBB0_2: # %cond.end ; XTENSA-NEXT: or a2, a8, a8 ; XTENSA-NEXT: ret @@ -60,11 +60,11 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind { ; XTENSA-NEXT: add a8, a8, a9 ; XTENSA-NEXT: l32r a9, .LCPI1_2 ; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: slli a9, a8, 8 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a9, a8, 16, 5 +; XTENSA-NEXT: slli a9, a8, 16 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a2, a8, 0, 6 +; XTENSA-NEXT: extui a2, a8, 24, 8 ; XTENSA-NEXT: ret %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 true) ret i32 %tmp @@ -85,9 +85,7 @@ define i32 @test_ctlz_i32(i32 
%a) nounwind { ; XTENSA-NEXT: or a8, a8, a9 ; XTENSA-NEXT: srli a9, a8, 8 ; XTENSA-NEXT: or a8, a8, a9 -; XTENSA-NEXT: movi a9, 16 -; XTENSA-NEXT: ssr a9 -; XTENSA-NEXT: srl a9, a8 +; XTENSA-NEXT: extui a9, a8, 16, 16 ; XTENSA-NEXT: or a8, a8, a9 ; XTENSA-NEXT: movi a9, -1 ; XTENSA-NEXT: xor a8, a8, a9 @@ -104,11 +102,11 @@ define i32 @test_ctlz_i32(i32 %a) nounwind { ; XTENSA-NEXT: add a8, a8, a9 ; XTENSA-NEXT: l32r a9, .LCPI2_2 ; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: slli a9, a8, 8 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a9, a8, 16, 5 +; XTENSA-NEXT: slli a9, a8, 16 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a2, a8, 0, 6 +; XTENSA-NEXT: extui a2, a8, 24, 8 ; XTENSA-NEXT: .LBB2_2: # %cond.end ; XTENSA-NEXT: ret %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 false) @@ -125,9 +123,7 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind { ; XTENSA-NEXT: or a8, a8, a9 ; XTENSA-NEXT: srli a9, a8, 8 ; XTENSA-NEXT: or a8, a8, a9 -; XTENSA-NEXT: movi a9, 16 -; XTENSA-NEXT: ssr a9 -; XTENSA-NEXT: srl a9, a8 +; XTENSA-NEXT: extui a9, a8, 16, 16 ; XTENSA-NEXT: or a8, a8, a9 ; XTENSA-NEXT: movi a9, -1 ; XTENSA-NEXT: xor a8, a8, a9 @@ -144,11 +140,11 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind { ; XTENSA-NEXT: add a8, a8, a9 ; XTENSA-NEXT: l32r a9, .LCPI3_2 ; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: slli a9, a8, 8 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a9, a8, 16, 5 +; XTENSA-NEXT: slli a9, a8, 16 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a2, a8, 0, 6 +; XTENSA-NEXT: extui a2, a8, 24, 8 ; XTENSA-NEXT: ret %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 true) ret i32 %tmp @@ -169,11 +165,11 @@ define i32 @test_ctpop_i32(i32 %a) nounwind { ; XTENSA-NEXT: add a8, a8, a9 ; XTENSA-NEXT: l32r a9, .LCPI4_2 ; XTENSA-NEXT: and a8, a8, a9 -; XTENSA-NEXT: srli a9, a8, 8 +; XTENSA-NEXT: slli a9, a8, 8 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a9, a8, 16, 5 +; 
XTENSA-NEXT: slli a9, a8, 16 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a2, a8, 0, 6 +; XTENSA-NEXT: extui a2, a8, 24, 8 ; XTENSA-NEXT: ret %1 = call i32 @llvm.ctpop.i32(i32 %a) ret i32 %1 diff --git a/llvm/test/CodeGen/Xtensa/div.ll b/llvm/test/CodeGen/Xtensa/div.ll index 883178acdf69a..e10e976fb1b38 100644 --- a/llvm/test/CodeGen/Xtensa/div.ll +++ b/llvm/test/CodeGen/Xtensa/div.ll @@ -451,9 +451,7 @@ define i16 @sdiv16_constant_lhs(i16 %a) nounwind { define i32 @sdiv_pow2(i32 %a) nounwind { ; XTENSA-LABEL: sdiv_pow2: ; XTENSA: srai a8, a2, 31 -; XTENSA-NEXT: movi a9, 29 -; XTENSA-NEXT: ssr a9 -; XTENSA-NEXT: srl a8, a8 +; XTENSA-NEXT: extui a8, a8, 29, 3 ; XTENSA-NEXT: add a8, a2, a8 ; XTENSA-NEXT: srai a2, a8, 3 ; XTENSA-NEXT: ret @@ -464,9 +462,7 @@ define i32 @sdiv_pow2(i32 %a) nounwind { define i32 @sdiv_pow2_2(i32 %a) nounwind { ; XTENSA-LABEL: sdiv_pow2_2: ; XTENSA: srai a8, a2, 31 -; XTENSA-NEXT: movi a9, 16 -; XTENSA-NEXT: ssr a9 -; XTENSA-NEXT: srl a8, a8 +; XTENSA-NEXT: extui a8, a8, 16, 16 ; XTENSA-NEXT: add a8, a2, a8 ; XTENSA-NEXT: srai a2, a8, 16 ; XTENSA-NEXT: ret @@ -478,9 +474,7 @@ define i16 @sdiv16_pow2(i16 %a) nounwind { ; XTENSA-LABEL: sdiv16_pow2: ; XTENSA: slli a8, a2, 16 ; XTENSA-NEXT: srai a8, a8, 16 -; XTENSA-NEXT: movi a9, 28 -; XTENSA-NEXT: ssr a9 -; XTENSA-NEXT: srl a8, a8 +; XTENSA-NEXT: extui a8, a8, 28, 4 ; XTENSA-NEXT: movi a9, 7 ; XTENSA-NEXT: and a8, a8, a9 ; XTENSA-NEXT: add a8, a2, a8 From a860cda4805ba1a3af22e9180067dc76b0972cd3 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 12 Aug 2024 21:03:32 +0300 Subject: [PATCH 009/289] [Xtensa] Fix shift tests. 
--- llvm/test/CodeGen/Xtensa/shift.ll | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/llvm/test/CodeGen/Xtensa/shift.ll b/llvm/test/CodeGen/Xtensa/shift.ll index 2b92b8032b626..729b66b12ab20 100644 --- a/llvm/test/CodeGen/Xtensa/shift.ll +++ b/llvm/test/CodeGen/Xtensa/shift.ll @@ -44,7 +44,7 @@ define i32 @lshr(i32 %x, i32 %y) nounwind { ret i32 %c } -define i32 @lshr_imm_1(i32 %x, i32 %y) nounwind { +define i32 @lshr_imm_1(i32 %x) nounwind { ; CHECK-LABEL: lshr_imm_1: ; CHECK: srli a2, a2, 1 ; CHECK-NEXT: ret @@ -52,7 +52,7 @@ define i32 @lshr_imm_1(i32 %x, i32 %y) nounwind { ret i32 %c } -define i32 @lshr_imm_15(i32 %x, i32 %y) nounwind { +define i32 @lshr_imm_15(i32 %x) nounwind { ; CHECK-LABEL: lshr_imm_15: ; CHECK: srli a2, a2, 15 ; CHECK-NEXT: ret @@ -69,7 +69,7 @@ define i32 @ashr(i32 %x, i32 %y) nounwind { ret i32 %c } -define i32 @ashr_imm_1(i32 %x, i32 %y) nounwind { +define i32 @ashr_imm_1(i32 %x) nounwind { ; CHECK-LABEL: ashr_imm_1: ; CHECK: srai a2, a2, 1 ; CHECK-NEXT: ret @@ -77,7 +77,7 @@ define i32 @ashr_imm_1(i32 %x, i32 %y) nounwind { ret i32 %c } -define i32 @ashr_imm_10(i32 %x, i32 %y) nounwind { +define i32 @ashr_imm_10(i32 %x) nounwind { ; CHECK-LABEL: ashr_imm_10: ; CHECK: srai a2, a2, 10 ; CHECK-NEXT: ret @@ -85,7 +85,7 @@ define i32 @ashr_imm_10(i32 %x, i32 %y) nounwind { ret i32 %c } -define i32 @ashr_imm_31(i32 %x, i32 %y) nounwind { +define i32 @ashr_imm_31(i32 %x) nounwind { ; CHECK-LABEL: ashr_imm_31: ; CHECK: srai a2, a2, 31 ; CHECK-NEXT: ret From 87c8d07a603e61745bdd5bc785e461da71a8c4b0 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 14 Aug 2024 15:44:24 +0300 Subject: [PATCH 010/289] [Xtensa] Implement load pseudo operations and patterns. 
--- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 21 +++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 7 +++++++ llvm/lib/Target/Xtensa/XtensaUtils.cpp | 1 + 3 files changed, 29 insertions(+) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index c7675c2f50176..b4f5e53b93aaf 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -987,11 +987,32 @@ XtensaTargetLowering::emitSelectCC(MachineInstr &MI, MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( MachineInstr &MI, MachineBasicBlock *MBB) const { + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + MachineFunction *MF = MBB->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); DebugLoc DL = MI.getDebugLoc(); switch (MI.getOpcode()) { case Xtensa::SELECT: return emitSelectCC(MI, MBB); + case Xtensa::L8I_P: { + MachineOperand &R = MI.getOperand(0); + MachineOperand &Op1 = MI.getOperand(1); + MachineOperand &Op2 = MI.getOperand(2); + + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::L8UI), R1).add(Op1).add(Op2); + + unsigned R2 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SLLI), R2).addReg(R1).addImm(24); + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SRAI), R.getReg()) + .addReg(R2) + .addImm(24); + MI.eraseFromParent(); + return MBB; + } default: llvm_unreachable("Unexpected instr type to insert"); } diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 0d01864b54bc3..79f7af190e482 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -263,6 +263,13 @@ let isCodeGenOnly = 1, mayLoad = 1 in { } } +// Xtensa missed L8I load operation, use pseudo operation +let usesCustomInserter = 1 in +def L8I_P: Pseudo<(outs AR:$t), (ins 
mem8:$addr), + "!L8I_P $t, $addr", + [(set AR:$t, (sextloadi8 + addr_ish1:$addr))]>; + //extending loads def : Pat<(i32 (extloadi1 addr_ish1:$addr)), (L8UI addr_ish1:$addr)>; def : Pat<(i32 (extloadi8 addr_ish1:$addr)), (L8UI addr_ish1:$addr)>; diff --git a/llvm/lib/Target/Xtensa/XtensaUtils.cpp b/llvm/lib/Target/Xtensa/XtensaUtils.cpp index 98e424f6ea440..e11cdf67693c0 100644 --- a/llvm/lib/Target/Xtensa/XtensaUtils.cpp +++ b/llvm/lib/Target/Xtensa/XtensaUtils.cpp @@ -37,6 +37,7 @@ bool isValidAddrOffset(MachineInstr &MI, int64_t Offset) { int Scale = 0; switch (MI.getOpcode()) { + case Xtensa::L8I_P: case Xtensa::L8UI: case Xtensa::S8I: Scale = 1; From ab3e8edf7171f49821f19db07fce4bdccf9ee0c7 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 14 Aug 2024 17:23:20 +0300 Subject: [PATCH 011/289] [Xtensa] Support for variable arguments --- llvm/lib/Target/Xtensa/CMakeLists.txt | 1 + .../lib/Target/Xtensa/XtensaFrameLowering.cpp | 2 +- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 190 +++++++++++++++++- llvm/lib/Target/Xtensa/XtensaISelLowering.h | 6 + llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp | 2 +- .../Xtensa/XtensaMachineFunctionInfo.cpp | 19 ++ .../Target/Xtensa/XtensaMachineFunctionInfo.h | 46 +++++ llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp | 1 + .../lib/Target/Xtensa/XtensaTargetMachine.cpp | 7 + llvm/lib/Target/Xtensa/XtensaTargetMachine.h | 3 + 10 files changed, 268 insertions(+), 9 deletions(-) create mode 100644 llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.cpp create mode 100644 llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h diff --git a/llvm/lib/Target/Xtensa/CMakeLists.txt b/llvm/lib/Target/Xtensa/CMakeLists.txt index 726efadc87c0b..cc6109ed7d085 100644 --- a/llvm/lib/Target/Xtensa/CMakeLists.txt +++ b/llvm/lib/Target/Xtensa/CMakeLists.txt @@ -21,6 +21,7 @@ add_llvm_target(XtensaCodeGen XtensaInstrInfo.cpp XtensaISelDAGToDAG.cpp XtensaISelLowering.cpp + XtensaMachineFunctionInfo.cpp XtensaRegisterInfo.cpp XtensaSubtarget.cpp 
XtensaTargetMachine.cpp diff --git a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp index e24cb7714d364..87dda2b3cc681 100644 --- a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp @@ -12,8 +12,8 @@ #include "XtensaFrameLowering.h" #include "XtensaInstrInfo.h" +#include "XtensaMachineFunctionInfo.h" #include "XtensaSubtarget.h" -#include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index b4f5e53b93aaf..d3dbbfc6289db 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -13,10 +13,10 @@ #include "XtensaISelLowering.h" #include "XtensaConstantPoolValue.h" +#include "XtensaMachineFunctionInfo.h" #include "XtensaSubtarget.h" #include "XtensaTargetMachine.h" #include "llvm/CodeGen/CallingConvLower.h" -#include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineJumpTableInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" @@ -32,6 +32,9 @@ using namespace llvm; #define DEBUG_TYPE "xtensa-lower" +static const MCPhysReg XtensaArgRegs[6] = {Xtensa::A2, Xtensa::A3, Xtensa::A4, + Xtensa::A5, Xtensa::A6, Xtensa::A7}; + // Return true if we must use long (in fact, indirect) function call. // It's simplified version, production implimentation must // resolve a functions in ROM (usually glibc functions) @@ -131,6 +134,14 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setOperationAction(ISD::STACKSAVE, MVT::Other, Custom); setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom); + // VASTART and VACOPY need to deal with the Xtensa-specific varargs + // structure, but VAEND is a no-op. 
+ setOperationAction(ISD::VASTART, MVT::Other, Custom); + // we use special va_list structure so we have to customize this + setOperationAction(ISD::VAARG, MVT::Other, Expand); + setOperationAction(ISD::VACOPY, MVT::Other, Custom); + setOperationAction(ISD::VAEND, MVT::Other, Expand); + // Compute derived properties from the register classes computeRegisterProperties(STI.getRegisterInfo()); } @@ -141,6 +152,11 @@ bool XtensaTargetLowering::isOffsetFoldingLegal( return false; } +unsigned XtensaTargetLowering::getVaListSizeInBits(const DataLayout &DL) const { + // 2 * sizeof(int*) + sizeof(int) + return 3 * 4; +} + //===----------------------------------------------------------------------===// // Calling conventions //===----------------------------------------------------------------------===// @@ -234,13 +250,14 @@ SDValue XtensaTargetLowering::LowerFormalArguments( SelectionDAG &DAG, SmallVectorImpl &InVals) const { MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo &MFI = MF.getFrameInfo(); + XtensaFunctionInfo *XtensaFI = MF.getInfo(); + EVT PtrVT = getPointerTy(MF.getDataLayout()); + + XtensaFI->setVarArgsFrameIndex(0); // Used with vargs to acumulate store chains. std::vector OutChains; - if (IsVarArg) - report_fatal_error("Var arg not supported by FormalArguments Lowering"); - // Assign locations to all of the incoming arguments. 
SmallVector ArgLocs; CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs, @@ -308,6 +325,66 @@ SDValue XtensaTargetLowering::LowerFormalArguments( } } + if (IsVarArg) { + ArrayRef ArgRegs = ArrayRef(XtensaArgRegs); + unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs); + const TargetRegisterClass *RC = &Xtensa::ARRegClass; + MachineFrameInfo &MFI = MF.getFrameInfo(); + MachineRegisterInfo &RegInfo = MF.getRegInfo(); + unsigned RegSize = 4; + MVT RegTy = MVT::getIntegerVT(RegSize * 8); + + XtensaFI->setVarArgsFirstGPR(Idx + 2); // 2 - number of a2 register + + XtensaFI->setVarArgsStackOffset(MFI.CreateFixedObject( + PtrVT.getSizeInBits() / 8, CCInfo.getStackSize(), true)); + + // Offset of the first variable argument from stack pointer, and size of + // the vararg save area. For now, the varargs save area is either zero or + // large enough to hold a0-a7. + int VaArgOffset, VarArgsSaveSize; + + // If all registers are allocated, then all varargs must be passed on the + // stack and we don't need to save any argregs. + if (ArgRegs.size() == Idx) { + VaArgOffset = CCInfo.getStackSize(); + VarArgsSaveSize = 0; + } else { + VarArgsSaveSize = RegSize * (ArgRegs.size() - Idx); + VaArgOffset = -VarArgsSaveSize; + } + + // Record the frame index of the first variable argument + // which is a value necessary to VASTART. + int FI = MFI.CreateFixedObject(RegSize, VaArgOffset, true); + XtensaFI->setVarArgsFrameIndex(FI); + + // Copy the integer registers that may have been used for passing varargs + // to the vararg save area. 
+ for (unsigned I = Idx; I < ArgRegs.size(); ++I, VaArgOffset += RegSize) { + const unsigned Reg = RegInfo.createVirtualRegister(RC); + unsigned FrameReg = Subtarget.getRegisterInfo()->getFrameRegister(MF); + + // Argument passed in FrameReg we save in A8 (in emitPrologue), + // so load argument from A8 + if (ArgRegs[I] == FrameReg) { + RegInfo.addLiveIn(Xtensa::A8, Reg); + } else { + RegInfo.addLiveIn(ArgRegs[I], Reg); + } + + SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegTy); + FI = MFI.CreateFixedObject(RegSize, VaArgOffset, true); + SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); + SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff, + MachinePointerInfo::getFixedStack(MF, FI)); + cast(Store.getNode()) + ->getMemOperand() + ->setValue((Value *)nullptr); + OutChains.push_back(Store); + } + } + // All stores are grouped in one node to allow the matching between // the size of Ins and InVals. This only happens when on varg functions if (!OutChains.empty()) { @@ -509,9 +586,6 @@ XtensaTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, const SmallVectorImpl &Outs, const SmallVectorImpl &OutVals, const SDLoc &DL, SelectionDAG &DAG) const { - if (IsVarArg) - report_fatal_error("VarArg not supported"); - MachineFunction &MF = DAG.getMachineFunction(); // Assign locations to each returned value. 
@@ -746,6 +820,104 @@ SDValue XtensaTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, return DAG.getMergeValues(Ops, DL); } +SDValue XtensaTargetLowering::LowerVASTART(SDValue Op, + SelectionDAG &DAG) const { + MachineFunction &MF = DAG.getMachineFunction(); + XtensaFunctionInfo *XtensaFI = MF.getInfo(); + EVT PtrVT = getPointerTy(DAG.getDataLayout()); + SDLoc DL(Op); + + SDValue Chain = Op.getOperand(0); + SDValue Addr = Op.getOperand(1); + + // typedef struct __va_list_tag { + // int32_t *__va_stk; /* Initialized to point to the position of the + // * first argument in memory offset to account for + // the + // * arguments passed in registers and to account for + // * the size of the argument registers not being + // 16-byte + // * aligned. E.G., there are 6 argument registers + // * of 4 bytes each, but we want the __va_ndx for the + // * first stack argument to have the maximal + // * alignment of 16 bytes, so we offset the __va_stk + // address by + // * 32 bytes so that __va_stk[32] references the + // first + // * argument on the stack. + // */ + // int32_t *__va_reg; /* Points to a stack-allocated region holding the + // * contents + // * of the incoming argument registers + // */ + // int32_t __va_ndx; /* Index initialized to the position of the first + // * unnamed (variable) argument. This same index is + // also + // * used to address the arguments passed in memory. 
+ // */ + // } __va_list_tag[1]; + + SDValue ArgAR; + SDValue OverflowPtrAdvance; + SDValue StackOffsetFI = + DAG.getFrameIndex(XtensaFI->getVarArgsStackOffset(), PtrVT); + + if (XtensaFI->getVarArgsFirstGPR() < 8) { + ArgAR = + DAG.getConstant(XtensaFI->getVarArgsFirstGPR() * 4 - 8, DL, MVT::i32); + OverflowPtrAdvance = DAG.getConstant(32, DL, PtrVT); + } else { + OverflowPtrAdvance = DAG.getNode(ISD::AND, DL, PtrVT, StackOffsetFI, + DAG.getConstant(0xf, DL, PtrVT)); + OverflowPtrAdvance = DAG.getNode(ISD::ADD, DL, PtrVT, OverflowPtrAdvance, + DAG.getConstant(32, DL, PtrVT)); + ArgAR = OverflowPtrAdvance; + } + + SDValue FR = DAG.getFrameIndex(XtensaFI->getVarArgsFrameIndex(), PtrVT); + + uint64_t FrameOffset = PtrVT.getSizeInBits() / 8; + SDValue ConstFrameOffset1 = DAG.getConstant(FrameOffset, DL, PtrVT); + SDValue ConstFrameOffset2 = DAG.getConstant(FrameOffset * 2, DL, PtrVT); + + const Value *SV = cast(Op.getOperand(2))->getValue(); + + // Store first word : arguments given in stack (__va_stk) + // Advance Argument Overflow pointer down, lest it will point to start + // after register argument va_arg finished + SDValue StackOffsetFICorr = + DAG.getNode(ISD::SUB, DL, PtrVT, StackOffsetFI, OverflowPtrAdvance); + SDValue firstStore = + DAG.getStore(Chain, DL, StackOffsetFICorr, Addr, MachinePointerInfo(SV)); + + uint64_t nextOffset = FrameOffset; + SDValue nextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, Addr, ConstFrameOffset1); + + // Store second word : arguments given on registers (__va_reg) + SDValue FRAdvance = + DAG.getConstant(XtensaFI->getVarArgsFirstGPR() * 4 - 8, DL, PtrVT); + SDValue FRDecr = DAG.getNode(ISD::SUB, DL, PtrVT, FR, FRAdvance); + SDValue secondStore = DAG.getStore(firstStore, DL, FRDecr, nextPtr, + MachinePointerInfo(SV, nextOffset)); + nextOffset += FrameOffset; + nextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, Addr, ConstFrameOffset2); + + // Store third word : number of int regs (__va_ndx) + return DAG.getStore(secondStore, DL, ArgAR, nextPtr, + 
MachinePointerInfo(SV, nextOffset)); +} + +SDValue XtensaTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { + // We have to copy the entire va_list struct: + // 2*sizeof(int*) + sizeof(int) = 12 Byte + unsigned VAListSize = 12; + return DAG.getMemcpy( + Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2), + DAG.getConstant(VAListSize, SDLoc(Op), MVT::i32), Align(8), + /*isVolatile=*/false, /*AlwaysInline=*/true, + /*CI=*/nullptr, std::nullopt, MachinePointerInfo(), MachinePointerInfo()); +} + SDValue XtensaTargetLowering::LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); @@ -884,6 +1056,10 @@ SDValue XtensaTargetLowering::LowerOperation(SDValue Op, return LowerSTACKRESTORE(Op, DAG); case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); + case ISD::VASTART: + return LowerVASTART(Op, DAG); + case ISD::VACOPY: + return LowerVACOPY(Op, DAG); case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); case ISD::SRA_PARTS: diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index 8e7346b40dfe5..09a0f0d7414dc 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -76,6 +76,9 @@ class XtensaTargetLowering : public TargetLowering { const char *getTargetNodeName(unsigned Opcode) const override; + /// Returns the size of the platform's va_list object. 
+ unsigned getVaListSizeInBits(const DataLayout &DL) const override; + SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override; SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, @@ -125,6 +128,9 @@ class XtensaTargetLowering : public TargetLowering { SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const; diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp index 491defb867643..13183bc51398d 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp @@ -13,9 +13,9 @@ //===----------------------------------------------------------------------===// #include "XtensaInstrInfo.h" +#include "XtensaMachineFunctionInfo.h" #include "XtensaTargetMachine.h" #include "llvm/CodeGen/MachineConstantPool.h" -#include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" diff --git a/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.cpp b/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.cpp new file mode 100644 index 0000000000000..1a285b2aa5310 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.cpp @@ -0,0 +1,19 @@ +//===- XtensaMachineFunctionInfo.cpp - Private data used for Xtensa -------===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "XtensaMachineFunctionInfo.h" +//#include "MCTargetDesc/XtensaBaseInfo.h" +#include "XtensaInstrInfo.h" +#include "XtensaSubtarget.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/IR/Function.h" + +using namespace llvm; diff --git a/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h b/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h new file mode 100644 index 0000000000000..ebc99f912ba0e --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h @@ -0,0 +1,46 @@ +//==- XtensaMachineFunctionInfo.h - Xtensa machine function info --*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares Xtensa-specific per-machine-function information. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_XTENSA_XTENSAMACHINEFUNCTIONINFO_H +#define LLVM_LIB_TARGET_XTENSA_XTENSAMACHINEFUNCTIONINFO_H + +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/Target/TargetMachine.h" + +namespace llvm { + +class XtensaFunctionInfo : public MachineFunctionInfo { + unsigned VarArgsFirstGPR; + int VarArgsStackOffset; + unsigned VarArgsFrameIndex; + +public: + explicit XtensaFunctionInfo(const Function &F, const TargetSubtargetInfo *STI) + : VarArgsFirstGPR(0), VarArgsStackOffset(0), VarArgsFrameIndex(0) {} + + unsigned getVarArgsFirstGPR() const { return VarArgsFirstGPR; } + void setVarArgsFirstGPR(unsigned GPR) { VarArgsFirstGPR = GPR; } + + int getVarArgsStackOffset() const { return VarArgsStackOffset; } + void setVarArgsStackOffset(int Offset) { VarArgsStackOffset = Offset; } + + // Get and set the frame index of the first stack vararg. 
+ unsigned getVarArgsFrameIndex() const { return VarArgsFrameIndex; } + void setVarArgsFrameIndex(unsigned FI) { VarArgsFrameIndex = FI; } +}; + +} // namespace llvm + +#endif /* LLVM_LIB_TARGET_XTENSA_XTENSAMACHINEFUNCTIONINFO_H */ diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp index bced2d4ad0095..fb7d45ab5c420 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp @@ -12,6 +12,7 @@ #include "XtensaRegisterInfo.h" #include "XtensaInstrInfo.h" +#include "XtensaMachineFunctionInfo.h" #include "XtensaSubtarget.h" #include "XtensaUtils.h" #include "llvm/CodeGen/MachineFrameInfo.h" diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp index 49c7faf84df1d..d8031f6a05ab7 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp +++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp @@ -12,6 +12,7 @@ // //===----------------------------------------------------------------------===// +#include "XtensaMachineFunctionInfo.h" #include "XtensaTargetMachine.h" #include "TargetInfo/XtensaTargetInfo.h" #include "llvm/CodeGen/Passes.h" @@ -83,6 +84,12 @@ XtensaTargetMachine::getSubtargetImpl(const Function &F) const { return I.get(); } +MachineFunctionInfo *XtensaTargetMachine::createMachineFunctionInfo( + BumpPtrAllocator &Allocator, const Function &F, + const TargetSubtargetInfo *STI) const { + return XtensaFunctionInfo::create(Allocator, F, STI); +} + namespace { /// Xtensa Code Generator Pass Configuration Options. 
class XtensaPassConfig : public TargetPassConfig { diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.h b/llvm/lib/Target/Xtensa/XtensaTargetMachine.h index f371f22ed3d0e..44df32fc915d9 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetMachine.h +++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.h @@ -44,6 +44,9 @@ class XtensaTargetMachine : public LLVMTargetMachine { TargetLoweringObjectFile *getObjFileLowering() const override { return TLOF.get(); } + MachineFunctionInfo * + createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, + const TargetSubtargetInfo *STI) const override; protected: mutable StringMap> SubtargetMap; From 38fd8e9deacf91f8eac09c54bebcbe447a1f6811 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 14 Aug 2024 17:33:50 +0300 Subject: [PATCH 012/289] [Xtensa] Add support for address intrinsics. Add support for llvm.{frameaddress,returnaddress} intrinsics. --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 41 +++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaISelLowering.h | 2 + 2 files changed, 43 insertions(+) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index d3dbbfc6289db..28c7950db7773 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -667,6 +667,26 @@ SDValue XtensaTargetLowering::LowerSELECT_CC(SDValue Op, FalseValue, TargetCC); } +SDValue XtensaTargetLowering::LowerRETURNADDR(SDValue Op, + SelectionDAG &DAG) const { + // check the depth + // TODO: xtensa-gcc can handle this, by navigating through the stack, we + // should be able to do this too + assert((cast(Op.getOperand(0))->getZExtValue() == 0) && + "Return address can be determined only for current frame."); + + MachineFunction &MF = DAG.getMachineFunction(); + MachineFrameInfo &MFI = MF.getFrameInfo(); + MVT VT = Op.getSimpleValueType(); + unsigned RA = Xtensa::A0; + MFI.setReturnAddressIsTaken(true); + + // Return RA, which 
contains the return address. Mark it an implicit + // live-in. + unsigned Reg = MF.addLiveIn(RA, getRegClassFor(VT)); + return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), Reg, VT); +} + SDValue XtensaTargetLowering::LowerImmediate(SDValue Op, SelectionDAG &DAG) const { const ConstantSDNode *CN = cast(Op); @@ -795,6 +815,23 @@ SDValue XtensaTargetLowering::LowerSTACKRESTORE(SDValue Op, Op.getOperand(1)); } +SDValue XtensaTargetLowering::LowerFRAMEADDR(SDValue Op, + SelectionDAG &DAG) const { + // check the depth + assert((cast(Op.getOperand(0))->getZExtValue() == 0) && + "Frame address can only be determined for current frame."); + + MachineFunction &MF = DAG.getMachineFunction(); + MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); + MFI.setFrameAddressIsTaken(true); + EVT VT = Op.getValueType(); + SDLoc DL(Op); + + unsigned FrameReg = Subtarget.getRegisterInfo()->getFrameRegister(MF); + SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT); + return FrameAddr; +} + SDValue XtensaTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { SDValue Chain = Op.getOperand(0); // Legalize the chain. 
@@ -1038,6 +1075,8 @@ SDValue XtensaTargetLowering::LowerOperation(SDValue Op, return LowerBR_JT(Op, DAG); case ISD::Constant: return LowerImmediate(Op, DAG); + case ISD::RETURNADDR: + return LowerRETURNADDR(Op, DAG); case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); case ISD::BlockAddress: @@ -1054,6 +1093,8 @@ SDValue XtensaTargetLowering::LowerOperation(SDValue Op, return LowerSTACKSAVE(Op, DAG); case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG); + case ISD::FRAMEADDR: + return LowerFRAMEADDR(Op, DAG); case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); case ISD::VASTART: diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index 09a0f0d7414dc..fe6db4d23a140 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -127,6 +127,7 @@ class XtensaTargetLowering : public TargetLowering { SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const; @@ -136,6 +137,7 @@ class XtensaTargetLowering : public TargetLowering { SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const; From 2d780d841480145505a67c81e7e54cf2f643004b Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 14 Aug 2024 20:06:46 +0300 Subject: [PATCH 013/289] [Xtensa] Add basic support for inline asm constraints --- .../Xtensa/MCTargetDesc/XtensaInstPrinter.cpp | 9 +++ .../Xtensa/MCTargetDesc/XtensaInstPrinter.h | 3 + llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp | 54 +++++++++++++ llvm/lib/Target/Xtensa/XtensaAsmPrinter.h | 10 +++ 
llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp | 27 +++++++ llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 76 +++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaISelLowering.h | 24 ++++++ 7 files changed, 203 insertions(+) diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp index fe1dc0e2e483e..10becc9e8c83b 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp @@ -27,6 +27,15 @@ using namespace llvm; #include "XtensaGenAsmWriter.inc" +void XtensaInstPrinter::printAddress(unsigned Base, int64_t Disp, + raw_ostream &O) { + O << Disp; + if (Base) { + O << '('; + O << getRegisterName(Base) << ')'; + } +} + static void printExpr(const MCExpr *Expr, raw_ostream &OS) { int Offset = 0; const MCSymbolRefExpr *SRE; diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h index 46a35ae6f4e3f..34d03569b9bce 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h @@ -35,6 +35,9 @@ class XtensaInstPrinter : public MCInstPrinter { // Print the given operand. static void printOperand(const MCOperand &MO, raw_ostream &O); + // Print an address + static void printAddress(unsigned Base, int64_t Disp, raw_ostream &O); + // Override MCInstPrinter. 
void printRegName(raw_ostream &O, MCRegister Reg) const override; void printInst(const MCInst *MI, uint64_t Address, StringRef Annot, diff --git a/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp index 3f99387f759d9..58e646f58563f 100644 --- a/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp +++ b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp @@ -12,6 +12,7 @@ //===----------------------------------------------------------------------===// #include "XtensaAsmPrinter.h" +#include "MCTargetDesc/XtensaInstPrinter.h" #include "MCTargetDesc/XtensaMCExpr.h" #include "MCTargetDesc/XtensaTargetStreamer.h" #include "TargetInfo/XtensaTargetInfo.h" @@ -157,6 +158,59 @@ void XtensaAsmPrinter::emitConstantPool() { OutStreamer->popSection(); } +void XtensaAsmPrinter::printOperand(const MachineInstr *MI, int OpNo, + raw_ostream &O) { + const MachineOperand &MO = MI->getOperand(OpNo); + + switch (MO.getType()) { + case MachineOperand::MO_Register: + case MachineOperand::MO_Immediate: { + MCOperand MC(lowerOperand(MI->getOperand(OpNo))); + XtensaInstPrinter::printOperand(MC, O); + break; + } + case MachineOperand::MO_GlobalAddress: + O << *getSymbol(MO.getGlobal()); + break; + default: + llvm_unreachable(""); + } + + if (MO.getTargetFlags()) { + O << ")"; + } +} + +bool XtensaAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, + const char *ExtraCode, raw_ostream &O) { + if (ExtraCode && *ExtraCode == 'n') { + if (!MI->getOperand(OpNo).isImm()) + return true; + O << -int64_t(MI->getOperand(OpNo).getImm()); + } else { + printOperand(MI, OpNo, O); + } + return false; +} + +bool XtensaAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, + unsigned OpNo, + const char *ExtraCode, + raw_ostream &OS) { + XtensaInstPrinter::printAddress(MI->getOperand(OpNo).getReg(), + MI->getOperand(OpNo + 1).getImm(), OS); + return false; +} + +void XtensaAsmPrinter::printMemOperand(const MachineInstr *MI, int opNum, + raw_ostream &OS) { + OS << '%' + 
<< XtensaInstPrinter::getRegisterName(MI->getOperand(opNum).getReg()); + OS << "("; + OS << MI->getOperand(opNum + 1).getImm(); + OS << ")"; +} + MCSymbol * XtensaAsmPrinter::GetConstantPoolIndexSymbol(const MachineOperand &MO) const { // Create a symbol for the name. diff --git a/llvm/lib/Target/Xtensa/XtensaAsmPrinter.h b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.h index f9cf5ae8c9f65..f7236a39fe6da 100644 --- a/llvm/lib/Target/Xtensa/XtensaAsmPrinter.h +++ b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.h @@ -42,6 +42,16 @@ class LLVM_LIBRARY_VISIBILITY XtensaAsmPrinter : public AsmPrinter { void emitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) override; + void printOperand(const MachineInstr *MI, int opNum, raw_ostream &O); + + bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, + const char *ExtraCode, raw_ostream &O) override; + + bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo, + const char *ExtraCode, raw_ostream &OS) override; + + void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &OS); + MCSymbol *GetConstantPoolIndexSymbol(const MachineOperand &MO) const; MCSymbol *GetJumpTableSymbol(const MachineOperand &MO) const; diff --git a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp index 06a04ace59b0d..95032a7c9c8e6 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp @@ -33,6 +33,10 @@ class XtensaDAGToDAGISel : public SelectionDAGISel { void Select(SDNode *Node) override; + bool SelectInlineAsmMemoryOperand(const SDValue &Op, + InlineAsm::ConstraintCode ConstraintID, + std::vector &OutOps) override; + // For load/store instructions generate (base+offset) pair from // memory address. The offset must be a multiple of scale argument. 
bool selectMemRegAddr(SDValue Addr, SDValue &Base, SDValue &Offset, @@ -212,3 +216,26 @@ void XtensaDAGToDAGISel::Select(SDNode *Node) { SelectCode(Node); } + +bool XtensaDAGToDAGISel::SelectInlineAsmMemoryOperand( + const SDValue &Op, InlineAsm::ConstraintCode ConstraintID, + std::vector &OutOps) { + switch (ConstraintID) { + default: + llvm_unreachable("Unexpected asm memory constraint"); + case InlineAsm::ConstraintCode::m: { + SDValue Base, Offset; + // TODO + selectMemRegAddr(Op, Base, Offset, 4); + OutOps.push_back(Base); + OutOps.push_back(Offset); + return false; + } + case InlineAsm::ConstraintCode::i: + case InlineAsm::ConstraintCode::R: + case InlineAsm::ConstraintCode::ZC: + OutOps.push_back(Op); + return false; + } + return false; +} diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 28c7950db7773..cff0d18057871 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -157,6 +157,82 @@ unsigned XtensaTargetLowering::getVaListSizeInBits(const DataLayout &DL) const { return 3 * 4; } +//===----------------------------------------------------------------------===// +// Inline asm support +//===----------------------------------------------------------------------===// +TargetLowering::ConstraintType +XtensaTargetLowering::getConstraintType(StringRef Constraint) const { + if (Constraint.size() == 1) { + switch (Constraint[0]) { + case 'a': + case 'd': + case 'r': + return C_RegisterClass; + + default: + break; + } + } + return TargetLowering::getConstraintType(Constraint); +} + +TargetLowering::ConstraintWeight +XtensaTargetLowering::getSingleConstraintMatchWeight( + AsmOperandInfo &info, const char *constraint) const { + ConstraintWeight weight = CW_Invalid; + Value *CallOperandVal = info.CallOperandVal; + // If we don't have a value, we can't do a match, + // but allow it at the lowest weight. 
+ if (CallOperandVal == NULL) + return CW_Default; + + // Look at the constraint type. + switch (*constraint) { + default: + weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); + break; + + case 'a': + case 'd': + case 'r': + if (CallOperandVal->getType()->isIntegerTy()) + weight = CW_Register; + break; + } + return weight; +} + +std::pair +XtensaTargetLowering::getRegForInlineAsmConstraint( + const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { + if (Constraint.size() == 1) { + // GCC Constraint Letters + switch (Constraint[0]) { + default: + break; + case 'a': // Address register + case 'd': // Data register (equivalent to 'r') + case 'r': // General-purpose register + return std::make_pair(0U, &Xtensa::ARRegClass); + } + } + return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); +} + +/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops +/// vector. If it is invalid, don't add anything to Ops. +void XtensaTargetLowering::LowerAsmOperandForConstraint( + SDValue Op, StringRef Constraint, std::vector &Ops, + SelectionDAG &DAG) const { + SDLoc DL(Op); + + // Only support length 1 constraints for now. + if (Constraint.size() > 1) + return; + + TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); +} + //===----------------------------------------------------------------------===// // Calling conventions //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index fe6db4d23a140..2243753acbd2f 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -79,6 +79,21 @@ class XtensaTargetLowering : public TargetLowering { /// Returns the size of the platform's va_list object. 
unsigned getVaListSizeInBits(const DataLayout &DL) const override; + std::pair + getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, + StringRef Constraint, MVT VT) const override; + + TargetLowering::ConstraintType + getConstraintType(StringRef Constraint) const override; + + TargetLowering::ConstraintWeight + getSingleConstraintMatchWeight(AsmOperandInfo &info, + const char *constraint) const override; + + void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, + std::vector &Ops, + SelectionDAG &DAG) const override; + SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override; SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, @@ -149,6 +164,15 @@ class XtensaTargetLowering : public TargetLowering { MachineBasicBlock *emitSelectCC(MachineInstr &MI, MachineBasicBlock *BB) const; + + InlineAsm::ConstraintCode + getInlineAsmMemConstraint(StringRef ConstraintCode) const override { + if (ConstraintCode == "R") + return InlineAsm::ConstraintCode::R; + else if (ConstraintCode == "ZC") + return InlineAsm::ConstraintCode::ZC; + return TargetLowering::getInlineAsmMemConstraint(ConstraintCode); + } }; } // end namespace llvm From ef77b894d8dbf3cd0e8c4ae4da062b1fdac5023c Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 14 Aug 2024 20:27:41 +0300 Subject: [PATCH 014/289] [Xtensa] Implement volatile load/store. Implement volatile load/store from/to volatile memory location. 
--- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 18 ++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 4 ++-- llvm/test/CodeGen/Xtensa/blockaddress.ll | 2 ++ 3 files changed, 22 insertions(+), 2 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index cff0d18057871..9690b5c656f6e 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -1296,6 +1296,11 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( const TargetRegisterClass *RC = getRegClassFor(MVT::i32); unsigned R1 = MRI.createVirtualRegister(RC); + const MachineMemOperand &MMO = **MI.memoperands_begin(); + if (MMO.isVolatile()) { + BuildMI(*MBB, MI, DL, TII.get(Xtensa::MEMW)); + } + BuildMI(*MBB, MI, DL, TII.get(Xtensa::L8UI), R1).add(Op1).add(Op2); unsigned R2 = MRI.createVirtualRegister(RC); @@ -1306,6 +1311,19 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( MI.eraseFromParent(); return MBB; } + case Xtensa::S8I: + case Xtensa::S16I: + case Xtensa::S32I: + case Xtensa::L8UI: + case Xtensa::L16SI: + case Xtensa::L16UI: + case Xtensa::L32I: { + const MachineMemOperand &MMO = **MI.memoperands_begin(); + if (MMO.isVolatile()) { + BuildMI(*MBB, MI, DL, TII.get(Xtensa::MEMW)); + } + return MBB; + } default: llvm_unreachable("Unexpected instr type to insert"); } diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 79f7af190e482..cf73238f6efbd 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -195,7 +195,7 @@ def SSAI : RRR_Inst<0x00, 0x00, 0x04, (outs), (ins uimm5:$imm), //===----------------------------------------------------------------------===// // Load instructions -let mayLoad = 1 in { +let mayLoad = 1, usesCustomInserter = 1 in { class Load_RRI8 oper, string instrAsm, SDPatternOperator opNode, ComplexPattern 
addrOp, Operand memOp> @@ -216,7 +216,7 @@ def L16UI : Load_RRI8<0x01, "l16ui", zextloadi16, addr_ish2, mem16>; def L32I : Load_RRI8<0x02, "l32i", load, addr_ish4, mem32>; // Store instructions -let mayStore = 1 in { +let mayStore = 1, usesCustomInserter = 1 in { class Store_II8 oper, string instrAsm, SDPatternOperator opNode, ComplexPattern addrOp, Operand memOp> : RRI8_Inst<0x02, (outs), (ins AR:$t, memOp:$addr), diff --git a/llvm/test/CodeGen/Xtensa/blockaddress.ll b/llvm/test/CodeGen/Xtensa/blockaddress.ll index bbeb1790a1b78..debcdbc049330 100644 --- a/llvm/test/CodeGen/Xtensa/blockaddress.ll +++ b/llvm/test/CodeGen/Xtensa/blockaddress.ll @@ -12,7 +12,9 @@ define void @test_blockaddress() { ; CHECK: # %bb.0: ; CHECK-NEXT: l32r a8, .LCPI0_0 ; CHECK-NEXT: l32r a9, .LCPI0_1 +; CHECK-NEXT: memw ; CHECK-NEXT: s32i a9, a8, 0 +; CHECK-NEXT: memw ; CHECK-NEXT: l32i a8, a8, 0 ; CHECK-NEXT: jx a8 ; CHECK-NEXT: .Ltmp0: From 1903a38ee7efe12bce26860b8d48051fd1d46e93 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 14 Aug 2024 20:39:01 +0300 Subject: [PATCH 015/289] [Xtensa] Implement branch analysis --- llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp | 298 ++++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaInstrInfo.h | 29 ++ llvm/test/CodeGen/Xtensa/brcc.ll | 61 ++-- llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll | 28 +- llvm/test/CodeGen/Xtensa/select-cc.ll | 9 +- llvm/utils/UpdateTestChecks/asm.py | 17 ++ 6 files changed, 382 insertions(+), 60 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp index 13183bc51398d..e46fa08c8fc26 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp @@ -184,3 +184,301 @@ void XtensaInstrInfo::loadImmediate(MachineBasicBlock &MBB, report_fatal_error("Unsupported load immediate value"); } } + +bool XtensaInstrInfo::reverseBranchCondition( + SmallVectorImpl &Cond) const { + assert(Cond.size() <= 4 && "Invalid branch condition!"); 
+ + switch (Cond[0].getImm()) { + case Xtensa::BEQ: + Cond[0].setImm(Xtensa::BNE); + return false; + case Xtensa::BNE: + Cond[0].setImm(Xtensa::BEQ); + return false; + case Xtensa::BLT: + Cond[0].setImm(Xtensa::BGE); + return false; + case Xtensa::BGE: + Cond[0].setImm(Xtensa::BLT); + return false; + case Xtensa::BLTU: + Cond[0].setImm(Xtensa::BGEU); + return false; + case Xtensa::BGEU: + Cond[0].setImm(Xtensa::BLTU); + return false; + + case Xtensa::BEQI: + Cond[0].setImm(Xtensa::BNEI); + return false; + case Xtensa::BNEI: + Cond[0].setImm(Xtensa::BEQI); + return false; + case Xtensa::BGEI: + Cond[0].setImm(Xtensa::BLTI); + return false; + case Xtensa::BLTI: + Cond[0].setImm(Xtensa::BGEI); + return false; + case Xtensa::BGEUI: + Cond[0].setImm(Xtensa::BLTUI); + return false; + case Xtensa::BLTUI: + Cond[0].setImm(Xtensa::BGEUI); + return false; + + case Xtensa::BEQZ: + Cond[0].setImm(Xtensa::BNEZ); + return false; + case Xtensa::BNEZ: + Cond[0].setImm(Xtensa::BEQZ); + return false; + case Xtensa::BLTZ: + Cond[0].setImm(Xtensa::BGEZ); + return false; + case Xtensa::BGEZ: + Cond[0].setImm(Xtensa::BLTZ); + return false; + + default: + llvm_unreachable("Invalid branch condition!"); + } +} + +bool XtensaInstrInfo::analyzeBranch(MachineBasicBlock &MBB, + MachineBasicBlock *&TBB, + MachineBasicBlock *&FBB, + SmallVectorImpl &Cond, + bool AllowModify = false) const { + // Most of the code and comments here are boilerplate. + + // Start from the bottom of the block and work up, examining the + // terminator instructions. + MachineBasicBlock::iterator I = MBB.end(); + while (I != MBB.begin()) { + --I; + if (I->isDebugValue()) + continue; + + // Working from the bottom, when we see a non-terminator instruction, we're + // done. + if (!isUnpredicatedTerminator(*I)) + break; + + // A terminator that isn't a branch can't easily be handled by this + // analysis. 
+ SmallVector ThisCond; + ThisCond.push_back(MachineOperand::CreateImm(0)); + const MachineOperand *ThisTarget; + if (!isBranch(I, ThisCond, ThisTarget)) + return true; + + // Can't handle indirect branches. + if (!ThisTarget->isMBB()) + return true; + + if (ThisCond[0].getImm() == Xtensa::J) { + // Handle unconditional branches. + if (!AllowModify) { + TBB = ThisTarget->getMBB(); + continue; + } + + // If the block has any instructions after a JMP, delete them. + while (std::next(I) != MBB.end()) + std::next(I)->eraseFromParent(); + + Cond.clear(); + FBB = 0; + + // TBB is used to indicate the unconditinal destination. + TBB = ThisTarget->getMBB(); + continue; + } + + // Working from the bottom, handle the first conditional branch. + if (Cond.empty()) { + // FIXME: add X86-style branch swap + FBB = TBB; + TBB = ThisTarget->getMBB(); + Cond.push_back(MachineOperand::CreateImm(ThisCond[0].getImm())); + + // push remaining operands + for (unsigned int i = 0; i < (I->getNumExplicitOperands() - 1); i++) + Cond.push_back(I->getOperand(i)); + + continue; + } + + // Handle subsequent conditional branches. + assert(Cond.size() <= 4); + assert(TBB); + + // Only handle the case where all conditional branches branch to the same + // destination. + if (TBB != ThisTarget->getMBB()) + return true; + + // If the conditions are the same, we can leave them alone. + unsigned OldCond = Cond[0].getImm(); + if (OldCond == ThisCond[0].getImm()) + continue; + } + + return false; +} + +unsigned XtensaInstrInfo::removeBranch(MachineBasicBlock &MBB, + int *BytesRemoved) const { + // Most of the code and comments here are boilerplate. + MachineBasicBlock::iterator I = MBB.end(); + unsigned Count = 0; + if (BytesRemoved) + *BytesRemoved = 0; + + while (I != MBB.begin()) { + --I; + SmallVector Cond; + Cond.push_back(MachineOperand::CreateImm(0)); + const MachineOperand *Target; + if (!isBranch(I, Cond, Target)) + break; + if (!Target->isMBB()) + break; + // Remove the branch. 
+ if (BytesRemoved) + *BytesRemoved += getInstSizeInBytes(*I); + I->eraseFromParent(); + I = MBB.end(); + ++Count; + } + return Count; +} + +unsigned XtensaInstrInfo::insertBranch( + MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, + ArrayRef Cond, const DebugLoc &DL, int *BytesAdded) const { + unsigned Count = 0; + if (BytesAdded) + *BytesAdded = 0; + if (FBB) { + // Need to build two branches then + // one to branch to TBB on Cond + // and a second one immediately after to unconditionally jump to FBB + Count = InsertBranchAtInst(MBB, MBB.end(), TBB, Cond, DL, BytesAdded); + auto &MI = *BuildMI(&MBB, DL, get(Xtensa::J)).addMBB(FBB); + Count++; + if (BytesAdded) + *BytesAdded += getInstSizeInBytes(MI); + return Count; + } + // This function inserts the branch at the end of the MBB + Count += InsertBranchAtInst(MBB, MBB.end(), TBB, Cond, DL, BytesAdded); + return Count; +} + +unsigned XtensaInstrInfo::InsertBranchAtInst(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + MachineBasicBlock *TBB, + ArrayRef Cond, + const DebugLoc &DL, + int *BytesAdded) const { + // Shouldn't be a fall through. 
+ assert(TBB && "InsertBranch must not be told to insert a fallthrough"); + assert(Cond.size() <= 4 && + "Xtensa branch conditions have less than four components!"); + + if (Cond.empty() || (Cond[0].getImm() == Xtensa::J)) { + // Unconditional branch + MachineInstr *MI = BuildMI(MBB, I, DL, get(Xtensa::J)).addMBB(TBB); + if (BytesAdded && MI) + *BytesAdded += getInstSizeInBytes(*MI); + return 1; + } + + unsigned Count = 0; + unsigned BR_C = Cond[0].getImm(); + MachineInstr *MI = nullptr; + switch (BR_C) { + case Xtensa::BEQ: + case Xtensa::BNE: + case Xtensa::BLT: + case Xtensa::BLTU: + case Xtensa::BGE: + case Xtensa::BGEU: + MI = BuildMI(MBB, I, DL, get(BR_C)) + .addReg(Cond[1].getReg()) + .addReg(Cond[2].getReg()) + .addMBB(TBB); + break; + case Xtensa::BEQI: + case Xtensa::BNEI: + case Xtensa::BLTI: + case Xtensa::BLTUI: + case Xtensa::BGEI: + case Xtensa::BGEUI: + MI = BuildMI(MBB, I, DL, get(BR_C)) + .addReg(Cond[1].getReg()) + .addImm(Cond[2].getImm()) + .addMBB(TBB); + break; + case Xtensa::BEQZ: + case Xtensa::BNEZ: + case Xtensa::BLTZ: + case Xtensa::BGEZ: + MI = BuildMI(MBB, I, DL, get(BR_C)).addReg(Cond[1].getReg()).addMBB(TBB); + break; + default: + llvm_unreachable("Invalid branch type!"); + } + if (BytesAdded && MI) + *BytesAdded += getInstSizeInBytes(*MI); + ++Count; + return Count; +} + +bool XtensaInstrInfo::isBranch(const MachineBasicBlock::iterator &MI, + SmallVectorImpl &Cond, + const MachineOperand *&Target) const { + unsigned OpCode = MI->getOpcode(); + switch (OpCode) { + case Xtensa::J: + case Xtensa::JX: + case Xtensa::BR_JT: + Cond[0].setImm(OpCode); + Target = &MI->getOperand(0); + return true; + case Xtensa::BEQ: + case Xtensa::BNE: + case Xtensa::BLT: + case Xtensa::BLTU: + case Xtensa::BGE: + case Xtensa::BGEU: + Cond[0].setImm(OpCode); + Target = &MI->getOperand(2); + return true; + + case Xtensa::BEQI: + case Xtensa::BNEI: + case Xtensa::BLTI: + case Xtensa::BLTUI: + case Xtensa::BGEI: + case Xtensa::BGEUI: + Cond[0].setImm(OpCode); 
+ Target = &MI->getOperand(2); + return true; + + case Xtensa::BEQZ: + case Xtensa::BNEZ: + case Xtensa::BLTZ: + case Xtensa::BGEZ: + Cond[0].setImm(OpCode); + Target = &MI->getOperand(1); + return true; + + default: + assert(!MI->getDesc().isBranch() && "Unknown branch opcode"); + return false; + } +} diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.h b/llvm/lib/Target/Xtensa/XtensaInstrInfo.h index 37f157f832464..1ed38fce35b17 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.h +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.h @@ -72,6 +72,35 @@ class XtensaInstrInfo : public XtensaGenInstrInfo { // physical register Reg. void loadImmediate(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned *Reg, int64_t Value) const; + bool + reverseBranchCondition(SmallVectorImpl &Cond) const override; + bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, + MachineBasicBlock *&FBB, + SmallVectorImpl &Cond, + bool AllowModify) const override; + + unsigned removeBranch(MachineBasicBlock &MBB, + int *BytesRemoved = nullptr) const override; + + unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, + MachineBasicBlock *FBB, ArrayRef Cond, + const DebugLoc &DL, + int *BytesAdded = nullptr) const override; + + unsigned InsertBranchAtInst(MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + MachineBasicBlock *TBB, + ArrayRef Cond, const DebugLoc &DL, + int *BytesAdded) const; + + // Return true if MI is a conditional or unconditional branch. + // When returning true, set Cond to the mask of condition-code + // values on which the instruction will branch, and set Target + // to the operand that contains the branch target. This target + // can be a register or a basic block. 
+ bool isBranch(const MachineBasicBlock::iterator &MI, + SmallVectorImpl &Cond, + const MachineOperand *&Target) const; const XtensaSubtarget &getSubtarget() const { return STI; } }; diff --git a/llvm/test/CodeGen/Xtensa/brcc.ll b/llvm/test/CodeGen/Xtensa/brcc.ll index 8bbc39c536c56..6d542f637cf65 100644 --- a/llvm/test/CodeGen/Xtensa/brcc.ll +++ b/llvm/test/CodeGen/Xtensa/brcc.ll @@ -1,16 +1,15 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=xtensa -disable-block-placement -verify-machineinstrs < %s \ ; RUN: | FileCheck %s define i32 @brcc_sgt(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: brcc_sgt: ; CHECK: bge a3, a2, .LBB0_2 -; CHECK-NEXT: j .LBB0_1 -; CHECK-NEXT: .LBB0_1: # %t1 +; CHECK-NEXT: # %bb.1: # %t1 ; CHECK-NEXT: addi a2, a2, 4 -; CHECK-NEXT: j .LBB0_3 +; CHECK-NEXT: ret ; CHECK-NEXT: .LBB0_2: # %t2 ; CHECK-NEXT: addi a2, a3, 8 -; CHECK-NEXT: .LBB0_3: # %exit ; CHECK-NEXT: ret %wb = icmp sgt i32 %a, %b br i1 %wb, label %t1, label %t2 @@ -28,13 +27,11 @@ exit: define i32 @brcc_ugt(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: brcc_ugt: ; CHECK: bgeu a3, a2, .LBB1_2 -; CHECK-NEXT: j .LBB1_1 -; CHECK-NEXT: .LBB1_1: # %t1 +; CHECK-NEXT: # %bb.1: # %t1 ; CHECK-NEXT: addi a2, a2, 4 -; CHECK-NEXT: j .LBB1_3 +; CHECK-NEXT: ret ; CHECK-NEXT: .LBB1_2: # %t2 ; CHECK-NEXT: addi a2, a3, 8 -; CHECK-NEXT: .LBB1_3: # %exit ; CHECK-NEXT: ret %wb = icmp ugt i32 %a, %b br i1 %wb, label %t1, label %t2 @@ -52,13 +49,11 @@ exit: define i32 @brcc_sle(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: brcc_sle: ; CHECK: blt a3, a2, .LBB2_2 -; CHECK-NEXT: j .LBB2_1 -; CHECK-NEXT: .LBB2_1: # %t1 +; CHECK-NEXT: # %bb.1: # %t1 ; CHECK-NEXT: addi a2, a2, 4 -; CHECK-NEXT: j .LBB2_3 +; CHECK-NEXT: ret ; CHECK-NEXT: .LBB2_2: # %t2 ; CHECK-NEXT: addi a2, a3, 8 -; CHECK-NEXT: .LBB2_3: # %exit ; CHECK-NEXT: ret %wb = icmp sle i32 %a, %b br i1 %wb, label %t1, label %t2 @@ -76,13 +71,11 @@ exit: define i32 @brcc_ule(i32 %a, i32 %b) 
nounwind { ; CHECK-LABEL: brcc_ule: ; CHECK: bltu a3, a2, .LBB3_2 -; CHECK-NEXT: j .LBB3_1 -; CHECK-NEXT: .LBB3_1: # %t1 +; CHECK-NEXT: # %bb.1: # %t1 ; CHECK-NEXT: addi a2, a2, 4 -; CHECK-NEXT: j .LBB3_3 +; CHECK-NEXT: ret ; CHECK-NEXT: .LBB3_2: # %t2 ; CHECK-NEXT: addi a2, a3, 8 -; CHECK-NEXT: .LBB3_3: # %exit ; CHECK-NEXT: ret %wb = icmp ule i32 %a, %b br i1 %wb, label %t1, label %t2 @@ -100,13 +93,11 @@ exit: define i32 @brcc_eq(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: brcc_eq: ; CHECK: bne a2, a3, .LBB4_2 -; CHECK-NEXT: j .LBB4_1 -; CHECK-NEXT: .LBB4_1: # %t1 +; CHECK-NEXT: # %bb.1: # %t1 ; CHECK-NEXT: addi a2, a2, 4 -; CHECK-NEXT: j .LBB4_3 +; CHECK-NEXT: ret ; CHECK-NEXT: .LBB4_2: # %t2 ; CHECK-NEXT: addi a2, a3, 8 -; CHECK-NEXT: .LBB4_3: # %exit ; CHECK-NEXT: ret %wb = icmp eq i32 %a, %b br i1 %wb, label %t1, label %t2 @@ -124,13 +115,11 @@ exit: define i32 @brcc_ne(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: brcc_ne: ; CHECK: beq a2, a3, .LBB5_2 -; CHECK-NEXT: j .LBB5_1 -; CHECK-NEXT: .LBB5_1: # %t1 +; CHECK-NEXT: # %bb.1: # %t1 ; CHECK-NEXT: addi a2, a2, 4 -; CHECK-NEXT: j .LBB5_3 +; CHECK-NEXT: ret ; CHECK-NEXT: .LBB5_2: # %t2 ; CHECK-NEXT: addi a2, a3, 8 -; CHECK-NEXT: .LBB5_3: # %exit ; CHECK-NEXT: ret %wb = icmp ne i32 %a, %b br i1 %wb, label %t1, label %t2 @@ -148,13 +137,11 @@ exit: define i32 @brcc_ge(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: brcc_ge: ; CHECK: blt a2, a3, .LBB6_2 -; CHECK-NEXT: j .LBB6_1 -; CHECK-NEXT: .LBB6_1: # %t1 +; CHECK-NEXT: # %bb.1: # %t1 ; CHECK-NEXT: addi a2, a2, 4 -; CHECK-NEXT: j .LBB6_3 +; CHECK-NEXT: ret ; CHECK-NEXT: .LBB6_2: # %t2 ; CHECK-NEXT: addi a2, a3, 8 -; CHECK-NEXT: .LBB6_3: # %exit ; CHECK-NEXT: ret %wb = icmp sge i32 %a, %b br i1 %wb, label %t1, label %t2 @@ -172,13 +159,11 @@ exit: define i32 @brcc_lt(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: brcc_lt: ; CHECK: bge a2, a3, .LBB7_2 -; CHECK-NEXT: j .LBB7_1 -; CHECK-NEXT: .LBB7_1: # %t1 +; CHECK-NEXT: # %bb.1: # %t1 ; CHECK-NEXT: addi a2, a2, 4 -; CHECK-NEXT: 
j .LBB7_3 +; CHECK-NEXT: ret ; CHECK-NEXT: .LBB7_2: # %t2 ; CHECK-NEXT: addi a2, a3, 8 -; CHECK-NEXT: .LBB7_3: # %exit ; CHECK-NEXT: ret %wb = icmp slt i32 %a, %b br i1 %wb, label %t1, label %t2 @@ -196,13 +181,11 @@ exit: define i32 @brcc_uge(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: brcc_uge: ; CHECK: bltu a2, a3, .LBB8_2 -; CHECK-NEXT: j .LBB8_1 -; CHECK-NEXT: .LBB8_1: # %t1 +; CHECK-NEXT: # %bb.1: # %t1 ; CHECK-NEXT: addi a2, a2, 4 -; CHECK-NEXT: j .LBB8_3 +; CHECK-NEXT: ret ; CHECK-NEXT: .LBB8_2: # %t2 ; CHECK-NEXT: addi a2, a3, 8 -; CHECK-NEXT: .LBB8_3: # %exit ; CHECK-NEXT: ret %wb = icmp uge i32 %a, %b br i1 %wb, label %t1, label %t2 @@ -220,13 +203,11 @@ exit: define i32 @brcc_ult(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: brcc_ult: ; CHECK: bgeu a2, a3, .LBB9_2 -; CHECK-NEXT: j .LBB9_1 -; CHECK-NEXT: .LBB9_1: # %t1 +; CHECK-NEXT: # %bb.1: # %t1 ; CHECK-NEXT: addi a2, a2, 4 -; CHECK-NEXT: j .LBB9_3 +; CHECK-NEXT: ret ; CHECK-NEXT: .LBB9_2: # %t2 ; CHECK-NEXT: addi a2, a3, 8 -; CHECK-NEXT: .LBB9_3: # %exit ; CHECK-NEXT: ret %wb = icmp ult i32 %a, %b br i1 %wb, label %t1, label %t2 diff --git a/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll index f58bed19d4ee7..bad57d58b28a6 100644 --- a/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll +++ b/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll @@ -8,10 +8,8 @@ declare i32 @llvm.ctpop.i32(i32) define i32 @test_cttz_i32(i32 %a) nounwind { ; XTENSA-LABEL: test_cttz_i32: -; XTENSA: movi a8, 32 -; XTENSA-NEXT: beqz a2, .LBB0_2 -; XTENSA-NEXT: j .LBB0_1 -; XTENSA-NEXT: .LBB0_1: # %cond.false +; XTENSA: beqz a2, .LBB0_1 +; XTENSA-NEXT: # %bb.2: # %cond.false ; XTENSA-NEXT: movi a8, -1 ; XTENSA-NEXT: xor a8, a2, a8 ; XTENSA-NEXT: addi a9, a2, -1 @@ -33,9 +31,10 @@ define i32 @test_cttz_i32(i32 %a) nounwind { ; XTENSA-NEXT: add a8, a8, a9 ; XTENSA-NEXT: slli a9, a8, 16 ; XTENSA-NEXT: add a8, a8, a9 -; XTENSA-NEXT: extui a8, a8, 24, 8 -; XTENSA-NEXT: .LBB0_2: # %cond.end -; XTENSA-NEXT: or 
a2, a8, a8 +; XTENSA-NEXT: extui a2, a8, 24, 8 +; XTENSA-NEXT: ret +; XTENSA-NEXT: .LBB0_1: +; XTENSA-NEXT: movi a2, 32 ; XTENSA-NEXT: ret %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 false) ret i32 %tmp @@ -72,13 +71,10 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind { define i32 @test_ctlz_i32(i32 %a) nounwind { ; XTENSA-LABEL: test_ctlz_i32: -; XTENSA: or a8, a2, a2 -; XTENSA-NEXT: movi a2, 32 -; XTENSA-NEXT: beqz a8, .LBB2_2 -; XTENSA-NEXT: j .LBB2_1 -; XTENSA-NEXT: .LBB2_1: # %cond.false -; XTENSA-NEXT: srli a9, a8, 1 -; XTENSA-NEXT: or a8, a8, a9 +; XTENSA: beqz a2, .LBB2_1 +; XTENSA-NEXT: # %bb.2: # %cond.false +; XTENSA-NEXT: srli a8, a2, 1 +; XTENSA-NEXT: or a8, a2, a8 ; XTENSA-NEXT: srli a9, a8, 2 ; XTENSA-NEXT: or a8, a8, a9 ; XTENSA-NEXT: srli a9, a8, 4 @@ -107,7 +103,9 @@ define i32 @test_ctlz_i32(i32 %a) nounwind { ; XTENSA-NEXT: slli a9, a8, 16 ; XTENSA-NEXT: add a8, a8, a9 ; XTENSA-NEXT: extui a2, a8, 24, 8 -; XTENSA-NEXT: .LBB2_2: # %cond.end +; XTENSA-NEXT: ret +; XTENSA-NEXT: .LBB2_1: +; XTENSA-NEXT: movi a2, 32 ; XTENSA-NEXT: ret %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 false) ret i32 %tmp diff --git a/llvm/test/CodeGen/Xtensa/select-cc.ll b/llvm/test/CodeGen/Xtensa/select-cc.ll index 812e6a5b852ea..c86aa9f33ca36 100644 --- a/llvm/test/CodeGen/Xtensa/select-cc.ll +++ b/llvm/test/CodeGen/Xtensa/select-cc.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; RUN: llc -mtriple=xtensa -disable-block-placement -verify-machineinstrs < %s \ ; RUN: | FileCheck %s @@ -161,12 +162,10 @@ define i32 @f_slt_imm(i32 %a, ptr %b) nounwind { define i32 @f_sgt_imm(i32 %a, ptr %b) nounwind { ; CHECK-LABEL: f_sgt_imm: -; CHECK: or a8, a2, a2 -; CHECK-NEXT: l32i a2, a3, 0 -; CHECK-NEXT: movi a9, -1 -; CHECK-NEXT: bge a9, a8, .LBB11_2 +; CHECK: movi a8, -1 +; CHECK-NEXT: blt a8, a2, .LBB11_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: or a2, a8, a8 +; CHECK-NEXT: l32i a2, a3, 0 ; CHECK-NEXT: 
.LBB11_2: ; CHECK-NEXT: ret %val1 = load i32, ptr %b diff --git a/llvm/utils/UpdateTestChecks/asm.py b/llvm/utils/UpdateTestChecks/asm.py index f150098eaaeef..a929e53c2913b 100644 --- a/llvm/utils/UpdateTestChecks/asm.py +++ b/llvm/utils/UpdateTestChecks/asm.py @@ -109,6 +109,13 @@ class string: flags=(re.M | re.S), ) +ASM_FUNCTION_XTENSA_RE = re.compile( + r'^_?(?P[^.:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n\s?\.?(cfi_startproc\s)?# %bb.0:.*?\n' + r'(?P.*?)\n' + r'^\.Lfunc_end\d+:\n', # Match the end label + flags=(re.M | re.S) +) + ASM_FUNCTION_PPC_RE = re.compile( r"#[ \-\t]*Begin function (?P[^.:]+)\n" r".*?" @@ -525,6 +532,15 @@ def scrub_asm_loongarch(asm, args): asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm) return asm +def scrub_asm_xtensa(asm, args): + # Scrub runs of whitespace out of the assembly, but leave the leading + # whitespace in place. + asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm) + # Expand the tabs used for indentation. + asm = string.expandtabs(asm, 2) + # Strip trailing whitespace. + asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm) + return asm # Returns a tuple of a scrub function and a function regex. Scrub function is # used to alter function body in some way, for example, remove trailing spaces. 
@@ -579,6 +595,7 @@ def get_run_handler(triple): "nvptx": (scrub_asm_nvptx, ASM_FUNCTION_NVPTX_RE), "loongarch32": (scrub_asm_loongarch, ASM_FUNCTION_LOONGARCH_RE), "loongarch64": (scrub_asm_loongarch, ASM_FUNCTION_LOONGARCH_RE), + "xtensa": (scrub_asm_xtensa, ASM_FUNCTION_XTENSA_RE), } handler = None best_prefix = "" From 36cf87e1745c842a45459638984a31d34cccecea Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 14 Aug 2024 20:42:06 +0300 Subject: [PATCH 016/289] [Xtensa] Implement support for the BranchRelaxation --- llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp | 181 ++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaInstrInfo.h | 16 +- llvm/lib/Target/Xtensa/XtensaRegisterInfo.h | 4 + .../lib/Target/Xtensa/XtensaTargetMachine.cpp | 3 + llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll | 12 +- 5 files changed, 208 insertions(+), 8 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp index e46fa08c8fc26..74c9203412c3c 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp @@ -13,11 +13,13 @@ //===----------------------------------------------------------------------===// #include "XtensaInstrInfo.h" +#include "XtensaConstantPoolValue.h" #include "XtensaMachineFunctionInfo.h" #include "XtensaTargetMachine.h" #include "llvm/CodeGen/MachineConstantPool.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/RegisterScavenging.h" #define GET_INSTRINFO_CTOR_DTOR #include "XtensaGenInstrInfo.inc" @@ -185,6 +187,18 @@ void XtensaInstrInfo::loadImmediate(MachineBasicBlock &MBB, } } +unsigned XtensaInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { + switch (MI.getOpcode()) { + case TargetOpcode::INLINEASM: { // Inline Asm: Variable size. 
+ const MachineFunction *MF = MI.getParent()->getParent(); + const char *AsmStr = MI.getOperand(0).getSymbolName(); + return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo()); + } + default: + return MI.getDesc().getSize(); + } +} + bool XtensaInstrInfo::reverseBranchCondition( SmallVectorImpl &Cond) const { assert(Cond.size() <= 4 && "Invalid branch condition!"); @@ -246,6 +260,77 @@ bool XtensaInstrInfo::reverseBranchCondition( } } +MachineBasicBlock * +XtensaInstrInfo::getBranchDestBlock(const MachineInstr &MI) const { + unsigned OpCode = MI.getOpcode(); + switch (OpCode) { + case Xtensa::BR_JT: + case Xtensa::JX: + return nullptr; + case Xtensa::J: + return MI.getOperand(0).getMBB(); + case Xtensa::BEQ: + case Xtensa::BNE: + case Xtensa::BLT: + case Xtensa::BLTU: + case Xtensa::BGE: + case Xtensa::BGEU: + return MI.getOperand(2).getMBB(); + + case Xtensa::BEQI: + case Xtensa::BNEI: + case Xtensa::BLTI: + case Xtensa::BLTUI: + case Xtensa::BGEI: + case Xtensa::BGEUI: + return MI.getOperand(2).getMBB(); + + case Xtensa::BEQZ: + case Xtensa::BNEZ: + case Xtensa::BLTZ: + case Xtensa::BGEZ: + return MI.getOperand(1).getMBB(); + + default: + llvm_unreachable("Unknown branch opcode"); + } +} + +bool XtensaInstrInfo::isBranchOffsetInRange(unsigned BranchOp, + int64_t BrOffset) const { + switch (BranchOp) { + case Xtensa::J: + BrOffset -= 4; + return isIntN(18, BrOffset); + case Xtensa::JX: + return true; + case Xtensa::BR_JT: + return true; + case Xtensa::BEQ: + case Xtensa::BNE: + case Xtensa::BLT: + case Xtensa::BLTU: + case Xtensa::BGE: + case Xtensa::BGEU: + case Xtensa::BEQI: + case Xtensa::BNEI: + case Xtensa::BLTI: + case Xtensa::BLTUI: + case Xtensa::BGEI: + case Xtensa::BGEUI: + BrOffset -= 4; + return isIntN(8, BrOffset); + case Xtensa::BEQZ: + case Xtensa::BNEZ: + case Xtensa::BLTZ: + case Xtensa::BGEZ: + BrOffset -= 4; + return isIntN(12, BrOffset); + default: + llvm_unreachable("Unknown branch opcode"); + } +} + bool 
XtensaInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, @@ -378,6 +463,102 @@ unsigned XtensaInstrInfo::insertBranch( return Count; } +void XtensaInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB, + MachineBasicBlock &DestBB, + MachineBasicBlock &RestoreBB, + const DebugLoc &DL, + int64_t BrOffset, + RegScavenger *RS) const { + assert(RS && "RegScavenger required for long branching"); + assert(MBB.empty() && + "new block should be inserted for expanding unconditional branch"); + assert(MBB.pred_size() == 1); + + MachineFunction *MF = MBB.getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + MachineConstantPool *ConstantPool = MF->getConstantPool(); + + if (!isInt<32>(BrOffset)) + report_fatal_error( + "Branch offsets outside of the signed 32-bit range not supported"); + XtensaConstantPoolValue *C = + XtensaConstantPoolMBB::Create(MF->getFunction().getContext(), &DestBB, 0); + unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align(4)); + + // FIXME: A virtual register must be used initially, as the register + // scavenger won't work with empty blocks (SIInstrInfo::insertIndirectBranch + // uses the same workaround). + Register ScratchReg = MRI.createVirtualRegister(&Xtensa::ARRegClass); + auto II = MBB.end(); + + MachineInstr &L32R = *BuildMI(MBB, II, DL, get(Xtensa::L32R), ScratchReg) + .addConstantPoolIndex(Idx); + BuildMI(MBB, II, DL, get(Xtensa::JX)).addReg(ScratchReg, RegState::Kill); + RS->enterBasicBlockEnd(MBB); + unsigned Scav = RS->scavengeRegisterBackwards(Xtensa::ARRegClass, + L32R.getIterator(), false, 0); + MRI.replaceRegWith(ScratchReg, Scav); + MRI.clearVirtRegs(); + RS->setRegUsed(Scav); +} + +unsigned XtensaInstrInfo::InsertConstBranchAtInst( + MachineBasicBlock &MBB, MachineInstr *I, int64_t offset, + ArrayRef Cond, DebugLoc DL, int *BytesAdded) const { + // Shouldn't be a fall through. 
+ assert(&MBB && "InsertBranch must not be told to insert a fallthrough"); + assert(Cond.size() <= 4 && + "Xtensa branch conditions have less than four components!"); + + if (Cond.empty() || (Cond[0].getImm() == Xtensa::J)) { + // Unconditional branch + MachineInstr *MI = BuildMI(MBB, I, DL, get(Xtensa::J)).addImm(offset); + if (BytesAdded && MI) + *BytesAdded += getInstSizeInBytes(*MI); + return 1; + } + + unsigned Count = 0; + unsigned BR_C = Cond[0].getImm(); + MachineInstr *MI = nullptr; + switch (BR_C) { + case Xtensa::BEQ: + case Xtensa::BNE: + case Xtensa::BLT: + case Xtensa::BLTU: + case Xtensa::BGE: + case Xtensa::BGEU: + MI = BuildMI(MBB, I, DL, get(BR_C)) + .addImm(offset) + .addReg(Cond[1].getReg()) + .addReg(Cond[2].getReg()); + break; + case Xtensa::BEQI: + case Xtensa::BNEI: + case Xtensa::BLTI: + case Xtensa::BLTUI: + case Xtensa::BGEI: + case Xtensa::BGEUI: + MI = BuildMI(MBB, I, DL, get(BR_C)) + .addImm(offset) + .addReg(Cond[1].getReg()) + .addImm(Cond[2].getImm()); + break; + case Xtensa::BEQZ: + case Xtensa::BNEZ: + case Xtensa::BLTZ: + case Xtensa::BGEZ: + MI = BuildMI(MBB, I, DL, get(BR_C)).addImm(offset).addReg(Cond[1].getReg()); + break; + default: + llvm_unreachable("Invalid branch type!"); + } + if (BytesAdded && MI) + *BytesAdded += getInstSizeInBytes(*MI); + ++Count; + return Count; +} + unsigned XtensaInstrInfo::InsertBranchAtInst(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MachineBasicBlock *TBB, diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.h b/llvm/lib/Target/Xtensa/XtensaInstrInfo.h index 1ed38fce35b17..b03a030a6b4f8 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.h +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.h @@ -37,6 +37,7 @@ class XtensaInstrInfo : public XtensaGenInstrInfo { void adjustStackPtr(unsigned SP, int64_t Amount, MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const; + unsigned getInstSizeInBytes(const MachineInstr &MI) const override; // Return the XtensaRegisterInfo, which this class 
owns. const XtensaRegisterInfo &getRegisterInfo() const { return RI; } @@ -74,6 +75,10 @@ class XtensaInstrInfo : public XtensaGenInstrInfo { unsigned *Reg, int64_t Value) const; bool reverseBranchCondition(SmallVectorImpl &Cond) const override; + MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override; + + bool isBranchOffsetInRange(unsigned BranchOpc, + int64_t BrOffset) const override; bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl &Cond, @@ -86,13 +91,20 @@ class XtensaInstrInfo : public XtensaGenInstrInfo { MachineBasicBlock *FBB, ArrayRef Cond, const DebugLoc &DL, int *BytesAdded = nullptr) const override; - + void insertIndirectBranch(MachineBasicBlock &MBB, + MachineBasicBlock &DestBB, + MachineBasicBlock &RestoreBB, const DebugLoc &DL, + int64_t BrOffset = 0, + RegScavenger *RS = nullptr) const override; unsigned InsertBranchAtInst(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MachineBasicBlock *TBB, ArrayRef Cond, const DebugLoc &DL, int *BytesAdded) const; - + unsigned InsertConstBranchAtInst(MachineBasicBlock &MBB, MachineInstr *I, + int64_t offset, + ArrayRef Cond, DebugLoc DL, + int *BytesAdded) const; // Return true if MI is a conditional or unconditional branch. 
// When returning true, set Cond to the mask of condition-code // values on which the instruction will branch, and set Target diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.h b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.h index 8643ebb1c0f15..ede0eeb90b42d 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.h +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.h @@ -38,6 +38,10 @@ class XtensaRegisterInfo : public XtensaGenRegisterInfo { return true; } + bool trackLivenessAfterRegAlloc(const MachineFunction &) const override { + return true; + } + const uint16_t * getCalleeSavedRegs(const MachineFunction *MF = 0) const override; const uint32_t *getCallPreservedMask(const MachineFunction &MF, diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp index d8031f6a05ab7..4ad51e05df972 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp +++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp @@ -102,6 +102,7 @@ class XtensaPassConfig : public TargetPassConfig { } bool addInstSelector() override; + void addPreEmitPass() override; }; } // end anonymous namespace @@ -110,6 +111,8 @@ bool XtensaPassConfig::addInstSelector() { return false; } +void XtensaPassConfig::addPreEmitPass() { addPass(&BranchRelaxationPassID); } + TargetPassConfig *XtensaTargetMachine::createPassConfig(PassManagerBase &PM) { return new XtensaPassConfig(*this, PM); } diff --git a/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll index bad57d58b28a6..6030323538625 100644 --- a/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll +++ b/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll @@ -8,8 +8,8 @@ declare i32 @llvm.ctpop.i32(i32) define i32 @test_cttz_i32(i32 %a) nounwind { ; XTENSA-LABEL: test_cttz_i32: -; XTENSA: beqz a2, .LBB0_1 -; XTENSA-NEXT: # %bb.2: # %cond.false +; XTENSA: beqz a2, .LBB0_2 +; XTENSA-NEXT: # %bb.1: # %cond.false ; XTENSA-NEXT: movi a8, -1 ; XTENSA-NEXT: xor a8, a2, a8 ; XTENSA-NEXT: addi 
a9, a2, -1 @@ -33,7 +33,7 @@ define i32 @test_cttz_i32(i32 %a) nounwind { ; XTENSA-NEXT: add a8, a8, a9 ; XTENSA-NEXT: extui a2, a8, 24, 8 ; XTENSA-NEXT: ret -; XTENSA-NEXT: .LBB0_1: +; XTENSA-NEXT: .LBB0_2: ; XTENSA-NEXT: movi a2, 32 ; XTENSA-NEXT: ret %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 false) @@ -71,8 +71,8 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind { define i32 @test_ctlz_i32(i32 %a) nounwind { ; XTENSA-LABEL: test_ctlz_i32: -; XTENSA: beqz a2, .LBB2_1 -; XTENSA-NEXT: # %bb.2: # %cond.false +; XTENSA: beqz a2, .LBB2_2 +; XTENSA-NEXT: # %bb.1: # %cond.false ; XTENSA-NEXT: srli a8, a2, 1 ; XTENSA-NEXT: or a8, a2, a8 ; XTENSA-NEXT: srli a9, a8, 2 @@ -104,7 +104,7 @@ define i32 @test_ctlz_i32(i32 %a) nounwind { ; XTENSA-NEXT: add a8, a8, a9 ; XTENSA-NEXT: extui a2, a8, 24, 8 ; XTENSA-NEXT: ret -; XTENSA-NEXT: .LBB2_1: +; XTENSA-NEXT: .LBB2_2: ; XTENSA-NEXT: movi a2, 32 ; XTENSA-NEXT: ret %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 false) From e525aee8d0741dcd2922b2fe17cce4f75ab24a85 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 14 Aug 2024 21:17:22 +0300 Subject: [PATCH 017/289] [Xtensa] Implement code density feature operations --- .../Xtensa/AsmParser/XtensaAsmParser.cpp | 10 +++ .../Disassembler/XtensaDisassembler.cpp | 62 +++++++++++++++++ .../Xtensa/MCTargetDesc/XtensaInstPrinter.cpp | 22 ++++++ .../Xtensa/MCTargetDesc/XtensaInstPrinter.h | 2 + .../MCTargetDesc/XtensaMCCodeEmitter.cpp | 48 ++++++++++++- llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp | 8 ++- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 4 +- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 68 +++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaOperands.td | 14 ++++ 9 files changed, 235 insertions(+), 3 deletions(-) diff --git a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp index b0ce624a495fd..232385957a5a8 100644 --- a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp +++ 
b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp @@ -193,6 +193,10 @@ struct XtensaOperand : public MCParsedAsmOperand { bool isImm1_16() const { return isImm(1, 16); } + bool isImm1n_15() const { return (isImm(1, 15) || isImm(-1, -1)); } + + bool isImm32n_95() const { return isImm(-32, 95); } + bool isB4const() const { if (Kind != Immediate) return false; @@ -480,6 +484,12 @@ bool XtensaAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, case Match_InvalidImm1_16: return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), "expected immediate in range [1, 16]"); + case Match_InvalidImm1n_15: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [-1, 15] except 0"); + case Match_InvalidImm32n_95: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [-32, 95]"); case Match_InvalidShimm1_31: return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), "expected immediate in range [1, 31]"); diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index 2d36b94dd40c7..612f43f7b7203 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -190,6 +190,28 @@ static DecodeStatus decodeImm1_16Operand(MCInst &Inst, uint64_t Imm, return MCDisassembler::Success; } +static DecodeStatus decodeImm1n_15Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<4>(Imm) && "Invalid immediate"); + if (!Imm) + Inst.addOperand(MCOperand::createImm(-1)); + else + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeImm32n_95Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<7>(Imm) && "Invalid immediate"); + if ((Imm & 0x60) == 0x60) + Inst.addOperand(MCOperand::createImm((~0x1f) | Imm)); + else + 
Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + static DecodeStatus decodeShimm1_31Operand(MCInst &Inst, uint64_t Imm, int64_t Address, const void *Decoder) { @@ -243,6 +265,34 @@ static DecodeStatus decodeMem32Operand(MCInst &Inst, uint64_t Imm, return MCDisassembler::Success; } +static DecodeStatus decodeMem32nOperand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + DecodeARRegisterClass(Inst, Imm & 0xf, Address, Decoder); + Inst.addOperand(MCOperand::createImm((Imm >> 2) & 0x3c)); + return MCDisassembler::Success; +} + +/// Read two bytes from the ArrayRef and return 16 bit data sorted +/// according to the given endianness. +static DecodeStatus readInstruction16(ArrayRef Bytes, uint64_t Address, + uint64_t &Size, uint32_t &Insn, + bool IsLittleEndian) { + // We want to read exactly 2 Bytes of data. + if (Bytes.size() < 2) { + Size = 0; + return MCDisassembler::Fail; + } + + if (!IsLittleEndian) { + llvm_unreachable("Big-endian mode currently is not supported!"); + } else { + Insn = (Bytes[1] << 8) | Bytes[0]; + } + + return MCDisassembler::Success; +} + /// Read three bytes from the ArrayRef and return 24 bit data static DecodeStatus readInstruction24(ArrayRef Bytes, uint64_t Address, uint64_t &Size, uint32_t &Insn, @@ -272,6 +322,18 @@ DecodeStatus XtensaDisassembler::getInstruction(MCInst &MI, uint64_t &Size, uint32_t Insn; DecodeStatus Result; + if (hasDensity()) { + Result = readInstruction16(Bytes, Address, Size, Insn, IsLittleEndian); + if (Result == MCDisassembler::Fail) + return MCDisassembler::Fail; + LLVM_DEBUG(dbgs() << "Trying Xtensa 16-bit instruction table :\n"); + Result = decodeInstruction(DecoderTable16, MI, Insn, Address, this, STI); + if (Result != MCDisassembler::Fail) { + Size = 2; + return Result; + } + } + Result = readInstruction24(Bytes, Address, Size, Insn, IsLittleEndian); if (Result == MCDisassembler::Fail) return 
MCDisassembler::Fail; diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp index 10becc9e8c83b..d6175eb80eadb 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp @@ -251,6 +251,28 @@ void XtensaInstPrinter::printImm1_16_AsmOperand(const MCInst *MI, int OpNum, printOperand(MI, OpNum, O); } +void XtensaInstPrinter::printImm1n_15_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -1 && (Value != 0) && Value <= 15) && + "Invalid argument, value must be in ranges <-1,-1> or <1,15>"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printImm32n_95_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -32 && Value <= 95) && + "Invalid argument, value must be in ranges <-32,95>"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + void XtensaInstPrinter::printOffset8m8_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O) { if (MI->getOperand(OpNum).isImm()) { diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h index 34d03569b9bce..ca87d79bfb67d 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h @@ -60,6 +60,8 @@ class XtensaInstPrinter : public MCInstPrinter { void printUimm5_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printShimm1_31_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printImm1_16_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printImm1n_15_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void 
printImm32n_95_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printOffset8m8_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printOffset8m16_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printOffset8m32_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp index 1afdbb38f9571..179887fa0fb08 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp @@ -103,6 +103,14 @@ class XtensaMCCodeEmitter : public MCCodeEmitter { SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; + uint32_t getImm1n_15OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint32_t getImm32n_95OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + uint32_t getShimm1_31OpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; @@ -255,6 +263,8 @@ XtensaMCCodeEmitter::getMemRegEncoding(const MCInst &MI, unsigned OpNo, break; case Xtensa::S32I: case Xtensa::L32I: + case Xtensa::S32I_N: + case Xtensa::L32I_N: if (Res & 0x3) { report_fatal_error("Unexpected operand value!"); } @@ -262,7 +272,15 @@ XtensaMCCodeEmitter::getMemRegEncoding(const MCInst &MI, unsigned OpNo, break; } - assert((isUInt<8>(Res)) && "Unexpected operand value!"); + switch (MI.getOpcode()) { + case Xtensa::S32I_N: + case Xtensa::L32I_N: + assert((isUInt<4>(Res)) && "Unexpected operand value!"); + break; + default: + assert((isUInt<8>(Res)) && "Unexpected operand value!"); + break; + } uint32_t OffBits = Res << 4; uint32_t RegBits = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups, STI); @@ -354,6 +372,34 @@ XtensaMCCodeEmitter::getImm1_16OpValue(const MCInst &MI, unsigned OpNo, return (Res - 1); } +uint32_t 
+XtensaMCCodeEmitter::getImm1n_15OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int32_t Res = static_cast(MO.getImm()); + + assert(((Res >= -1) && (Res <= 15) && (Res != 0)) && + "Unexpected operand value!"); + + if (Res < 0) + Res = 0; + + return Res; +} + +uint32_t +XtensaMCCodeEmitter::getImm32n_95OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int32_t Res = static_cast(MO.getImm()); + + assert(((Res >= -32) && (Res <= 95)) && "Unexpected operand value!"); + + return Res; +} + uint32_t XtensaMCCodeEmitter::getB4constOpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, diff --git a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp index 95032a7c9c8e6..3d93a2cd1c516 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp @@ -27,9 +27,10 @@ using namespace llvm; namespace { class XtensaDAGToDAGISel : public SelectionDAGISel { + const XtensaSubtarget *Subtarget; public: XtensaDAGToDAGISel(XtensaTargetMachine &TM, CodeGenOptLevel OptLevel) - : SelectionDAGISel(TM, OptLevel) {} + : SelectionDAGISel(TM, OptLevel), Subtarget(nullptr) {} void Select(SDNode *Node) override; @@ -102,6 +103,11 @@ class XtensaDAGToDAGISel : public SelectionDAGISel { return selectMemRegAddr(Addr, Base, Offset, 4); } + bool runOnMachineFunction(MachineFunction &MF) { + Subtarget = &MF.getSubtarget(); + return SelectionDAGISel::runOnMachineFunction(MF); + } + // Include the pieces autogenerated from the target description. 
#include "XtensaGenDAGISel.inc" }; // namespace diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 9690b5c656f6e..f957f297c90a7 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -1314,10 +1314,12 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( case Xtensa::S8I: case Xtensa::S16I: case Xtensa::S32I: + case Xtensa::S32I_N: case Xtensa::L8UI: case Xtensa::L16SI: case Xtensa::L16UI: - case Xtensa::L32I: { + case Xtensa::L32I: + case Xtensa::L32I_N: { const MachineMemOperand &MMO = **MI.memoperands_begin(); if (MMO.isVolatile()) { BuildMI(*MBB, MI, DL, TII.get(Xtensa::MEMW)); diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index cf73238f6efbd..adaa20dcaf483 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -582,3 +582,71 @@ let usesCustomInserter = 1 in { "!select $dst, $lhs, $rhs, $t, $f, $cond", [(set i32:$dst, (Xtensa_select_cc i32:$lhs, i32:$rhs, i32:$t, i32:$f, imm:$cond))]>; } + +//===----------------------------------------------------------------------===// +// Code Density instructions +//===----------------------------------------------------------------------===// + +class ArithLogic_RRRN oper0, string instrAsm, + SDPatternOperator opNode, bit isComm = 0> + : RRRN_Inst, Requires<[HasDensity]> { + let isCommutable = isComm; + let isReMaterializable = 0; +} + +def ADD_N : ArithLogic_RRRN<0x0a, "add.n", add, 1>; + +def ADDI_N : RRRN_Inst<0x0B, (outs AR:$r), (ins AR:$s, imm1n_15:$imm), + "addi.n\t$r, $s, $imm", + [(set AR:$r, (add AR:$s, imm1n_15:$imm))]>, Requires<[HasDensity]> { + bits<4> imm; + + let t = imm; +} + +def MOV_N : RRRN_Inst<0x0D, (outs AR:$t), (ins AR:$s), + "mov.n\t$t, $s", []>, Requires<[HasDensity]> { + let r = 0; +} + +def : InstAlias<"mov\t $t, $s", (OR AR:$t, AR:$s, AR:$s)>; + +def MOVI_N 
: RI7_Inst<0xc, 0x0, (outs AR:$s), (ins imm32n_95:$imm7), + "movi.n\t$s, $imm7", + [(set AR:$s, imm32n_95:$imm7)]>, Requires<[HasDensity]>; + +// Load instruction +let mayLoad = 1, usesCustomInserter = 1 in { + def L32I_N : RRRN_Inst<0x8, (outs AR:$t), (ins mem32n:$addr), + "l32i.n\t$t, $addr", []>, Requires<[HasDensity]> { + bits<8> addr; + + let r{3-0} = addr{7-4}; + let s{3-0} = addr{3-0}; + } +} + +// Store instruction +let mayStore = 1, usesCustomInserter = 1 in { + def S32I_N : RRRN_Inst<0x9, (outs), (ins AR:$t, mem32n:$addr), + "s32i.n\t$t, $addr", []>, Requires<[HasDensity]> { + bits<8> addr; + + let r{3-0} = addr{7-4}; + let s{3-0} = addr{3-0}; + } +} + +//Return instruction +let isReturn = 1, isTerminator = 1, + isBarrier = 1, Uses = [A0] in { + def RET_N : RRRN_Inst<0x0D, (outs), (ins), + "ret.n", [(Xtensa_ret)]>, + Requires<[HasDensity]> { + let r = 0x0F; + let s = 0; + let t = 0; + } +} diff --git a/llvm/lib/Target/Xtensa/XtensaOperands.td b/llvm/lib/Target/Xtensa/XtensaOperands.td index f41081f9bf2f9..aa72fa0a56a6f 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperands.td +++ b/llvm/lib/Target/Xtensa/XtensaOperands.td @@ -72,6 +72,20 @@ def imm1_16 : Immediate= 1 && Imm <= 16; }], "Imm1_16_AsmOp let DecoderMethod = "decodeImm1_16Operand"; } +// imm1n_15 predicate - Immediate in the range [-1,15], except 0 +def Imm1n_15_AsmOperand: ImmAsmOperand<"Imm1n_15">; +def imm1n_15: Immediate= -1 && Imm <= 15 && Imm != 0; }], "Imm1n_15_AsmOperand"> { + let EncoderMethod = "getImm1n_15OpValue"; + let DecoderMethod = "decodeImm1n_15Operand"; +} + +// imm32n_95 predicate - Immediate in the range [-32,95] +def Imm32n_95_AsmOperand: ImmAsmOperand<"Imm32n_95">; +def imm32n_95: Immediate= -32 && Imm <= 95; }], "Imm32n_95_AsmOperand"> { + let EncoderMethod = "getImm32n_95OpValue"; + let DecoderMethod = "decodeImm32n_95Operand"; +} + // shimm1_31 predicate - Immediate in the range [1,31] def Shimm1_31_AsmOperand : ImmAsmOperand<"Shimm1_31">; def shimm1_31 : Immediate= 1 && 
Imm <= 31; }], "Shimm1_31_AsmOperand"> { From f48a873a306bbf9f0c19c32d6a3db7336e6f82f0 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 14 Aug 2024 21:36:04 +0300 Subject: [PATCH 018/289] [Xtensa] Add code size reduction pass. --- llvm/lib/Target/Xtensa/CMakeLists.txt | 1 + llvm/lib/Target/Xtensa/Xtensa.h | 2 + .../Target/Xtensa/XtensaSizeReductionPass.cpp | 253 ++++++++++++++++++ .../lib/Target/Xtensa/XtensaTargetMachine.cpp | 5 +- 4 files changed, 260 insertions(+), 1 deletion(-) create mode 100644 llvm/lib/Target/Xtensa/XtensaSizeReductionPass.cpp diff --git a/llvm/lib/Target/Xtensa/CMakeLists.txt b/llvm/lib/Target/Xtensa/CMakeLists.txt index cc6109ed7d085..aeeec1dfbd2f9 100644 --- a/llvm/lib/Target/Xtensa/CMakeLists.txt +++ b/llvm/lib/Target/Xtensa/CMakeLists.txt @@ -23,6 +23,7 @@ add_llvm_target(XtensaCodeGen XtensaISelLowering.cpp XtensaMachineFunctionInfo.cpp XtensaRegisterInfo.cpp + XtensaSizeReductionPass.cpp XtensaSubtarget.cpp XtensaTargetMachine.cpp XtensaUtils.cpp diff --git a/llvm/lib/Target/Xtensa/Xtensa.h b/llvm/lib/Target/Xtensa/Xtensa.h index da44e30f367fa..bbf580ffd8904 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.h +++ b/llvm/lib/Target/Xtensa/Xtensa.h @@ -24,5 +24,7 @@ class FunctionPass; FunctionPass *createXtensaISelDag(XtensaTargetMachine &TM, CodeGenOptLevel OptLevel); + +FunctionPass *createXtensaSizeReductionPass(); } // namespace llvm #endif // LLVM_LIB_TARGET_XTENSA_XTENSA_H diff --git a/llvm/lib/Target/Xtensa/XtensaSizeReductionPass.cpp b/llvm/lib/Target/Xtensa/XtensaSizeReductionPass.cpp new file mode 100644 index 0000000000000..f69c1e601a788 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaSizeReductionPass.cpp @@ -0,0 +1,253 @@ +//===- XtensaSizeReductionPass.cpp - Xtensa Size Reduction ----------------===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "Xtensa.h" +#include "XtensaInstrInfo.h" +#include "XtensaSubtarget.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/CodeGen//MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Target/TargetMachine.h" + +using namespace llvm; + +#define DEBUG_TYPE "xtensa-size-reduce-pass" + +STATISTIC(NumReduced, "Number of 24-bit instructions reduced to 16-bit ones"); + +class XtensaSizeReduce : public MachineFunctionPass { +public: + static char ID; + XtensaSizeReduce() : MachineFunctionPass(ID) {} + + const XtensaSubtarget *Subtarget; + static const XtensaInstrInfo *XtensaII; + + bool runOnMachineFunction(MachineFunction &MF) override; + + llvm::StringRef getPassName() const override { + return "Xtensa instruction size reduction pass"; + } + +private: + /// Reduces width of instructions in the specified basic block. + bool ReduceMBB(MachineBasicBlock &MBB); + + /// Attempts to reduce MI, returns true on success. 
+ bool ReduceMI(const MachineBasicBlock::instr_iterator &MII); +}; + +char XtensaSizeReduce::ID = 0; +const XtensaInstrInfo *XtensaSizeReduce::XtensaII; + +bool XtensaSizeReduce::ReduceMI(const MachineBasicBlock::instr_iterator &MII) { + MachineInstr *MI = &*MII; + MachineBasicBlock &MBB = *MI->getParent(); + unsigned Opcode = MI->getOpcode(); + + switch (Opcode) { + case Xtensa::L32I: { + MachineOperand Op0 = MI->getOperand(0); + MachineOperand Op1 = MI->getOperand(1); + MachineOperand Op2 = MI->getOperand(2); + + int64_t Imm = Op2.getImm(); + if (Imm >= 0 && Imm <= 60) { + // Replace L32I to L32I.N + DebugLoc dl = MI->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::L32I_N); + MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); + MIB.add(Op0); + MIB.add(Op1); + MIB.add(Op2); + // Transfer MI flags. + MIB.setMIFlags(MI->getFlags()); + LLVM_DEBUG(dbgs() << " to 16-bit: " << *MIB); + NumReduced++; + MBB.erase_instr(MI); + return true; + } + } break; + + case Xtensa::S32I: { + MachineOperand Op0 = MI->getOperand(0); + MachineOperand Op1 = MI->getOperand(1); + MachineOperand Op2 = MI->getOperand(2); + + int64_t Imm = Op2.getImm(); + if (Imm >= 0 && Imm <= 60) { + // Replace S32I to S32I.N + DebugLoc dl = MI->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::S32I_N); + MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); + MIB.add(Op0); + MIB.add(Op1); + MIB.add(Op2); + // Transfer MI flags. 
+ MIB.setMIFlags(MI->getFlags()); + LLVM_DEBUG(dbgs() << " to 16-bit: " << *MIB); + NumReduced++; + MBB.erase_instr(MI); + return true; + } + + } break; + + case Xtensa::MOVI: { + MachineOperand Op0 = MI->getOperand(0); + MachineOperand Op1 = MI->getOperand(1); + + int64_t Imm = Op1.getImm(); + if (Imm >= -32 && Imm <= 95) { + // Replace MOVI to MOVI.N + DebugLoc dl = MI->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::MOVI_N); + MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); + MIB.add(Op0); + MIB.add(Op1); + // Transfer MI flags. + MIB.setMIFlags(MI->getFlags()); + LLVM_DEBUG(dbgs() << " to 16-bit: " << *MIB); + NumReduced++; + MBB.erase_instr(MI); + return true; + } + + } break; + + case Xtensa::ADD: { + MachineOperand Op0 = MI->getOperand(0); + MachineOperand Op1 = MI->getOperand(1); + MachineOperand Op2 = MI->getOperand(2); + + // Replace ADD to ADD.N + DebugLoc dl = MI->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::ADD_N); + MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); + MIB.add(Op0); + MIB.add(Op1); + MIB.add(Op2); + // Transfer MI flags. + MIB.setMIFlags(MI->getFlags()); + LLVM_DEBUG(dbgs() << " to 16-bit: " << *MIB); + NumReduced++; + MBB.erase_instr(MI); + return true; + + } break; + + case Xtensa::ADDI: { + MachineOperand Op0 = MI->getOperand(0); + MachineOperand Op1 = MI->getOperand(1); + MachineOperand Op2 = MI->getOperand(2); + + int64_t Imm = Op2.getImm(); + if ((Imm >= 1 && Imm <= 15) || (Imm == -1)) { + // Replace ADDI to ADDI.N + DebugLoc dl = MI->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::ADDI_N); + MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); + MIB.add(Op0); + MIB.add(Op1); + MIB.add(Op2); + // Transfer MI flags. 
+ MIB.setMIFlags(MI->getFlags()); + LLVM_DEBUG(dbgs() << " to 16-bit: " << *MIB); + NumReduced++; + MBB.erase_instr(MI); + return true; + } + } break; + + case Xtensa::OR: { + MachineOperand Op0 = MI->getOperand(0); + MachineOperand Op1 = MI->getOperand(1); + MachineOperand Op2 = MI->getOperand(2); + + if (Op1.getReg() != Op2.getReg()) + break; + + // Replace OR R1, R2, R2 to MOV.N R1, R2 + DebugLoc dl = MI->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::MOV_N); + MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); + MIB.add(Op0); + MIB.add(Op1); + // Transfer MI flags. + MIB.setMIFlags(MI->getFlags()); + LLVM_DEBUG(dbgs() << " to 16-bit: " << *MIB); + NumReduced++; + MBB.erase_instr(MI); + return true; + } break; + + case Xtensa::RET: { + // Replace RET to RET.N + DebugLoc dl = MI->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::RET_N); + MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); + // Transfer MI flags. + MIB.setMIFlags(MI->getFlags()); + LLVM_DEBUG(dbgs() << " to 16-bit: " << *MIB); + NumReduced++; + MBB.erase_instr(MI); + return true; + } break; + + default: + break; + } + + return false; +} + +bool XtensaSizeReduce::ReduceMBB(MachineBasicBlock &MBB) { + bool Modified = false; + MachineBasicBlock::instr_iterator MII = MBB.instr_begin(), + E = MBB.instr_end(); + MachineBasicBlock::instr_iterator NextMII; + + // Iterate through the instructions in the basic block + for (; MII != E; MII = NextMII) { + NextMII = std::next(MII); + MachineInstr *MI = &*MII; + + // Don't reduce bundled instructions or pseudo operations + if (MI->isBundle() || MI->isTransient()) + continue; + + // Try to reduce 24-bit instruction into 16-bit instruction + Modified |= ReduceMI(MII); + } + + return Modified; +} + +bool XtensaSizeReduce::runOnMachineFunction(MachineFunction &MF) { + + Subtarget = &static_cast(MF.getSubtarget()); + XtensaII = static_cast(Subtarget->getInstrInfo()); + bool Modified = false; + + if 
(!Subtarget->hasDensity()) + return Modified; + + MachineFunction::iterator I = MF.begin(), E = MF.end(); + + for (; I != E; ++I) + Modified |= ReduceMBB(*I); + return Modified; +} + +FunctionPass *llvm::createXtensaSizeReductionPass() { + return new XtensaSizeReduce(); +} diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp index 4ad51e05df972..7ef350a6008a1 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp +++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp @@ -111,7 +111,10 @@ bool XtensaPassConfig::addInstSelector() { return false; } -void XtensaPassConfig::addPreEmitPass() { addPass(&BranchRelaxationPassID); } +void XtensaPassConfig::addPreEmitPass() { + addPass(createXtensaSizeReductionPass()); + addPass(&BranchRelaxationPassID); +} TargetPassConfig *XtensaTargetMachine::createPassConfig(PassManagerBase &PM) { return new XtensaPassConfig(*this, PM); From c9cd06ded6bb7610eedb7d427a0c3f0376e0e9ad Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Sat, 17 Aug 2024 20:16:16 +0300 Subject: [PATCH 019/289] [Xtensa] Implement Windowed feature operations --- .../Xtensa/AsmParser/XtensaAsmParser.cpp | 16 +++ .../Disassembler/XtensaDisassembler.cpp | 29 +++- .../Xtensa/MCTargetDesc/XtensaInstPrinter.cpp | 34 +++++ .../Xtensa/MCTargetDesc/XtensaInstPrinter.h | 3 + .../MCTargetDesc/XtensaMCCodeEmitter.cpp | 52 +++++++ llvm/lib/Target/Xtensa/Xtensa.td | 5 + llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 6 + llvm/lib/Target/Xtensa/XtensaISelLowering.h | 7 + llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 135 ++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaOperands.td | 21 +++ llvm/lib/Target/Xtensa/XtensaOperators.td | 11 ++ llvm/lib/Target/Xtensa/XtensaRegisterInfo.td | 6 +- .../Target/Xtensa/XtensaSizeReductionPass.cpp | 15 +- llvm/lib/Target/Xtensa/XtensaSubtarget.cpp | 1 + llvm/lib/Target/Xtensa/XtensaSubtarget.h | 7 + 15 files changed, 345 insertions(+), 3 deletions(-) diff --git 
a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp index 232385957a5a8..accd7758f93db 100644 --- a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp +++ b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp @@ -181,6 +181,8 @@ struct XtensaOperand : public MCParsedAsmOperand { ((cast(getImm())->getValue() & 0x3) == 0); } + bool isentry_imm12() const { return isImm(0, 32760); } + bool isUimm4() const { return isImm(0, 15); } bool isUimm5() const { return isImm(0, 31); } @@ -197,6 +199,11 @@ struct XtensaOperand : public MCParsedAsmOperand { bool isImm32n_95() const { return isImm(-32, 95); } + bool isImm64n_4n() const { + return isImm(-64, -4) && + ((dyn_cast(getImm())->getValue() & 0x3) == 0); + } + bool isB4const() const { if (Kind != Immediate) return false; @@ -490,6 +497,12 @@ bool XtensaAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, case Match_InvalidImm32n_95: return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), "expected immediate in range [-32, 95] except 0"); + case Match_InvalidImm64n_4n: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [-64, -4]"); + case Match_InvalidImm8n_7: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [-8, 7]"); case Match_InvalidShimm1_31: return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), "expected immediate in range [1, 31]"); @@ -514,6 +527,9 @@ bool XtensaAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), "expected immediate in range [0, 60], first 2 bits " "should be zero"); + case Match_Invalidentry_imm12: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [0, 32760]"); } report_fatal_error("Unknown match type detected!"); diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp 
b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index 612f43f7b7203..bf36925c92c6b 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -75,7 +75,8 @@ static DecodeStatus DecodeARRegisterClass(MCInst &Inst, uint64_t RegNo, return MCDisassembler::Success; } -static const unsigned SRDecoderTable[] = {Xtensa::SAR, 3}; +static const unsigned SRDecoderTable[] = { + Xtensa::SAR, 3, Xtensa::WINDOWBASE, 72, Xtensa::WINDOWSTART, 73}; static DecodeStatus DecodeSRRegisterClass(MCInst &Inst, uint64_t RegNo, uint64_t Address, @@ -212,6 +213,32 @@ static DecodeStatus decodeImm32n_95Operand(MCInst &Inst, uint64_t Imm, return MCDisassembler::Success; } +static DecodeStatus decodeImm8n_7Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder) { + assert(isUInt<4>(Imm) && "Invalid immediate"); + if (Imm > 7) + Inst.addOperand(MCOperand::createImm(Imm - 16)); + else + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeImm64n_4nOperand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<4>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm((~0x3f) | (Imm << 2))); + return MCDisassembler::Success; +} + +static DecodeStatus decodeEntry_Imm12OpValue(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<12>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm << 3)); + return MCDisassembler::Success; +} + static DecodeStatus decodeShimm1_31Operand(MCInst &Inst, uint64_t Imm, int64_t Address, const void *Decoder) { diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp index d6175eb80eadb..d1308ed56aa00 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp +++ 
b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp @@ -273,6 +273,28 @@ void XtensaInstPrinter::printImm32n_95_AsmOperand(const MCInst *MI, int OpNum, printOperand(MI, OpNum, O); } +void XtensaInstPrinter::printImm8n_7_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -8 && Value <= 7) && + "Invalid argument, value must be in ranges <-8,7>"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printImm64n_4n_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -64 && Value <= -4) && ((Value & 0x3) == 0) && + "Invalid argument, value must be in ranges <-64,-4>"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + void XtensaInstPrinter::printOffset8m8_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O) { if (MI->getOperand(OpNum).isImm()) { @@ -318,6 +340,18 @@ void XtensaInstPrinter::printOffset4m32_AsmOperand(const MCInst *MI, int OpNum, printOperand(MI, OpNum, O); } +void XtensaInstPrinter::printEntry_Imm12_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 32760) && + "Invalid argument, value must be multiples of eight in range " + "<0,32760>"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + void XtensaInstPrinter::printB4const_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O) { if (MI->getOperand(OpNum).isImm()) { diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h index ca87d79bfb67d..f8a9f592e0110 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h @@ -62,10 +62,13 @@ class
XtensaInstPrinter : public MCInstPrinter { void printImm1_16_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printImm1n_15_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printImm32n_95_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printImm8n_7_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printImm64n_4n_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printOffset8m8_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printOffset8m16_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printOffset8m32_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printOffset4m32_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printEntry_Imm12_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printB4const_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printB4constu_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); }; diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp index 179887fa0fb08..110e396247070 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp @@ -111,6 +111,18 @@ class XtensaMCCodeEmitter : public MCCodeEmitter { SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; + uint32_t getImm8n_7OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint32_t getImm64n_4nOpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint32_t getEntry_Imm12OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + uint32_t getShimm1_31OpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; @@ -400,6 +412,46 @@ XtensaMCCodeEmitter::getImm32n_95OpValue(const 
MCInst &MI, unsigned OpNo, return Res; } +uint32_t +XtensaMCCodeEmitter::getImm8n_7OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int32_t Res = static_cast(MO.getImm()); + + assert(((Res >= -8) && (Res <= 7)) && "Unexpected operand value!"); + + if (Res < 0) + return Res + 16; + + return Res; +} + +uint32_t +XtensaMCCodeEmitter::getImm64n_4nOpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int32_t Res = static_cast(MO.getImm()); + + assert(((Res >= -64) && (Res <= -4) && ((Res & 0x3) == 0)) && + "Unexpected operand value!"); + + return Res & 0x3f; +} + +uint32_t +XtensaMCCodeEmitter::getEntry_Imm12OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + uint32_t res = static_cast(MO.getImm()); + + assert(((res & 0x7) == 0) && "Unexpected operand value!"); + + return res; +} + uint32_t XtensaMCCodeEmitter::getB4constOpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, diff --git a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td index 460a15e808b3a..aa51edd07ad5a 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -21,6 +21,11 @@ def FeatureDensity : SubtargetFeature<"density", "HasDensity", "true", "Enable Density instructions">; def HasDensity : Predicate<"Subtarget->hasDensity()">, AssemblerPredicate<(all_of FeatureDensity)>; + +def FeatureWindowed : SubtargetFeature<"windowed", "HasWindowed", "true", + "Enable Xtensa Windowed Register option">; +def HasWindowed : Predicate<"Subtarget->hasWindowed()">, + AssemblerPredicate<(all_of FeatureWindowed)>; //===----------------------------------------------------------------------===// // Xtensa supported processors. 
//===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index f957f297c90a7..ad74c98dc793d 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -1194,12 +1194,18 @@ const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const { return "XtensaISD::BR_JT"; case XtensaISD::CALL: return "XtensaISD::CALL"; + case XtensaISD::CALLW: + return "XtensaISD::CALLW"; case XtensaISD::EXTUI: return "XtensaISD::EXTUI"; + case XtensaISD::MOVSP: + return "XtensaISD::MOVSP"; case XtensaISD::PCREL_WRAPPER: return "XtensaISD::PCREL_WRAPPER"; case XtensaISD::RET: return "XtensaISD::RET"; + case XtensaISD::RETW: + return "XtensaISD::RETW"; case XtensaISD::SELECT_CC: return "XtensaISD::SELECT_CC"; case XtensaISD::SRCL: diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index 2243753acbd2f..7365aa34d1750 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -29,17 +29,24 @@ enum { // is the target address. The arguments start at operand 2. // There is an optional glue operand at the end. CALL, + // WinABI Call version + CALLW, // Extract unsigned immediate. Operand 0 is value, operand 1 // is bit position of the field [0..31], operand 2 is bit size // of the field [1..16] EXTUI, + MOVSP, + // Wraps a TargetGlobalAddress that should be loaded using PC-relative // accesses. Operand 0 is the address. 
PCREL_WRAPPER, RET, + // WinABI Return + RETW, + // Select with condition operator - This selects between a true value and // a false value (ops #2 and #3) based on the boolean result of comparing // the lhs and rhs (ops #0 and #1) of a conditional expression with the diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index adaa20dcaf483..6ad69a910679f 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -650,3 +650,138 @@ let isReturn = 1, isTerminator = 1, let t = 0; } } + +//===----------------------------------------------------------------------===// +// Windowed instructions +//===----------------------------------------------------------------------===// + +def ENTRY : BRI12_Inst<0x06, 0x3, 0x0, (outs), (ins AR:$s, entry_imm12:$imm), + "entry\t$s, $imm", []>, Requires<[HasWindowed]> { + bits<15> imm; + + let imm12{11-0} = imm{14-3}; + let Defs = [SP]; +} + +//Call instructions +let isCall = 1, Defs = [A0] in { + def CALL4 : CALL_Inst<0x05, (outs), (ins pcrel32call:$offset), + "call4\t$offset", []>, Requires<[HasWindowed]> { + let n = 1; + } + + def CALL8 : CALL_Inst<0x05, (outs), (ins pcrel32call:$offset), + "call8\t$offset", []>, Requires<[HasWindowed]> { + let n = 2; + } + + def CALL12 : CALL_Inst<0x05, (outs), (ins pcrel32call:$offset), + "call12\t$offset", []>, Requires<[HasWindowed]> { + let n = 3; + } + + def CALLX4 : CALLX_Inst<0x00, 0x00, 0x00, (outs), (ins AR:$s), + "callx4\t$s", []>, Requires<[HasWindowed]> { + let m = 0x3; + let n = 0x1; + let r = 0; + } + + def CALLX8 : CALLX_Inst<0x00, 0x00, 0x00, (outs), (ins AR:$s), + "callx8\t$s", []>, Requires<[HasWindowed]> { + let m = 0x3; + let n = 0x2; + let r = 0; + } + + def CALLX12 : CALLX_Inst<0x00, 0x00, 0x00, (outs), (ins AR:$s), + "callx12\t$s", []>, Requires<[HasWindowed]> { + let m = 0x3; + let n = 0x3; + let r = 0; + } +} + +//Windowed call patterns +def : Pat<(Xtensa_callw (i32 
tglobaladdr:$dst)), + (CALL8 tglobaladdr:$dst)>; +def : Pat<(Xtensa_callw (i32 texternalsym:$dst)), + (CALL8 texternalsym:$dst)>; +def : Pat<(Xtensa_callw AR:$dst), + (CALLX8 AR:$dst)>; + +def MOVSP : RRR_Inst<0x00, 0x00, 0x00, (outs AR:$t), (ins AR:$s), + "movsp\t$t, $s", + [(set AR:$t, (Xtensa_movsp AR:$s))]>, + Requires<[HasWindowed]> { + let r = 0x01; +} + +//Return instructions +let isReturn = 1, isTerminator = 1, + isBarrier = 1, Uses = [A0] in { + def RETW_N : RRRN_Inst<0x0D, (outs), (ins), + "retw.n", [(Xtensa_retw)]>, + Requires<[HasWindowed, HasDensity]> { + let r = 0x0F; + let s = 0; + let t = 1; + } + + def RETW : CALLX_Inst<0x00, 0x00, 0x00, (outs), (ins), + "retw", [(Xtensa_retw)]>, + Requires<[HasWindowed]> { + let m = 0x2; + let n = 0x1; + let s = 0; + let r = 0; + } +} + +//Store 32-bit for Window Exceptions +def S32E : RRI4_Inst<0x00, 0x09, (outs), (ins AR:$t, AR:$s, imm64n_4n:$imm), + "s32e\t$t, $s, $imm", []>, Requires<[HasWindowed]> { + bits<6> imm; + + let r = imm{5-2}; + let imm4 = 0x4; + let mayStore = 1; +} + +def L32E : RRI4_Inst<0x00, 0x09, (outs), (ins AR:$t, AR:$s, imm64n_4n:$imm), + "l32e\t$t, $s, $imm", []>, Requires<[HasWindowed]> { + bits<6> imm; + + let r = imm{5-2}; + let imm4 = 0x0; + let mayLoad = 1; +} + +//Return from window +def RFWU : RRR_Inst<0x00, 0x00, 0x00, (outs), (ins), + "rfwu", []>, Requires<[HasWindowed]> { + bits<4> imm; + + let r = 0x3; + let s = 0x5; + let t = 0x0; +} + +def RFWO : RRR_Inst<0x00, 0x00, 0x00, (outs), (ins), + "rfwo", []>, Requires<[HasWindowed]> { + bits<4> imm; + + let r = 0x3; + let s = 0x4; + let t = 0x0; +} + +//Rotate window +def ROTW : RRR_Inst<0x00, 0x00, 0x04, (outs), (ins imm8n_7:$imm), + "rotw\t$imm", []>, Requires<[HasWindowed]> { + bits<4> imm; + + let r = 0x8; + let s = 0x0; + let t = imm{3-0}; +} diff --git a/llvm/lib/Target/Xtensa/XtensaOperands.td b/llvm/lib/Target/Xtensa/XtensaOperands.td index aa72fa0a56a6f..dd12bd2390499 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperands.td +++ 
b/llvm/lib/Target/Xtensa/XtensaOperands.td @@ -37,6 +37,20 @@ def imm8_sh8 : Immediate= -32768 && Imm <= 32512 && ((Imm & let DecoderMethod = "decodeImm8_sh8Operand"; } +// imm8n_7 predicate - Immediate in the range [-8,7] +def Imm8n_7_AsmOperand: ImmAsmOperand<"Imm8n_7">; +def imm8n_7: Immediate= -8 && Imm <= 7; }], "Imm8n_7_AsmOperand"> { + let EncoderMethod = "getImm8n_7OpValue"; + let DecoderMethod = "decodeImm8n_7Operand"; +} + +// imm64n_4n predicate - Immediate in the range [-64,-4] +def Imm64n_4n_AsmOperand: ImmAsmOperand<"Imm64n_4n">; +def imm64n_4n: Immediate= -64 && Imm <= -4; }], "Imm64n_4n_AsmOperand"> { + let EncoderMethod = "getImm64n_4nOpValue"; + let DecoderMethod = "decodeImm64n_4nOperand"; +} + // imm12 predicate - Immediate in the range [-2048,2047] def Imm12_AsmOperand : ImmAsmOperand<"Imm12">; def imm12 : Immediate= -2048 && Imm <= 2047; }], "Imm12_AsmOperand"> { @@ -117,6 +131,13 @@ def offset4m32 : Immediate= 0 && Imm <= 60 && (Imm & 0x3 == 0); }], "Offset4m32_AsmOperand">; +// entry_imm12 predicate - Immediate in the range [0,32760], ENTRY parameter +def Entry_Imm12_AsmOperand: ImmAsmOperand<"entry_imm12">; +def entry_imm12: Immediate= 0 && Imm <= 32760 && (Imm & 0x3 == 0); }], "Entry_Imm12_AsmOperand"> { + let EncoderMethod = "getEntry_Imm12OpValue"; + let DecoderMethod = "decodeEntry_Imm12OpValue"; +} + // b4const predicate - Branch Immediate 4-bit signed operand def B4const_AsmOperand: ImmAsmOperand<"B4const">; def b4const: Immediate, SDTCisVT<5, i32>]>; +def SDT_XtensaMOVSP : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisVT<0, i32>]>; + def SDT_XtensaSRC : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, SDTCisVT<2, i32>, SDTCisVT<3, i32>]>; @@ -37,9 +39,15 @@ def SDT_XtensaEXTUI : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCi def Xtensa_call: SDNode<"XtensaISD::CALL", SDT_XtensaCall, [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue, SDNPVariadic]>; +def Xtensa_callw: SDNode<"XtensaISD::CALLW", SDT_XtensaCall, + [SDNPHasChain, 
SDNPOutGlue, SDNPOptInGlue, SDNPVariadic]>; + def Xtensa_ret: SDNode<"XtensaISD::RET", SDTNone, [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; +def Xtensa_retw: SDNode<"XtensaISD::RETW", SDTNone, + [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; + def Xtensa_pcrel_wrapper: SDNode<"XtensaISD::PCREL_WRAPPER", SDT_XtensaWrapPtr, []>; def Xtensa_callseq_start: SDNode<"ISD::CALLSEQ_START", SDT_XtensaCallSeqStart, @@ -59,3 +67,6 @@ def Xtensa_srcl: SDNode<"XtensaISD::SRCL", SDT_XtensaSRC>; def Xtensa_srcr: SDNode<"XtensaISD::SRCR", SDT_XtensaSRC>; def Xtensa_extui: SDNode<"XtensaISD::EXTUI", SDT_XtensaEXTUI>; + +def Xtensa_movsp: SDNode<"XtensaISD::MOVSP", SDT_XtensaMOVSP, + [SDNPInGlue]>; diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td index 5c07386b060cd..9939d19ef1907 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td @@ -75,4 +75,8 @@ class SRReg num, string n, list alt = []> : XtensaReg { // Shift Amount Register def SAR : SRReg<3, "sar", ["SAR","3"]>; -def SR : RegisterClass<"Xtensa", [i32], 32, (add SAR)>; +def WINDOWBASE : SRReg<72, "windowbase", ["WINDOWBASE", "72"]>; +def WINDOWSTART : SRReg<73, "windowstart", ["WINDOWSTART", "73"]>; + +def SR : RegisterClass<"Xtensa", [i32], 32, (add SAR, + WINDOWBASE, WINDOWSTART)>; diff --git a/llvm/lib/Target/Xtensa/XtensaSizeReductionPass.cpp b/llvm/lib/Target/Xtensa/XtensaSizeReductionPass.cpp index f69c1e601a788..1377d7fccf1e0 100644 --- a/llvm/lib/Target/Xtensa/XtensaSizeReductionPass.cpp +++ b/llvm/lib/Target/Xtensa/XtensaSizeReductionPass.cpp @@ -173,7 +173,7 @@ bool XtensaSizeReduce::ReduceMI(const MachineBasicBlock::instr_iterator &MII) { MachineOperand Op1 = MI->getOperand(1); MachineOperand Op2 = MI->getOperand(2); - if (Op1.getReg() != Op2.getReg()) + if (Op1.getReg() != Op2.getReg()) break; // Replace OR R1, R2, R2 to MOV.N R1, R2 @@ -203,6 +203,19 @@ bool XtensaSizeReduce::ReduceMI(const 
MachineBasicBlock::instr_iterator &MII) { return true; } break; + case Xtensa::RETW: { + // Replace RETW to RETW.N + DebugLoc dl = MI->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::RETW_N); + MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); + // Transfer MI flags. + MIB.setMIFlags(MI->getFlags()); + LLVM_DEBUG(dbgs() << " to 16-bit: " << *MIB); + NumReduced++; + MBB.erase_instr(MI); + return true; + } break; + default: break; } diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp index d6b1b4bc15463..2d47a36ad7b0a 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp @@ -31,6 +31,7 @@ XtensaSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) { } HasDensity = false; + HasWindowed = false; // Parse features string. ParseSubtargetFeatures(CPUName, CPUName, FS); diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.h b/llvm/lib/Target/Xtensa/XtensaSubtarget.h index 948dcbc5278ea..cfa612fe6de16 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.h +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.h @@ -39,6 +39,9 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { // Enabled Xtensa Density extension bool HasDensity; + // Enabled Xtensa Windowed Register option + bool HasWindowed; + XtensaSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS); public: @@ -62,8 +65,12 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { return &TSInfo; } + bool isWinABI() const { return hasWindowed(); } + bool hasDensity() const { return HasDensity; } + bool hasWindowed() const { return HasWindowed; } + // Automatically generated by tblgen. void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS); }; From 4c5483f2b1d58930a4baba6f96c455090fd437c6 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 00:08:06 +0300 Subject: [PATCH 020/289] [Xtensa] Implement Windowed Call ABI. 
--- llvm/lib/Target/Xtensa/XtensaCallingConv.td | 14 ++ .../lib/Target/Xtensa/XtensaFrameLowering.cpp | 210 +++++++++++++----- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 55 ++++- llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp | 10 +- llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp | 13 +- 5 files changed, 227 insertions(+), 75 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaCallingConv.td b/llvm/lib/Target/Xtensa/XtensaCallingConv.td index a348b4c890b22..01469a0e3eadf 100644 --- a/llvm/lib/Target/Xtensa/XtensaCallingConv.td +++ b/llvm/lib/Target/Xtensa/XtensaCallingConv.td @@ -22,3 +22,17 @@ def RetCC_Xtensa : CallingConv<[ //===----------------------------------------------------------------------===// def CSR_Xtensa : CalleeSavedRegs<(add A0, A12, A13, A14, A15)>; +def CSRWE_Xtensa : CalleeSavedRegs<(add)> { + let OtherPreserved = (add A0, SP, A2, A3, A4, A5, A6, A7); +} +//===----------------------------------------------------------------------===// + +def RetCCW_Xtensa : CallingConv<[ + CCIfType<[i1, i8, i16], CCPromoteToType>, + CCIfType<[f32], CCBitConvertToType>, + + //First two return values go in a10, a11, a12, a13 + CCIfType<[i32], CCAssignToReg<[A10, A11, A12, A13]>>, + CCIfType<[f32], CCAssignToReg<[A10, A11, A12, A13]>>, + CCIfType<[i64], CCAssignToRegWithShadow<[A10, A12], [A11, A13]>> +]>; diff --git a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp index 87dda2b3cc681..a479ce4fdfbda 100644 --- a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp @@ -33,11 +33,18 @@ bool XtensaFrameLowering::hasFP(const MachineFunction &MF) const { MFI.hasVarSizedObjects(); } +/* minimum frame = reg save area (4 words) plus static chain (1 word) + and the total number of words must be a multiple of 128 bits. */ +/* Width of a word, in units (bytes). 
*/ +#define UNITS_PER_WORD 4 +#define MIN_FRAME_SIZE (8 * UNITS_PER_WORD) + void XtensaFrameLowering::emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const { assert(&MBB == &MF.front() && "Shrink-wrapping not yet implemented"); MachineFrameInfo &MFI = MF.getFrameInfo(); MachineBasicBlock::iterator MBBI = MBB.begin(); + const XtensaSubtarget &STI = MF.getSubtarget(); DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); MCRegister SP = Xtensa::SP; MCRegister FP = TRI->getFrameRegister(MF); @@ -50,77 +57,129 @@ void XtensaFrameLowering::emitPrologue(MachineFunction &MF, // Round up StackSize to 16*N StackSize += (16 - StackSize) & 0xf; - // No need to allocate space on the stack. - if (StackSize == 0 && !MFI.adjustsStack()) - return; + if (STI.isWinABI()) { + StackSize += 32; + + if (StackSize <= 32760) { + BuildMI(MBB, MBBI, DL, TII.get(Xtensa::ENTRY)) + .addReg(SP) + .addImm(StackSize); + } else { + /* Use a8 as a temporary since a0-a7 may be live. */ + unsigned TmpReg = Xtensa::A8; + + const XtensaInstrInfo &TII = *static_cast( + MBB.getParent()->getSubtarget().getInstrInfo()); + BuildMI(MBB, MBBI, DL, TII.get(Xtensa::ENTRY)) + .addReg(SP) + .addImm(MIN_FRAME_SIZE); + TII.loadImmediate(MBB, MBBI, &TmpReg, StackSize - MIN_FRAME_SIZE); + BuildMI(MBB, MBBI, DL, TII.get(Xtensa::SUB), TmpReg) + .addReg(SP) + .addReg(TmpReg); + BuildMI(MBB, MBBI, DL, TII.get(Xtensa::MOVSP), SP).addReg(TmpReg); + } - // Adjust stack. - TII.adjustStackPtr(SP, -StackSize, MBB, MBBI); - - // emit ".cfi_def_cfa_offset StackSize" - unsigned CFIIndex = - MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, StackSize)); - BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) - .addCFIIndex(CFIIndex); - - const std::vector &CSI = MFI.getCalleeSavedInfo(); - - if (!CSI.empty()) { - // Find the instruction past the last instruction that saves a - // callee-saved register to the stack. 
The callee-saved store - // instructions are placed at the begin of basic block, so - // iterate over instruction sequence and check that - // save instructions are placed correctly. - for (unsigned i = 0, e = CSI.size(); i < e; ++i) { + // Store FP register in A8, because FP may be used to pass function + // arguments + BuildMI(MBB, MBBI, DL, TII.get(Xtensa::OR), Xtensa::A8) + .addReg(FP) + .addReg(FP); + + // if framepointer enabled, set it to point to the stack pointer. + if (hasFP(MF)) { + // Insert instruction "move $fp, $sp" at this location. + BuildMI(MBB, MBBI, DL, TII.get(Xtensa::OR), FP) + .addReg(SP) + .addReg(SP) + .setMIFlag(MachineInstr::FrameSetup); + + MCCFIInstruction Inst = MCCFIInstruction::cfiDefCfa( + nullptr, MRI->getDwarfRegNum(FP, true), StackSize); + unsigned CFIIndex = MF.addFrameInst(Inst); + BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex); + } else { + // emit ".cfi_def_cfa_offset StackSize" + unsigned CFIIndex = MF.addFrameInst( + MCCFIInstruction::cfiDefCfaOffset(nullptr, StackSize)); + BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex); + } + } else { + // No need to allocate space on the stack. + if (StackSize == 0 && !MFI.adjustsStack()) + return; + + // Adjust stack. + TII.adjustStackPtr(SP, -StackSize, MBB, MBBI); + + // emit ".cfi_def_cfa_offset StackSize" + unsigned CFIIndex = + MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, StackSize)); + BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex); + + const std::vector &CSI = MFI.getCalleeSavedInfo(); + + if (!CSI.empty()) { + // Find the instruction past the last instruction that saves a + // callee-saved register to the stack. The callee-saved store + // instructions are placed at the begin of basic block, so + // iterate over instruction sequence and check that + // save instructions are placed correctly. 
+ for (unsigned i = 0, e = CSI.size(); i < e; ++i) { #ifndef NDEBUG - const CalleeSavedInfo &Info = CSI[i]; - int FI = Info.getFrameIdx(); - int StoreFI = 0; + const CalleeSavedInfo &Info = CSI[i]; + int FI = Info.getFrameIdx(); + int StoreFI = 0; + + // Checking that the instruction is exactly as expected + bool IsStoreInst = false; + if (MBBI->getOpcode() == TargetOpcode::COPY && Info.isSpilledToReg()) { + Register DstReg = MBBI->getOperand(0).getReg(); + Register Reg = MBBI->getOperand(1).getReg(); + IsStoreInst = (Info.getDstReg() == DstReg) && (Info.getReg() == Reg); + } else { + Register Reg = TII.isStoreToStackSlot(*MBBI, StoreFI); + IsStoreInst = (Reg == Info.getReg()) && (StoreFI == FI); + } + assert(IsStoreInst && + "Unexpected callee-saved register store instruction"); +#endif + ++MBBI; + } - // Checking that the instruction is exactly as expected - bool IsStoreInst = false; - if (MBBI->getOpcode() == TargetOpcode::COPY && Info.isSpilledToReg()) { - Register DstReg = MBBI->getOperand(0).getReg(); - Register Reg = MBBI->getOperand(1).getReg(); - IsStoreInst = (Info.getDstReg() == DstReg) && (Info.getReg() == Reg); - } else { - Register Reg = TII.isStoreToStackSlot(*MBBI, StoreFI); - IsStoreInst = (Reg == Info.getReg()) && (StoreFI == FI); + // Iterate over list of callee-saved registers and emit .cfi_offset + // directives. + for (const auto &I : CSI) { + int64_t Offset = MFI.getObjectOffset(I.getFrameIdx()); + Register Reg = I.getReg(); + + unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset( + nullptr, MRI->getDwarfRegNum(Reg, 1), Offset)); + BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) + .addCFIIndex(CFIIndex); } - assert(IsStoreInst && - "Unexpected callee-saved register store instruction"); -#endif - ++MBBI; } - // Iterate over list of callee-saved registers and emit .cfi_offset - // directives. 
- for (const auto &I : CSI) { - int64_t Offset = MFI.getObjectOffset(I.getFrameIdx()); - Register Reg = I.getReg(); - - unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset( - nullptr, MRI->getDwarfRegNum(Reg, 1), Offset)); + // if framepointer enabled, set it to point to the stack pointer. + if (hasFP(MF)) { + // Insert instruction "move $fp, $sp" at this location. + BuildMI(MBB, MBBI, DL, TII.get(Xtensa::OR), FP) + .addReg(SP) + .addReg(SP) + .setMIFlag(MachineInstr::FrameSetup); + + // emit ".cfi_def_cfa_register $fp" + unsigned CFIIndex = + MF.addFrameInst(MCCFIInstruction::createDefCfaRegister( + nullptr, MRI->getDwarfRegNum(FP, true))); BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) .addCFIIndex(CFIIndex); } } - // if framepointer enabled, set it to point to the stack pointer. - if (hasFP(MF)) { - // Insert instruction "move $fp, $sp" at this location. - BuildMI(MBB, MBBI, DL, TII.get(Xtensa::OR), FP) - .addReg(SP) - .addReg(SP) - .setMIFlag(MachineInstr::FrameSetup); - - // emit ".cfi_def_cfa_register $fp" - unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfaRegister( - nullptr, MRI->getDwarfRegNum(FP, true))); - BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) - .addCFIIndex(CFIIndex); - } - if (StackSize != PrevStackSize) { MFI.setStackSize(StackSize); @@ -139,6 +198,8 @@ void XtensaFrameLowering::emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const { MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr(); MachineFrameInfo &MFI = MF.getFrameInfo(); + + const XtensaSubtarget &STI = MF.getSubtarget(); DebugLoc DL = MBBI->getDebugLoc(); MCRegister SP = Xtensa::SP; MCRegister FP = TRI->getFrameRegister(MF); @@ -177,9 +238,22 @@ void XtensaFrameLowering::emitEpilogue(MachineFunction &MF, #endif } - BuildMI(MBB, I, DL, TII.get(Xtensa::OR), SP).addReg(FP).addReg(FP); + if (STI.isWinABI()) { + // In most architectures, we need to explicitly restore the stack pointer + // before returning. 
+ // + // For Xtensa Windowed Register option, it is not needed to explicitly + // restore the stack pointer. Reason being is that on function return, + // the window of the caller (including the old stack pointer) gets + // restored anyways. + } else { + BuildMI(MBB, I, DL, TII.get(Xtensa::OR), SP).addReg(FP).addReg(FP); + } } + if (STI.isWinABI()) + return; + // Get the number of bytes from FrameInfo uint64_t StackSize = MFI.getStackSize(); @@ -194,6 +268,11 @@ bool XtensaFrameLowering::spillCalleeSavedRegisters( MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, ArrayRef CSI, const TargetRegisterInfo *TRI) const { MachineFunction *MF = MBB.getParent(); + const XtensaSubtarget &STI = MF->getSubtarget(); + + if (STI.isWinABI()) + return true; + MachineBasicBlock &EntryBlock = *(MF->begin()); for (unsigned i = 0, e = CSI.size(); i != e; ++i) { @@ -221,6 +300,10 @@ bool XtensaFrameLowering::spillCalleeSavedRegisters( bool XtensaFrameLowering::restoreCalleeSavedRegisters( MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, MutableArrayRef CSI, const TargetRegisterInfo *TRI) const { + MachineFunction *MF = MBB.getParent(); + const XtensaSubtarget &STI = MF->getSubtarget(); + if (STI.isWinABI()) + return true; return TargetFrameLowering::restoreCalleeSavedRegisters(MBB, MI, CSI, TRI); } @@ -246,8 +329,13 @@ MachineBasicBlock::iterator XtensaFrameLowering::eliminateCallFramePseudoInstr( void XtensaFrameLowering::determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS) const { + const XtensaSubtarget &STI = MF.getSubtarget(); unsigned FP = TRI->getFrameRegister(MF); + if (STI.isWinABI()) { + return; + } + TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS); // Mark $fp as used if function has dedicated frame pointer. 
diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index ad74c98dc793d..d62c17b4b2ccb 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -43,6 +43,15 @@ static bool isLongCall(const char *str) { return true; } +// The calling conventions in XtensaCallingConv.td are described in terms of the +// callee's register window. This function translates registers to the +// corresponding caller window %o register. +static unsigned toCallerWindow(unsigned Reg) { + if (Reg >= Xtensa::A2 && Reg <= Xtensa::A7) + return Reg - Xtensa::A2 + Xtensa::A10; + return Reg; +} + XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, const XtensaSubtarget &STI) : TargetLowering(TM), Subtarget(STI) { @@ -355,7 +364,17 @@ SDValue XtensaTargetLowering::LowerFormalArguments( // Transform the arguments stored on // physical registers into virtual ones - unsigned Register = MF.addLiveIn(VA.getLocReg(), RC); + unsigned Register = 0; + unsigned FrameReg = Subtarget.getRegisterInfo()->getFrameRegister(MF); + + // Argument passed in FrameReg in WinABI we save in A8 (in emitPrologue), + // so load argument from A8 + if (Subtarget.isWinABI() && (VA.getLocReg() == FrameReg)) { + Register = MF.addLiveIn(Xtensa::A8, RC); + } else { + Register = MF.addLiveIn(VA.getLocReg(), RC); + } + SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Register, RegVT); // If this is an 8 or 16-bit value, it has been passed promoted @@ -563,6 +582,8 @@ XtensaTargetLowering::LowerCall(CallLoweringInfo &CLI, SDValue Glue; for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) { unsigned Reg = RegsToPass[I].first; + if (Subtarget.isWinABI()) + Reg = toCallerWindow(Reg); Chain = DAG.getCopyToReg(Chain, DL, Reg, RegsToPass[I].second, Glue); Glue = Chain.getValue(1); } @@ -612,6 +633,8 @@ XtensaTargetLowering::LowerCall(CallLoweringInfo &CLI, // known live into the call. 
for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) { unsigned Reg = RegsToPass[I].first; + if (Subtarget.isWinABI()) + Reg = toCallerWindow(Reg); Ops.push_back(DAG.getRegister(Reg, RegsToPass[I].second.getValueType())); } @@ -620,7 +643,8 @@ XtensaTargetLowering::LowerCall(CallLoweringInfo &CLI, Ops.push_back(Glue); SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); - Chain = DAG.getNode(XtensaISD::CALL, DL, NodeTys, Ops); + Chain = DAG.getNode(Subtarget.isWinABI() ? XtensaISD::CALLW : XtensaISD::CALL, + DL, NodeTys, Ops); Glue = Chain.getValue(1); // Mark the end of the call, which is glued to the call itself. @@ -631,7 +655,8 @@ XtensaTargetLowering::LowerCall(CallLoweringInfo &CLI, // Assign locations to each value returned by this call. SmallVector RetLocs; CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext()); - RetCCInfo.AnalyzeCallResult(Ins, RetCC_Xtensa); + RetCCInfo.AnalyzeCallResult(Ins, Subtarget.isWinABI() ? RetCCW_Xtensa + : RetCC_Xtensa); // Copy all of the result registers out of their specified physreg. for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) { @@ -672,7 +697,9 @@ XtensaTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, SDValue Glue; // Quick exit for void returns if (RetLocs.empty()) - return DAG.getNode(XtensaISD::RET, DL, MVT::Other, Chain); + return DAG.getNode(Subtarget.isWinABI() ? XtensaISD::RETW + : XtensaISD::RET, + DL, MVT::Other, Chain); // Copy the result values into the output registers. SmallVector RetOps; @@ -696,7 +723,9 @@ XtensaTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, if (Glue.getNode()) RetOps.push_back(Glue); - return DAG.getNode(XtensaISD::RET, DL, MVT::Other, RetOps); + return DAG.getNode(Subtarget.isWinABI() ? 
XtensaISD::RETW + : XtensaISD::RET, + DL, MVT::Other, RetOps); } static unsigned getBranchOpcode(ISD::CondCode Cond) { @@ -887,8 +916,13 @@ SDValue XtensaTargetLowering::LowerSTACKSAVE(SDValue Op, SDValue XtensaTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const { - return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op), Xtensa::SP, - Op.getOperand(1)); + if (Subtarget.isWinABI()) { + SDValue NewSP = + DAG.getNode(XtensaISD::MOVSP, SDLoc(Op), MVT::i32, Op.getOperand(1)); + return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op), Xtensa::SP, NewSP); + } else { + return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op), Xtensa::SP, Op.getOperand(1)); + } } SDValue XtensaTargetLowering::LowerFRAMEADDR(SDValue Op, @@ -924,7 +958,12 @@ SDValue XtensaTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, unsigned SPReg = Xtensa::SP; SDValue SP = DAG.getCopyFromReg(Chain, DL, SPReg, VT); SDValue NewSP = DAG.getNode(ISD::SUB, DL, VT, SP, SizeRoundUp); // Value - Chain = DAG.getCopyToReg(SP.getValue(1), DL, SPReg, NewSP); // Output chain + if (Subtarget.isWinABI()) { + SDValue NewSP1 = DAG.getNode(XtensaISD::MOVSP, DL, MVT::i32, NewSP); + Chain = DAG.getCopyToReg(SP.getValue(1), DL, SPReg, NewSP1); // Output chain + } else { + Chain = DAG.getCopyToReg(SP.getValue(1), DL, SPReg, NewSP); // Output chain + } SDValue NewVal = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i32); Chain = NewVal.getValue(1); diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp index 74c9203412c3c..ae73804554432 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp @@ -99,9 +99,13 @@ void XtensaInstrInfo::adjustStackPtr(unsigned SP, int64_t Amount, .addReg(Reg1, RegState::Kill); } - BuildMI(MBB, I, DL, get(Xtensa::OR), SP) - .addReg(Reg, RegState::Kill) - .addReg(Reg, RegState::Kill); + if (STI.isWinABI()) { + BuildMI(MBB, I, DL, get(Xtensa::MOVSP), SP).addReg(Reg, RegState::Kill); + } else { + 
BuildMI(MBB, I, DL, get(Xtensa::OR), SP) + .addReg(Reg, RegState::Kill) + .addReg(Reg, RegState::Kill); + } } void XtensaInstrInfo::copyPhysReg(MachineBasicBlock &MBB, diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp index fb7d45ab5c420..905e9c9788738 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp @@ -35,13 +35,19 @@ XtensaRegisterInfo::XtensaRegisterInfo(const XtensaSubtarget &STI) const uint16_t * XtensaRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { - return CSR_Xtensa_SaveList; + if (Subtarget.isWinABI()) + return CSRWE_Xtensa_SaveList; + else + return CSR_Xtensa_SaveList; } const uint32_t * XtensaRegisterInfo::getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const { - return CSR_Xtensa_RegMask; + if (Subtarget.isWinABI()) + return CSRWE_Xtensa_RegMask; + else + return CSR_Xtensa_RegMask; } BitVector XtensaRegisterInfo::getReservedRegs(const MachineFunction &MF) const { @@ -130,5 +136,6 @@ bool XtensaRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, Register XtensaRegisterInfo::getFrameRegister(const MachineFunction &MF) const { const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); - return TFI->hasFP(MF) ? Xtensa::A15 : Xtensa::SP; + return TFI->hasFP(MF) ? (Subtarget.isWinABI() ? Xtensa::A7 : Xtensa::A15) + : Xtensa::SP; } From 3b8e821ed84b4078ad618798bce53f28c5af82d0 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 00:19:43 +0300 Subject: [PATCH 021/289] [Xtensa] Fix reserving of the emergency spill slot. 
--- .../lib/Target/Xtensa/XtensaFrameLowering.cpp | 23 +++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp index a479ce4fdfbda..ed19719989c0d 100644 --- a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp @@ -345,17 +345,16 @@ void XtensaFrameLowering::determineCalleeSaves(MachineFunction &MF, void XtensaFrameLowering::processFunctionBeforeFrameFinalized( MachineFunction &MF, RegScavenger *RS) const { - // Set scavenging frame index if necessary. - MachineFrameInfo &MFI = MF.getFrameInfo(); - uint64_t MaxSPOffset = MFI.estimateStackSize(MF); - - if (isInt<12>(MaxSPOffset)) - return; - - const TargetRegisterClass &RC = Xtensa::ARRegClass; - unsigned Size = TRI->getSpillSize(RC); - Align Alignment = TRI->getSpillAlign(RC); - int FI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false); + const XtensaSubtarget &STI = MF.getSubtarget(); - RS->addScavengingFrameIndex(FI); + // In WinABI mode add register scavenging slot + // FIXME: It may be posssible to add spill slot by more optimal way + if (STI.isWinABI() && (MF.getFrameInfo().estimateStackSize(MF) > 256)) { + MachineFrameInfo &MFI = MF.getFrameInfo(); + const TargetRegisterClass &RC = Xtensa::ARRegClass; + const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); + unsigned Size = TRI.getSpillSize(RC); + Align Alignment = TRI.getSpillAlign(RC); + RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Alignment, false)); + } } From 857dba22041a98e9098f08218f0bb2e73e405b7d Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 00:23:43 +0300 Subject: [PATCH 022/289] [Xtensa] Implement Boolean feature operations --- llvm/lib/Target/Xtensa/Xtensa.td | 6 ++ llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 4 ++ llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp | 28 ++++++++ llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 65 
+++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaRegisterInfo.td | 19 +++++- llvm/lib/Target/Xtensa/XtensaSubtarget.cpp | 1 + llvm/lib/Target/Xtensa/XtensaSubtarget.h | 5 ++ 7 files changed, 127 insertions(+), 1 deletion(-) diff --git a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td index aa51edd07ad5a..ad2c121ef68d0 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -26,6 +26,12 @@ def FeatureWindowed : SubtargetFeature<"windowed", "HasWindowed", "true" "Enable Xtensa Windowed Register option">; def HasWindowed : Predicate<"Subtarget->hasWindowed()">, AssemblerPredicate<(all_of FeatureWindowed)>; + +def FeatureBoolean : SubtargetFeature<"bool", "HasBoolean", "true", + "Enable Xtensa Boolean extension">; +def HasBoolean : Predicate<"Subtarget->hasBoolean()">, + AssemblerPredicate<(all_of FeatureBoolean)>; + //===----------------------------------------------------------------------===// // Xtensa supported processors. //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index d62c17b4b2ccb..f4fec99781983 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -153,6 +153,10 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, // Compute derived properties from the register classes computeRegisterProperties(STI.getRegisterInfo()); + + if (Subtarget.hasBoolean()) { + addRegisterClass(MVT::i1, &Xtensa::BRRegClass); + } } bool XtensaTargetLowering::isOffsetFoldingLegal( diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp index ae73804554432..d5ff602bb0be0 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp @@ -259,6 +259,12 @@ bool XtensaInstrInfo::reverseBranchCondition( Cond[0].setImm(Xtensa::BLTZ); 
return false; + case Xtensa::BF: + Cond[0].setImm(Xtensa::BT); + return false; + case Xtensa::BT: + Cond[0].setImm(Xtensa::BF); + return false; default: llvm_unreachable("Invalid branch condition!"); } @@ -295,6 +301,10 @@ XtensaInstrInfo::getBranchDestBlock(const MachineInstr &MI) const { case Xtensa::BGEZ: return MI.getOperand(1).getMBB(); + case Xtensa::BT: + case Xtensa::BF: + return MI.getOperand(1).getMBB(); + default: llvm_unreachable("Unknown branch opcode"); } @@ -330,6 +340,10 @@ bool XtensaInstrInfo::isBranchOffsetInRange(unsigned BranchOp, case Xtensa::BGEZ: BrOffset -= 4; return isIntN(12, BrOffset); + case Xtensa::BT: + case Xtensa::BF: + BrOffset -= 4; + return isIntN(8, BrOffset); default: llvm_unreachable("Unknown branch opcode"); } @@ -554,6 +568,10 @@ unsigned XtensaInstrInfo::InsertConstBranchAtInst( case Xtensa::BGEZ: MI = BuildMI(MBB, I, DL, get(BR_C)).addImm(offset).addReg(Cond[1].getReg()); break; + case Xtensa::BT: + case Xtensa::BF: + MI = BuildMI(MBB, I, DL, get(BR_C)).addImm(offset).addReg(Cond[1].getReg()); + break; default: llvm_unreachable("Invalid branch type!"); } @@ -614,6 +632,10 @@ unsigned XtensaInstrInfo::InsertBranchAtInst(MachineBasicBlock &MBB, case Xtensa::BGEZ: MI = BuildMI(MBB, I, DL, get(BR_C)).addReg(Cond[1].getReg()).addMBB(TBB); break; + case Xtensa::BT: + case Xtensa::BF: + MI = BuildMI(MBB, I, DL, get(BR_C)).addReg(Cond[1].getReg()).addMBB(TBB); + break; default: llvm_unreachable("Invalid branch type!"); } @@ -662,6 +684,12 @@ bool XtensaInstrInfo::isBranch(const MachineBasicBlock::iterator &MI, Target = &MI->getOperand(1); return true; + case Xtensa::BT: + case Xtensa::BF: + Cond[0].setImm(OpCode); + Target = &MI->getOperand(1); + return true; + default: assert(!MI->getDesc().isBranch() && "Unknown branch opcode"); return false; diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 6ad69a910679f..fb2f3985f4b0c 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td 
+++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -785,3 +785,68 @@ def ROTW : RRR_Inst<0x00, 0x00, 0x04, (outs), (ins imm8n_7:$imm), let s = 0x0; let t = imm{3-0}; } + +//===----------------------------------------------------------------------===// +// Boolean Instructions +//===----------------------------------------------------------------------===// + +def ALL4 : RRR_Inst<0x00, 0x00, 0x00, (outs BR:$t), (ins BR:$s), + "all4\t$t, $s", []>, Requires<[HasBoolean]> { + let r = 0x9; +} + +def ALL8 : RRR_Inst<0x00, 0x00, 0x00, (outs BR:$t), (ins BR:$s), + "all8\t$t, $s", []>, Requires<[HasBoolean]> { + let r = 0xB; +} + +def ANDB : RRR_Inst<0x00, 0x02, 0x00, (outs BR:$r), (ins BR:$s, BR:$t), + "andb\t$r, $s, $t", []>, Requires<[HasBoolean]>; +def ANDBC : RRR_Inst<0x00, 0x02, 0x01, (outs BR:$r), (ins BR:$s, BR:$t), + "andbc\t$r, $s, $t", []>, Requires<[HasBoolean]>; + +def ANY4 : RRR_Inst<0x00, 0x00, 0x00, (outs BR:$t), (ins BR:$s), + "any4\t$t, $s", []>, Requires<[HasBoolean]> { + let r = 0x8; +} + +def ANY8 : RRR_Inst<0x00, 0x00, 0x00, (outs BR:$t), (ins BR:$s), + "any8\t$t, $s", []>, Requires<[HasBoolean]> { + let r = 0xA; +} + +let isBranch = 1, isTerminator = 1, Predicates = [HasBoolean] in { + def BT : RRI8_Inst<0x06, (outs), (ins BR:$b, brtarget:$target), + "bt\t$b, $target", []> { + bits<8> target; + bits<4> b; + + let r = 0x1; + let s = b; + let t = 0x7; + let imm8 = target; + } + + def BF : RRI8_Inst<0x06, (outs), (ins BR:$b, brtarget:$target), + "bf\t$b, $target", []> { + bits<8> target; + bits<4> b; + + let r = 0x0; + let s = b; + let t = 0x7; + let imm8 = target; + } +} + +def MOVF : RRR_Inst<0x00, 0x03, 0x0C, (outs AR:$r), (ins AR:$s, BR:$t), + "movf\t$r, $s, $t", []>, Requires<[HasBoolean]>; +def MOVT : RRR_Inst<0x00, 0x03, 0x0D, (outs AR:$r), (ins AR:$s, BR:$t), + "movt\t$r, $s, $t", []>, Requires<[HasBoolean]>; + +def ORB : RRR_Inst<0x00, 0x02, 0x02, (outs BR:$r), (ins BR:$s, BR:$t), + "orb\t$r, $s, $t", []>, Requires<[HasBoolean]>; +def ORBC : 
RRR_Inst<0x00, 0x02, 0x03, (outs BR:$r), (ins BR:$s, BR:$t), + "orbc\t$r, $s, $t", []>, Requires<[HasBoolean]>; +def XORB : RRR_Inst<0x00, 0x02, 0x04, (outs BR:$r), (ins BR:$s, BR:$t), + "xorb\t$r, $s, $t", []>, Requires<[HasBoolean]>; diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td index 9939d19ef1907..5b87a83786ac4 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td @@ -75,8 +75,25 @@ class SRReg num, string n, list alt = []> : XtensaReg { // Shift Amount Register def SAR : SRReg<3, "sar", ["SAR","3"]>; +def BREG : SRReg<4, "br", ["BR", "4"]>; + def WINDOWBASE : SRReg<72, "windowbase", ["WINDOWBASE", "72"]>; def WINDOWSTART : SRReg<73, "windowstart", ["WINDOWSTART", "73"]>; def SR : RegisterClass<"Xtensa", [i32], 32, (add SAR, - WINDOWBASE, WINDOWSTART)>; + BREG, WINDOWBASE, WINDOWSTART)>; + +//===----------------------------------------------------------------------===// +// Boolean registers +//===----------------------------------------------------------------------===// +class BReg num, string n> : XtensaReg { + let HWEncoding{3-0} = num; +} + +foreach i = 0-15 in { + def B#i : BReg, DwarfRegNum<[i]>; +} + +// Boolean register class +def BR : RegisterClass<"Xtensa", [i1], 0, (add B0, B1, +B2, B3, B4, B5, B6, B7, B8, B9, B10, B11, B12, B13, B14, B15)>; diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp index 2d47a36ad7b0a..3394858a6f8a5 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp @@ -32,6 +32,7 @@ XtensaSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) { HasDensity = false; HasWindowed = false; + HasBoolean = false; // Parse features string. 
ParseSubtargetFeatures(CPUName, CPUName, FS); diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.h b/llvm/lib/Target/Xtensa/XtensaSubtarget.h index cfa612fe6de16..fab9aa2e103e6 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.h +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.h @@ -42,6 +42,9 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { // Enabled Xtensa Windowed Register option bool HasWindowed; + // Enabled Xtensa Boolean extension + bool HasBoolean; + XtensaSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS); public: @@ -71,6 +74,8 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { bool hasWindowed() const { return HasWindowed; } + bool hasBoolean() const { return HasBoolean; } + // Automatically generated by tblgen. void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS); }; From 69c468368b681da2a12ff4bb0395b79560924e24 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 09:45:34 +0300 Subject: [PATCH 023/289] [Xtensa] Implement Floating-Point feature operations. Also implement User Registers class. 
--- .../Disassembler/XtensaDisassembler.cpp | 54 +++++ .../MCTargetDesc/XtensaMCCodeEmitter.cpp | 2 + llvm/lib/Target/Xtensa/Xtensa.td | 5 + llvm/lib/Target/Xtensa/XtensaCallingConv.td | 1 + llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 50 ++++- llvm/lib/Target/Xtensa/XtensaISelLowering.h | 20 ++ llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp | 13 +- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 198 ++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaOperators.td | 16 ++ llvm/lib/Target/Xtensa/XtensaRegisterInfo.td | 44 ++++ llvm/lib/Target/Xtensa/XtensaSubtarget.cpp | 1 + llvm/lib/Target/Xtensa/XtensaSubtarget.h | 5 + 12 files changed, 403 insertions(+), 6 deletions(-) diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index bf36925c92c6b..26799128dd385 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -75,6 +75,38 @@ static DecodeStatus DecodeARRegisterClass(MCInst &Inst, uint64_t RegNo, return MCDisassembler::Success; } +static const unsigned FPRDecoderTable[] = { + Xtensa::F0, Xtensa::F1, Xtensa::F2, Xtensa::F3, Xtensa::F4, Xtensa::F5, + Xtensa::F6, Xtensa::F7, Xtensa::F8, Xtensa::F9, Xtensa::F10, Xtensa::F11, + Xtensa::F12, Xtensa::F13, Xtensa::F14, Xtensa::F15}; + +static DecodeStatus DecodeFPRRegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo >= std::size(FPRDecoderTable)) + return MCDisassembler::Fail; + + unsigned Reg = FPRDecoderTable[RegNo]; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + +static const unsigned BRDecoderTable[] = { + Xtensa::B0, Xtensa::B1, Xtensa::B2, Xtensa::B3, Xtensa::B4, Xtensa::B5, + Xtensa::B6, Xtensa::B7, Xtensa::B8, Xtensa::B9, Xtensa::B10, Xtensa::B11, + Xtensa::B12, Xtensa::B13, Xtensa::B14, Xtensa::B15}; + +static DecodeStatus DecodeBRRegisterClass(MCInst &Inst, uint64_t RegNo, + 
uint64_t Address, + const void *Decoder) { + if (RegNo >= std::size(BRDecoderTable)) + return MCDisassembler::Fail; + + unsigned Reg = BRDecoderTable[RegNo]; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + static const unsigned SRDecoderTable[] = { Xtensa::SAR, 3, Xtensa::WINDOWBASE, 72, Xtensa::WINDOWSTART, 73}; @@ -95,6 +127,28 @@ static DecodeStatus DecodeSRRegisterClass(MCInst &Inst, uint64_t RegNo, return MCDisassembler::Fail; } +static const unsigned URDecoderTable[] = {Xtensa::FCR, 232, Xtensa::FSR, 233}; + +static DecodeStatus DecodeURRegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + const llvm::MCSubtargetInfo STI = + ((const MCDisassembler *)Decoder)->getSubtargetInfo(); + + if (RegNo > 255) + return MCDisassembler::Fail; + + for (unsigned i = 0; i < std::size(URDecoderTable); i += 2) { + if (URDecoderTable[i + 1] == RegNo) { + unsigned Reg = URDecoderTable[i]; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; + } + } + + return MCDisassembler::Fail; +} + static bool tryAddingSymbolicOperand(int64_t Value, bool isBranch, uint64_t Address, uint64_t Offset, uint64_t InstSize, MCInst &MI, diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp index 110e396247070..1d96955708f93 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp @@ -277,6 +277,8 @@ XtensaMCCodeEmitter::getMemRegEncoding(const MCInst &MI, unsigned OpNo, case Xtensa::L32I: case Xtensa::S32I_N: case Xtensa::L32I_N: + case Xtensa::S32F: + case Xtensa::L32F: if (Res & 0x3) { report_fatal_error("Unexpected operand value!"); } diff --git a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td index ad2c121ef68d0..7c240b52c5b84 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ 
-22,6 +22,11 @@ def FeatureDensity : SubtargetFeature<"density", "HasDensity", "true", def HasDensity : Predicate<"Subtarget->hasDensity()">, AssemblerPredicate<(all_of FeatureDensity)>; +def FeatureSingleFloat : SubtargetFeature<"fp", "HasSingleFloat", "true", + "Enable Xtensa Single FP instructions">; +def HasSingleFloat : Predicate<"Subtarget->hasSingleFloat()">, + AssemblerPredicate<(all_of FeatureSingleFloat)>; + def FeatureWindowed : SubtargetFeature<"windowed", "HasWindowed", "true", "Enable Xtensa Windowed Register option">; def HasWindowed : Predicate<"Subtarget->hasWindowed()">, diff --git a/llvm/lib/Target/Xtensa/XtensaCallingConv.td b/llvm/lib/Target/Xtensa/XtensaCallingConv.td index 01469a0e3eadf..a472bc02642bc 100644 --- a/llvm/lib/Target/Xtensa/XtensaCallingConv.td +++ b/llvm/lib/Target/Xtensa/XtensaCallingConv.td @@ -14,6 +14,7 @@ def RetCC_Xtensa : CallingConv<[ // First two return values go in a2, a3, a4, a5 CCIfType<[i32], CCAssignToReg<[A2, A3, A4, A5]>>, + CCIfType<[f32], CCAssignToReg<[A2, A3, A4, A5]>>, CCIfType<[i64], CCAssignToRegWithShadow<[A2, A4], [A3, A5]>> ]>; diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index f4fec99781983..ee5f3db02e631 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -59,6 +59,10 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, // Set up the register classes. addRegisterClass(MVT::i32, &Xtensa::ARRegClass); + if (Subtarget.hasSingleFloat()) { + addRegisterClass(MVT::f32, &Xtensa::FPRRegClass); + } + // Set up special registers. 
setStackPointerRegisterToSaveRestore(Xtensa::SP); @@ -68,6 +72,8 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setOperationAction(ISD::Constant, MVT::i32, Custom); setOperationAction(ISD::Constant, MVT::i64, Expand); + setOperationAction(ISD::ConstantFP, MVT::f32, Custom); + setOperationAction(ISD::ConstantFP, MVT::f64, Expand); setBooleanContents(ZeroOrOneBooleanContent); @@ -165,6 +171,11 @@ bool XtensaTargetLowering::isOffsetFoldingLegal( return false; } +bool XtensaTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, + bool ForCodeSize) const { + return false; +} + unsigned XtensaTargetLowering::getVaListSizeInBits(const DataLayout &DL) const { // 2 * sizeof(int*) + sizeof(int) return 3 * 4; @@ -293,7 +304,7 @@ static bool CC_Xtensa_Custom(unsigned ValNo, MVT ValVT, MVT LocVT, bool needs64BitAlign = (ValVT == MVT::i32 && OrigAlign == Align(8)); bool needs128BitAlign = (ValVT == MVT::i32 && OrigAlign == Align(16)); - if (ValVT == MVT::i32) { + if (ValVT == MVT::i32 || ValVT == MVT::f32) { Register = State.AllocateReg(IntRegs); // If this is the first part of an i64 arg, // the allocated register must be either A2, A4 or A6. 
@@ -819,6 +830,21 @@ SDValue XtensaTargetLowering::LowerImmediate(SDValue Op, return Op; } +SDValue XtensaTargetLowering::LowerImmediateFP(SDValue Op, + SelectionDAG &DAG) const { + const ConstantFPSDNode *CN = cast(Op); + SDLoc DL(CN); + APFloat apval = CN->getValueAPF(); + int64_t value = llvm::bit_cast(CN->getValueAPF().convertToFloat()); + if (Op.getValueType() == MVT::f32) { + Type *Ty = Type::getInt32Ty(*DAG.getContext()); + Constant *CV = ConstantInt::get(Ty, value); + SDValue CP = DAG.getConstantPool(CV, MVT::i32); + return DAG.getNode(ISD::BITCAST, DL, MVT::f32, CP); + } + return Op; +} + SDValue XtensaTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { const GlobalAddressSDNode *G = cast(Op); @@ -1194,6 +1220,8 @@ SDValue XtensaTargetLowering::LowerOperation(SDValue Op, return LowerBR_JT(Op, DAG); case ISD::Constant: return LowerImmediate(Op, DAG); + case ISD::ConstantFP: + return LowerImmediateFP(Op, DAG); case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); case ISD::GlobalAddress: @@ -1255,6 +1283,26 @@ const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const { return "XtensaISD::SRCL"; case XtensaISD::SRCR: return "XtensaISD::SRCR"; + case XtensaISD::CMPUO: + return "XtensaISD::CMPUO"; + case XtensaISD::CMPUEQ: + return "XtensaISD::CMPUEQ"; + case XtensaISD::CMPULE: + return "XtensaISD::CMPULE"; + case XtensaISD::CMPULT: + return "XtensaISD::CMPULT"; + case XtensaISD::CMPOEQ: + return "XtensaISD::CMPOEQ"; + case XtensaISD::CMPOLE: + return "XtensaISD::CMPOLE"; + case XtensaISD::CMPOLT: + return "XtensaISD::CMPOLT"; + case XtensaISD::MADD: + return "XtensaISD::MADD"; + case XtensaISD::MSUB: + return "XtensaISD::MSUB"; + case XtensaISD::MOVS: + return "XtensaISD::MOVS"; } return nullptr; } diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index 7365aa34d1750..c7eabe6bf79fd 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ 
b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -37,6 +37,21 @@ enum { // of the field [1..16] EXTUI, + // Floating point unordered compare conditions + CMPUEQ, + CMPULE, + CMPULT, + CMPUO, + // Floating point compare conditions + CMPOEQ, + CMPOLE, + CMPOLT, + // FP multipy-add/sub + MADD, + MSUB, + // FP move + MOVS, + MOVSP, // Wraps a TargetGlobalAddress that should be loaded using PC-relative @@ -83,6 +98,9 @@ class XtensaTargetLowering : public TargetLowering { const char *getTargetNodeName(unsigned Opcode) const override; + bool isFPImmLegal(const APFloat &Imm, EVT VT, + bool ForCodeSize) const override; + /// Returns the size of the platform's va_list object. unsigned getVaListSizeInBits(const DataLayout &DL) const override; @@ -138,6 +156,8 @@ class XtensaTargetLowering : public TargetLowering { SDValue LowerImmediate(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerImmediateFP(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const; SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const; diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp index d5ff602bb0be0..5d569d884a6da 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp @@ -150,11 +150,14 @@ void XtensaInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC, unsigned &LoadOpcode, unsigned &StoreOpcode, int64_t offset) const { - assert((RC == &Xtensa::ARRegClass) && - "Unsupported regclass to load or store"); - - LoadOpcode = Xtensa::L32I; - StoreOpcode = Xtensa::S32I; + if (RC == &Xtensa::ARRegClass) { + LoadOpcode = Xtensa::L32I; + StoreOpcode = Xtensa::S32I; + } else if (RC == &Xtensa::FPRRegClass) { + LoadOpcode = Xtensa::L32F; + StoreOpcode = Xtensa::S32F; + } else + llvm_unreachable("Unsupported regclass to load or store"); } void XtensaInstrInfo::loadImmediate(MachineBasicBlock &MBB, diff --git 
a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index fb2f3985f4b0c..22356a867e863 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -850,3 +850,201 @@ def ORBC : RRR_Inst<0x00, 0x02, 0x03, (outs BR:$r), (ins BR:$s, BR:$t), "orbc\t$r, $s, $t", []>, Requires<[HasBoolean]>; def XORB : RRR_Inst<0x00, 0x02, 0x04, (outs BR:$r), (ins BR:$s, BR:$t), "xorb\t$r, $s, $t", []>, Requires<[HasBoolean]>; + +//===----------------------------------------------------------------------===// +// Floating-Point Instructions +//===----------------------------------------------------------------------===// + +class FPArith_RRR oper2, bits<4> oper1, string instrAsm, + SDPatternOperator opNode, bit isComm = 0> + : RRR_Inst<0x00, oper1, oper2, (outs FPR:$r), (ins FPR:$s, FPR:$t), + instrAsm#"\t$r, $s, $t", + [(set FPR:$r, (opNode FPR:$s, FPR:$t))]> { + let isCommutable = isComm; + let isReMaterializable = 0; + let Predicates = [HasSingleFloat]; +} + +def ADD_S : FPArith_RRR<0x00, 0x0A, "add.s", fadd, 1>; +def SUB_S : FPArith_RRR<0x01, 0x0A, "sub.s", fsub>; +def MUL_S : FPArith_RRR<0x02, 0x0A, "mul.s", fmul, 1>; + +def ABS_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "abs.s\t$r, $s", + [(set FPR:$r, (fabs FPR:$s))]> { + let t = 0x01; +} + +def NEG_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "neg.s\t$r, $s", + [(set FPR:$r, (fneg FPR:$s))]> { + let t = 0x06; +} + +def TRUNC_S : RRR_Inst<0x00, 0x0A, 0x09, (outs AR:$r), (ins FPR:$s), + "trunc.s\t$r, $s, 0", + [(set AR:$r, (fp_to_sint FPR:$s))]> { + let t = 0x00; +} + +def UTRUNC_S : RRR_Inst<0x00, 0x0A, 0x0e, (outs AR:$r), (ins FPR:$s), + "utrunc.s\t$r, $s, 0", + [(set AR:$r, (fp_to_uint FPR:$s))]> { + let t = 0x00; +} + +def FLOAT_S : RRR_Inst<0x00, 0x0A, 0x0c, (outs FPR:$r), (ins AR:$s), + "float.s\t$r, $s, 0", + [(set FPR:$r, (sint_to_fp AR:$s))]> { + let t = 0x00; +} + +def UFLOAT_S : RRR_Inst<0x00, 0x0A, 0x0D, 
(outs FPR:$r), (ins AR:$s), + "ufloat.s\t$r, $s, 0", + [(set FPR:$r, (uint_to_fp AR:$s))]> { + let t = 0x00; +} + +def RFR : RRR_Inst<0x00, 0x0A, 0x0f, (outs AR:$r), (ins FPR:$s), + "rfr\t$r, $s", + [(set AR:$r, (bitconvert FPR:$s))]> { + let t = 0x04; +} + +def WFR : RRR_Inst<0x00, 0x0A, 0x0f, (outs FPR:$r), (ins AR:$s), + "wfr\t$r, $s", + [(set FPR:$r, (bitconvert AR:$s))]> { + let t = 0x05; +} + +// FP load instructions +let mayLoad = 1, usesCustomInserter = 1, Predicates = [HasSingleFloat] in { + class LoadF_RRI8 oper, string instrAsm, SDPatternOperator opNode, + ComplexPattern addrOp,Operand memOp>: RRI8_Inst<0x03, (outs FPR:$t), (ins memOp:$addr), + instrAsm#"\t$t, $addr", + [(set FPR:$t, (opNode addrOp:$addr))]> { + bits<12> addr; + + let r = oper; + let imm8{7-0} = addr{11-4}; + let s{3-0} = addr{3-0}; + } +} + +def L32F : LoadF_RRI8<0x00, "lsi", load, addr_ish4, mem32>, Requires<[]>; + +// FP store instructions +let mayStore = 1, usesCustomInserter = 1, Predicates = [HasSingleFloat] in { + class StoreF_RRI8 oper, string instrAsm, SDPatternOperator opNode, + ComplexPattern addrOp, Operand memOp>: RRI8_Inst<0x03, (outs), (ins FPR:$t, memOp:$addr), + instrAsm#"\t$t, $addr", + [(opNode FPR:$t, addrOp:$addr)]> { + bits<12> addr; + + let r = oper; + let imm8{7-0} = addr{11-4}; + let s{3-0} = addr{3-0}; + } +} + +def S32F : StoreF_RRI8<0x04, "ssi", store, addr_ish4, mem32>; + +// FP compare instructions +let isCompare = 1, Predicates = [HasSingleFloat] in { + class FCompare oper2, bits<4> oper1, string instrAsm, + SDPatternOperator opNode, bit isComm = 0> + : RRR_Inst<0x00, oper1, oper2, (outs BR:$r), (ins FPR:$s, FPR:$t), + instrAsm#"\t$r, $s, $t", + [(set BR:$r, (opNode FPR:$s, FPR:$t))]> { + let isCommutable = isComm; + let isReMaterializable = 0; + let Predicates = [HasSingleFloat]; + } +} + +def OEQ_S : FCompare<0x02, 0x0b, "oeq.s", Xtensa_cmpoeq, 1>; +def OLT_S : FCompare<0x04, 0x0b, "olt.s", Xtensa_cmpolt, 0>; +def OLE_S : FCompare<0x06, 0x0b, "ole.s", 
Xtensa_cmpole, 0>; + +def UEQ_S : FCompare<0x03, 0x0b, "ueq.s", Xtensa_cmpueq, 1>; +def ULT_S : FCompare<0x05, 0x0b, "ult.s", Xtensa_cmpult, 0>; +def ULE_S : FCompare<0x07, 0x0b, "ule.s", Xtensa_cmpule, 0>; +def UN_S : FCompare<0x01, 0x0b, "un.s", Xtensa_cmpuo, 1>; + +//FP complex operations +def MADD_S : RRR_Inst<0x00, 0x0A, 0x04, (outs FPR:$r), (ins FPR:$a, FPR:$s, FPR:$t), + "madd.s\t$r, $s, $t", + [(set FPR:$r, (Xtensa_madd FPR:$a, FPR:$s, FPR:$t))]>, + Requires<[HasSingleFloat]> { + let isCommutable = 0; + let isReMaterializable = 0; + let Constraints = "$r = $a"; +} + +def MSUB_S : RRR_Inst<0x00, 0x0A, 0x05, (outs FPR:$r), (ins FPR:$a, FPR:$s, FPR:$t), + "msub.s\t$r, $s, $t", + [(set FPR:$r, (Xtensa_msub FPR:$a, FPR:$s, FPR:$t))]>, + Requires<[HasSingleFloat]> { + let isCommutable = 0; + let isReMaterializable = 0; + let Constraints = "$r = $a"; +} + +//FP move operations +def MOV_S : RRR_Inst<0x00, 0x0A, 0x0f, (outs FPR:$r), (ins FPR:$s), + "mov.s\t$r, $s", + [(set FPR:$r, (Xtensa_movs FPR:$s))]>, Requires<[HasSingleFloat]> +{ + let t = 0x00; +} + +def CONST_S : RRR_Inst<0x00, 0x0a, 0x0f, (outs FPR:$r), (ins uimm4:$imm), + "const.s\t$r, $imm", []>, Requires<[HasSingleFloat]> { + bits<4> imm; + + let t = 0x03; + let s = imm{3-0}; +} + +def DIV0_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "div0.s\t$r, $s", []>, Requires<[HasSingleFloat]> { + let t = 0x7; +} + +def MADDN_S : RRR_Inst<0x00, 0x0A, 0x06, (outs FPR:$r), (ins FPR:$s, FPR:$t), + "maddn.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]> { + let isCommutable = 0; +} + +def MKDADJ_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "mkdadj.s\t$r, $s", []>, Requires<[HasSingleFloat]> { + let t = 0x0D; +} + +def MKSADJ_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "mksadj.s\t$r, $s", []>, Requires<[HasSingleFloat]> { + let t = 0x0C; +} + +def ADDEXP_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "addexp.s\t$r, $s", []>, Requires<[HasSingleFloat]> { + 
let t = 0x0E; +} + +def ADDEXPM_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "addexpm.s\t$r, $s", []>, Requires<[HasSingleFloat]> { + let t = 0x0F; +} + +def DIVN_S : RRR_Inst<0x00, 0x0A, 0x07, (outs FPR:$r), (ins FPR:$s, FPR:$t), + "divn.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; + +def NEXP01_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "nexp01.s\t$r, $s", []>, Requires<[HasSingleFloat]> { + let t = 0x0B; +} + +def SQRT0_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "sqrt0.s\t$r, $s", []>, Requires<[HasSingleFloat]> { + let t = 0x09; +} diff --git a/llvm/lib/Target/Xtensa/XtensaOperators.td b/llvm/lib/Target/Xtensa/XtensaOperators.td index 1e6e4b13aa8b1..5837e48573eb2 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperators.td +++ b/llvm/lib/Target/Xtensa/XtensaOperators.td @@ -26,6 +26,10 @@ def SDT_XtensaSelectCC : SDTypeProfile<1, 5, SDTCisVT<5, i32>]>; def SDT_XtensaMOVSP : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisVT<0, i32>]>; +def SDT_XtensaCmp : SDTypeProfile<1, 2, [SDTCisVT<0, i1>, SDTCisVT<1, f32>, SDTCisVT<2, f32>]>; +def SDT_XtensaMADD : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisVT<0, f32>]>; +def SDT_XtensaMOVS : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisVT<0, f32>]>; + def SDT_XtensaSRC : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, SDTCisVT<2, i32>, SDTCisVT<3, i32>]>; @@ -70,3 +74,15 @@ def Xtensa_extui: SDNode<"XtensaISD::EXTUI", SDT_XtensaEXTUI>; def Xtensa_movsp: SDNode<"XtensaISD::MOVSP", SDT_XtensaMOVSP, [SDNPInGlue]>; + +def Xtensa_cmpoeq : SDNode<"XtensaISD::CMPOEQ", SDT_XtensaCmp, [SDNPOutGlue]>; +def Xtensa_cmpolt : SDNode<"XtensaISD::CMPOLT", SDT_XtensaCmp, [SDNPOutGlue]>; +def Xtensa_cmpole : SDNode<"XtensaISD::CMPOLE", SDT_XtensaCmp, [SDNPOutGlue]>; +def Xtensa_cmpueq : SDNode<"XtensaISD::CMPUEQ", SDT_XtensaCmp, [SDNPOutGlue]>; +def Xtensa_cmpult : SDNode<"XtensaISD::CMPULT", SDT_XtensaCmp, [SDNPOutGlue]>; +def 
Xtensa_cmpule : SDNode<"XtensaISD::CMPULE", SDT_XtensaCmp, [SDNPOutGlue]>; +def Xtensa_cmpuo : SDNode<"XtensaISD::CMPUO", SDT_XtensaCmp, [SDNPOutGlue]>; + +def Xtensa_madd: SDNode<"XtensaISD::MADD", SDT_XtensaMADD, [SDNPInGlue]>; +def Xtensa_msub: SDNode<"XtensaISD::MSUB", SDT_XtensaMADD, [SDNPInGlue]>; +def Xtensa_movs: SDNode<"XtensaISD::MOVS", SDT_XtensaMOVS, [SDNPInGlue]>; diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td index 5b87a83786ac4..18341287347d2 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td @@ -83,6 +83,50 @@ def WINDOWSTART : SRReg<73, "windowstart", ["WINDOWSTART", "73"]>; def SR : RegisterClass<"Xtensa", [i32], 32, (add SAR, BREG, WINDOWBASE, WINDOWSTART)>; +//===----------------------------------------------------------------------===// +// USER registers +//===----------------------------------------------------------------------===// +class URReg num, string n, list alt = []> : XtensaReg { + let HWEncoding{7-0} = num; + let AltNames = alt; +} + +def FCR : URReg<232, "fcr", ["FCR"]>; +def FSR : URReg<233, "fsr", ["FSR"]>; + +def UR : RegisterClass<"Xtensa", [i32], 32, (add FCR, FSR)>; + +//===----------------------------------------------------------------------===// +// Floating-Point registers +//===----------------------------------------------------------------------===// + +// Xtensa Floating-Point regs +class FPReg num, string n> : XtensaReg { + let HWEncoding{3-0} = num; +} + +def F0 : FPReg<0, "f0">, DwarfRegNum<[19]>; +def F1 : FPReg<1, "f1">, DwarfRegNum<[20]>; +def F2 : FPReg<2, "f2">, DwarfRegNum<[21]>; +def F3 : FPReg<3, "f3">, DwarfRegNum<[22]>; +def F4 : FPReg<4, "f4">, DwarfRegNum<[23]>; +def F5 : FPReg<5, "f5">, DwarfRegNum<[24]>; +def F6 : FPReg<6, "f6">, DwarfRegNum<[25]>; +def F7 : FPReg<7, "f7">, DwarfRegNum<[26]>; +def F8 : FPReg<8, "f8">, DwarfRegNum<[27]>; +def F9 : FPReg<9, "f9">, DwarfRegNum<[28]>; +def 
F10 : FPReg<10, "f10">, DwarfRegNum<[29]>; +def F11 : FPReg<11, "f11">, DwarfRegNum<[30]>; +def F12 : FPReg<12, "f12">, DwarfRegNum<[31]>; +def F13 : FPReg<13, "f13">, DwarfRegNum<[32]>; +def F14 : FPReg<14, "f14">, DwarfRegNum<[33]>; +def F15 : FPReg<15, "f15">, DwarfRegNum<[34]>; + +// Floating-Point register class with allocation order +def FPR : RegisterClass<"Xtensa", [f32], 32, (add + F8, F9, F10, F11, F12, F13, F14, F15, + F7, F6, F5, F4, F3, F2, F1, F0)>; + //===----------------------------------------------------------------------===// // Boolean registers //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp index 3394858a6f8a5..eaa95c13ca9ec 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp @@ -31,6 +31,7 @@ XtensaSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) { } HasDensity = false; + HasSingleFloat = false; HasWindowed = false; HasBoolean = false; diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.h b/llvm/lib/Target/Xtensa/XtensaSubtarget.h index fab9aa2e103e6..e0c8bb289b320 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.h +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.h @@ -39,6 +39,9 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { // Enabled Xtensa Density extension bool HasDensity; + // Enabled Xtensa Single FP instructions + bool HasSingleFloat; + // Enabled Xtensa Windowed Register option bool HasWindowed; @@ -72,6 +75,8 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { bool hasDensity() const { return HasDensity; } + bool hasSingleFloat() const { return HasSingleFloat; } + bool hasWindowed() const { return HasWindowed; } bool hasBoolean() const { return HasBoolean; } From 549baa7ec55ae79987a066374fdd0603006184d9 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 14:33:40 +0300 Subject: [PATCH 
024/289] [Xtensa] Lowering Floating-Point Operations SELECT_CC/SETCC/BR_CC. Implement DAG Combine for BRCOND operation with f32 operands. --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 393 +++++++++++++++++- llvm/lib/Target/Xtensa/XtensaISelLowering.h | 11 + llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp | 20 +- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 16 + llvm/lib/Target/Xtensa/XtensaOperators.td | 9 + 5 files changed, 439 insertions(+), 10 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index ee5f3db02e631..90585cfd7cc87 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -106,11 +106,23 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setOperationAction(ISD::BR_CC, MVT::i32, Legal); setOperationAction(ISD::BR_CC, MVT::i64, Expand); - setOperationAction(ISD::BR_CC, MVT::f32, Expand); + if (Subtarget.hasSingleFloat()) + setOperationAction(ISD::BR_CC, MVT::f32, Custom); + else + setOperationAction(ISD::BR_CC, MVT::f32, Expand); setOperationAction(ISD::SELECT, MVT::i32, Expand); setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); setOperationAction(ISD::SETCC, MVT::i32, Expand); + + setOperationAction(ISD::SELECT, MVT::f32, Expand); + if (Subtarget.hasSingleFloat()) { + setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); + setOperationAction(ISD::SETCC, MVT::f32, Custom); + } else { + setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); + setOperationAction(ISD::SETCC, MVT::f32, Expand); + } setCondCodeAction(ISD::SETGT, MVT::i32, Expand); setCondCodeAction(ISD::SETLE, MVT::i32, Expand); @@ -143,6 +155,109 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand); + // Handle floating-point types. 
+ for (unsigned I = MVT::FIRST_FP_VALUETYPE; I <= MVT::LAST_FP_VALUETYPE; ++I) { + MVT VT = MVT::SimpleValueType(I); + if (isTypeLegal(VT)) { + // We can use FI for FRINT. + // setOperationAction(ISD::FRINT, VT, Legal); + if (VT.getSizeInBits() == 32 && Subtarget.hasSingleFloat()) { + setOperationAction(ISD::FABS, VT, Legal); + setOperationAction(ISD::FADD, VT, Legal); + setOperationAction(ISD::FMA, VT, Legal); + setOperationAction(ISD::FMUL, VT, Legal); + setOperationAction(ISD::FNEG, VT, Legal); + setOperationAction(ISD::FSUB, VT, Legal); + } else { + setOperationAction(ISD::FABS, VT, Expand); + setOperationAction(ISD::FADD, VT, Expand); + setOperationAction(ISD::FMA, VT, Expand); + setOperationAction(ISD::FMUL, VT, Expand); + setOperationAction(ISD::FNEG, VT, Expand); + setOperationAction(ISD::FSUB, VT, Expand); + } + + // TODO: once implemented in InstrInfo uncomment + setOperationAction(ISD::FSQRT, VT, Expand); + + // No special instructions for these. + setOperationAction(ISD::FCBRT, VT, Expand); + setOperationAction(ISD::FCEIL, VT, Expand); + setOperationAction(ISD::FCOPYSIGN, VT, Expand); + setOperationAction(ISD::FSIN, VT, Expand); + setOperationAction(ISD::FCOS, VT, Expand); + setOperationAction(ISD::FDIV, VT, Expand); + setOperationAction(ISD::FEXP, VT, Expand); + setOperationAction(ISD::FEXP2, VT, Expand); + setOperationAction(ISD::FFLOOR, VT, Expand); + setOperationAction(ISD::FLOG, VT, Expand); + setOperationAction(ISD::FLOG2, VT, Expand); + setOperationAction(ISD::FLOG10, VT, Expand); + setOperationAction(ISD::FMAXIMUM, VT, Expand); + setOperationAction(ISD::FMINIMUM, VT, Expand); + setOperationAction(ISD::FMAXNUM, VT, Expand); + setOperationAction(ISD::FMINNUM, VT, Expand); + setOperationAction(ISD::FNEARBYINT, VT, Expand); + setOperationAction(ISD::FPOW, VT, Expand); + setOperationAction(ISD::FPOWI, VT, Expand); + setOperationAction(ISD::FREM, VT, Expand); + setOperationAction(ISD::FRINT, VT, Expand); + setOperationAction(ISD::FROUND, VT, Expand); 
+ setOperationAction(ISD::FSIN, VT, Expand); + setOperationAction(ISD::FSINCOS, VT, Expand); + setOperationAction(ISD::FSQRT, VT, Expand); + setOperationAction(ISD::FTRUNC, VT, Expand); + setOperationAction(ISD::LLRINT, VT, Expand); + setOperationAction(ISD::LLROUND, VT, Expand); + setOperationAction(ISD::LRINT, VT, Expand); + setOperationAction(ISD::LROUND, VT, Expand); + } + } + + if (Subtarget.hasSingleFloat()) { + setOperationAction(ISD::BITCAST, MVT::i32, Legal); + setOperationAction(ISD::BITCAST, MVT::f32, Legal); + setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal); + setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal); + setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal); + setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal); + } else { + setOperationAction(ISD::BITCAST, MVT::i32, Expand); + setOperationAction(ISD::BITCAST, MVT::f32, Expand); + setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); + setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand); + setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); + setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand); + } + + setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand); + setOperationAction(ISD::SINT_TO_FP, MVT::i64, Expand); + setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand); + setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand); + + setOperationAction(ISD::SETCC, MVT::f64, Expand); + setOperationAction(ISD::BITCAST, MVT::i64, Expand); + setOperationAction(ISD::BITCAST, MVT::f64, Expand); + + if (Subtarget.hasSingleFloat()) { + setCondCodeAction(ISD::SETOGT, MVT::f32, Expand); + setCondCodeAction(ISD::SETOGE, MVT::f32, Expand); + setCondCodeAction(ISD::SETONE, MVT::f32, Expand); + setCondCodeAction(ISD::SETUGE, MVT::f32, Expand); + setCondCodeAction(ISD::SETUGT, MVT::f32, Expand); + + setTargetDAGCombine(ISD::BRCOND); + } + + // Needed so that we don't try to implement f128 constant loads using + // a load-and-extend of a f80 constant (in cases where the constant + // would fit in an 
f80). + for (MVT VT : MVT::fp_valuetypes()) + setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand); + + // Floating-point truncation and stores need to be done separately. + setTruncStoreAction(MVT::f64, MVT::f32, Expand); + // Implement custom stack allocations setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom); // Implement custom stack save and restore @@ -257,6 +372,50 @@ void XtensaTargetLowering::LowerAsmOperandForConstraint( TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); } +//===----------------------------------------------------------------------===// +// DAG Combine functions +//===----------------------------------------------------------------------===// + +static SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const XtensaSubtarget &Subtarget) { + if (DCI.isBeforeLegalizeOps()) { + SDValue Chain = N->getOperand(0); + + if (N->getOperand(1).getOpcode() != ISD::SETCC) + return SDValue(); + + SDLoc DL(N); + SDValue SetCC = N->getOperand(1); + SDValue Dest = N->getOperand(2); + ISD::CondCode CC = cast(SetCC->getOperand(2))->get(); + SDValue LHS = SetCC->getOperand(0); + SDValue RHS = SetCC->getOperand(1); + + if (LHS.getValueType() != MVT::i32) + return SDValue(); + + return DAG.getNode(ISD::BR_CC, DL, MVT::isVoid, Chain, DAG.getCondCode(CC), + LHS, RHS, Dest); + } + return SDValue(); +} + +SDValue XtensaTargetLowering::PerformDAGCombine(SDNode *N, + DAGCombinerInfo &DCI) const { + SelectionDAG &DAG = DCI.DAG; + unsigned Opc = N->getOpcode(); + + switch (Opc) { + default: + break; + case ISD::BRCOND: + return PerformBRCONDCombine(N, DAG, DCI, Subtarget); + } + + return SDValue(); +} + //===----------------------------------------------------------------------===// // Calling conventions //===----------------------------------------------------------------------===// @@ -742,6 +901,90 @@ XtensaTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, : 
XtensaISD::RET, DL, MVT::Other, RetOps); } + +static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, ISD::CondCode CC, SDLoc dl, + SelectionDAG &DAG, int &br_code) { + // Minor optimization: if LHS is a constant, swap operands, then the + // constant can be folded into comparison. + if (LHS.getOpcode() == ISD::Constant) + std::swap(LHS, RHS); + int cmp_code = 0; + + switch (CC) { + default: + llvm_unreachable("Invalid condition!"); + break; + case ISD::SETUNE: + br_code = XtensaISD::BR_CC_F; + cmp_code = XtensaISD::CMPOEQ; + break; + case ISD::SETUO: + br_code = XtensaISD::BR_CC_T; + cmp_code = XtensaISD::CMPUO; + break; + case ISD::SETO: + br_code = XtensaISD::BR_CC_F; + cmp_code = XtensaISD::CMPUO; + break; + case ISD::SETUEQ: + br_code = XtensaISD::BR_CC_T; + cmp_code = XtensaISD::CMPUEQ; + break; + case ISD::SETULE: + br_code = XtensaISD::BR_CC_T; + cmp_code = XtensaISD::CMPULE; + break; + case ISD::SETULT: + br_code = XtensaISD::BR_CC_T; + cmp_code = XtensaISD::CMPULT; + break; + case ISD::SETEQ: + case ISD::SETOEQ: + br_code = XtensaISD::BR_CC_T; + cmp_code = XtensaISD::CMPOEQ; + break; + case ISD::SETNE: + br_code = XtensaISD::BR_CC_F; + cmp_code = XtensaISD::CMPOEQ; + break; + case ISD::SETLE: + case ISD::SETOLE: + br_code = XtensaISD::BR_CC_T; + cmp_code = XtensaISD::CMPOLE; + break; + case ISD::SETLT: + case ISD::SETOLT: + br_code = XtensaISD::BR_CC_T; + cmp_code = XtensaISD::CMPOLT; + break; + case ISD::SETGE: + br_code = XtensaISD::BR_CC_F; + cmp_code = XtensaISD::CMPOLT; + break; + case ISD::SETGT: + br_code = XtensaISD::BR_CC_F; + cmp_code = XtensaISD::CMPOLE; + break; + } + return DAG.getNode(cmp_code, dl, MVT::i1, LHS, RHS); +} + +SDValue XtensaTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { + SDValue Chain = Op.getOperand(0); + ISD::CondCode CC = cast(Op.getOperand(1))->get(); + SDValue LHS = Op.getOperand(2); + SDValue RHS = Op.getOperand(3); + SDValue Dest = Op.getOperand(4); + SDLoc DL(Op); + + if (LHS.getValueType() == MVT::f32) { 
+ int br_code; + SDValue Flag = EmitCMP(LHS, RHS, CC, DL, DAG, br_code); + return DAG.getNode(br_code, DL, Op.getValueType(), Chain, Flag, Dest); + } else { + llvm_unreachable("invalid BR_CC to lower"); + } +} static unsigned getBranchOpcode(ISD::CondCode Cond) { switch (Cond) { @@ -782,11 +1025,60 @@ SDValue XtensaTargetLowering::LowerSELECT_CC(SDValue Op, unsigned BrOpcode = getBranchOpcode(CC); SDValue TargetCC = DAG.getConstant(BrOpcode, DL, MVT::i32); + SDValue TargetCC_FP = DAG.getConstant(CC, DL, MVT::i32); + if (LHS.getValueType() == MVT::f32 || TrueValue.getValueType() == MVT::f32) + return DAG.getNode(XtensaISD::SELECT_CC_FP, DL, TrueValue.getValueType(), + LHS, RHS, TrueValue, FalseValue, + (LHS.getValueType() == MVT::f32) ? TargetCC_FP + : TargetCC); return DAG.getNode(XtensaISD::SELECT_CC, DL, Ty, LHS, RHS, TrueValue, FalseValue, TargetCC); } +SDValue XtensaTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { + SDLoc DL(Op); + EVT Ty = Op.getOperand(0).getValueType(); + SDValue LHS = Op.getOperand(0); + SDValue RHS = Op.getOperand(1); + ISD::CondCode CC = cast(Op.getOperand(2))->get(); + + unsigned BrOpcode = getBranchOpcode(CC); + SDValue TargetCC = DAG.getConstant(BrOpcode, DL, MVT::i32); + SDValue TargetCC_FP = DAG.getConstant(CC, DL, MVT::i32); + + // Check Op SDNode users + // If there are only CALL/CALLW nodes, don't expand Global Address + SDNode &OpNode = *Op.getNode(); + bool Val = false; + for (SDNode::use_iterator UI = OpNode.use_begin(); UI != OpNode.use_end(); + ++UI) { + SDNode &User = *UI.getUse().getUser(); + unsigned OpCode = User.getOpcode(); + if (OpCode == ISD::BRCOND) { + Val = true; + break; + } + } + + // SETCC has BRCOND predecessor, return original operation + if (Val) + return SDValue(); + + // Expand to target SELECT_CC + SDValue TrueV = DAG.getConstant(1, DL, Op.getValueType()); + SDValue FalseV = DAG.getConstant(0, DL, Op.getValueType()); + + if (LHS.getValueType() == MVT::f32 || TrueV.getValueType() == 
MVT::f32) + return DAG.getNode( + XtensaISD::SELECT_CC_FP, DL, TrueV.getValueType(), LHS, RHS, TrueV, + FalseV, (LHS.getValueType() == MVT::f32) ? TargetCC_FP : TargetCC); + else if (TrueV.getValueType().isVector()) + return SDValue(); + else + llvm_unreachable("Unknown SETCC operand type"); +} + SDValue XtensaTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const { // check the depth @@ -1216,6 +1508,8 @@ bool XtensaTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue XtensaTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { switch (Op.getOpcode()) { + case ISD::BR_CC: + return LowerBR_CC(Op, DAG); case ISD::BR_JT: return LowerBR_JT(Op, DAG); case ISD::Constant: @@ -1236,6 +1530,8 @@ SDValue XtensaTargetLowering::LowerOperation(SDValue Op, return LowerConstantPool(Op, DAG); case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); + case ISD::SETCC: + return LowerSETCC(Op, DAG); case ISD::STACKSAVE: return LowerSTACKSAVE(Op, DAG); case ISD::STACKRESTORE: @@ -1279,6 +1575,12 @@ const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const { return "XtensaISD::RETW"; case XtensaISD::SELECT_CC: return "XtensaISD::SELECT_CC"; + case XtensaISD::SELECT_CC_FP: + return "XtensaISD::SELECT_CC_FP"; + case XtensaISD::BR_CC_T: + return "XtensaISD::BR_CC_T"; + case XtensaISD::BR_CC_F: + return "XtensaISD::BR_CC_F"; case XtensaISD::SRCL: return "XtensaISD::SRCL"; case XtensaISD::SRCR: @@ -1311,6 +1613,65 @@ const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const { // Custom insertion //===----------------------------------------------------------------------===// +static void GetFPBranchKind(int Cond, int &BrKind, int &CmpKind) { + switch (Cond) { + default: + llvm_unreachable("Invalid condition!"); + break; + case ISD::SETUNE: + BrKind = Xtensa::BF; + CmpKind = Xtensa::OEQ_S; + break; + case ISD::SETUO: + BrKind = Xtensa::BT; + CmpKind = Xtensa::UN_S; + break; + case ISD::SETO: + BrKind = 
Xtensa::BF; + CmpKind = Xtensa::UN_S; + break; + case ISD::SETUEQ: + BrKind = Xtensa::BT; + CmpKind = Xtensa::UEQ_S; + break; + case ISD::SETULE: + BrKind = Xtensa::BT; + CmpKind = Xtensa::ULE_S; + break; + case ISD::SETULT: + BrKind = Xtensa::BT; + CmpKind = Xtensa::ULT_S; + break; + case ISD::SETEQ: + case ISD::SETOEQ: + BrKind = Xtensa::BT; + CmpKind = Xtensa::OEQ_S; + break; + case ISD::SETNE: + BrKind = Xtensa::BF; + CmpKind = Xtensa::OEQ_S; + break; + case ISD::SETLE: + case ISD::SETOLE: + BrKind = Xtensa::BT; + CmpKind = Xtensa::OLE_S; + break; + case ISD::SETLT: + case ISD::SETOLT: + BrKind = Xtensa::BT; + CmpKind = Xtensa::OLT_S; + break; + case ISD::SETGE: + BrKind = Xtensa::BF; + CmpKind = Xtensa::OLT_S; + break; + case ISD::SETGT: + BrKind = Xtensa::BF; + CmpKind = Xtensa::OLE_S; + break; + } +} + MachineBasicBlock * XtensaTargetLowering::emitSelectCC(MachineInstr &MI, MachineBasicBlock *MBB) const { @@ -1321,7 +1682,7 @@ XtensaTargetLowering::emitSelectCC(MachineInstr &MI, MachineOperand &RHS = MI.getOperand(2); MachineOperand &TrueValue = MI.getOperand(3); MachineOperand &FalseValue = MI.getOperand(4); - unsigned BrKind = MI.getOperand(5).getImm(); + unsigned Cond = MI.getOperand(5).getImm(); // To "insert" a SELECT_CC instruction, we actually have to insert // CopyMBB and SinkMBB blocks and add branch to MBB. 
We build phi @@ -1353,10 +1714,23 @@ XtensaTargetLowering::emitSelectCC(MachineInstr &MI, MBB->addSuccessor(CopyMBB); MBB->addSuccessor(SinkMBB); - BuildMI(MBB, DL, TII.get(BrKind)) - .addReg(LHS.getReg()) - .addReg(RHS.getReg()) - .addMBB(SinkMBB); + if ((MI.getOpcode() == Xtensa::SELECT_CC_FP_FP) || + (MI.getOpcode() == Xtensa::SELECT_CC_FP_INT)) { + int BrKind = 0; + int CmpKind = 0; + unsigned b = Xtensa::B0; + + GetFPBranchKind(Cond, BrKind, CmpKind); + BuildMI(MBB, DL, TII.get(CmpKind), b) + .addReg(LHS.getReg()) + .addReg(RHS.getReg()); + BuildMI(MBB, DL, TII.get(BrKind)).addReg(b, RegState::Kill).addMBB(SinkMBB); + } else { + BuildMI(MBB, DL, TII.get(Cond)) + .addReg(LHS.getReg()) + .addReg(RHS.getReg()) + .addMBB(SinkMBB); + } CopyMBB->addSuccessor(SinkMBB); @@ -1383,6 +1757,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( DebugLoc DL = MI.getDebugLoc(); switch (MI.getOpcode()) { + case Xtensa::SELECT_CC_FP_FP: + case Xtensa::SELECT_CC_FP_INT: + case Xtensa::SELECT_CC_INT_FP: case Xtensa::SELECT: return emitSelectCC(MI, MBB); case Xtensa::L8I_P: { @@ -1412,11 +1789,13 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( case Xtensa::S16I: case Xtensa::S32I: case Xtensa::S32I_N: + case Xtensa::S32F: case Xtensa::L8UI: case Xtensa::L16SI: case Xtensa::L16UI: case Xtensa::L32I: - case Xtensa::L32I_N: { + case Xtensa::L32I_N: + case Xtensa::L32F: { const MachineMemOperand &MMO = **MI.memoperands_begin(); if (MMO.isVolatile()) { BuildMI(*MBB, MI, DL, TII.get(Xtensa::MEMW)); diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index c7eabe6bf79fd..43c9781762f92 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -23,6 +23,9 @@ namespace llvm { namespace XtensaISD { enum { FIRST_NUMBER = ISD::BUILTIN_OP_END, + BR_CC_T, + BR_CC_F, + BR_JT, // Calls a function. 
Operand 0 is the chain operand and operand 1 @@ -67,6 +70,7 @@ enum { // the lhs and rhs (ops #0 and #1) of a conditional expression with the // condition code in op #4 SELECT_CC, + SELECT_CC_FP, // SRCL(R) performs shift left(right) of the concatenation of 2 registers // and returns high(low) 32-bit part of 64-bit result @@ -119,6 +123,8 @@ class XtensaTargetLowering : public TargetLowering { std::vector &Ops, SelectionDAG &DAG) const override; + SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override; + SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override; SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, @@ -160,6 +166,8 @@ class XtensaTargetLowering : public TargetLowering { SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const; SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const; @@ -169,6 +177,9 @@ class XtensaTargetLowering : public TargetLowering { SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const; + + SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const; diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp index 5d569d884a6da..1245edfa2bb39 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp @@ -112,14 +112,28 @@ void XtensaInstrInfo::copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc) const { - // The MOV instruction is not present in core ISA, - // so use OR instruction. 
- if (Xtensa::ARRegClass.contains(DestReg, SrcReg)) + unsigned Opcode; + + // when we are copying a phys reg we want the bits for fp + if (Xtensa::ARRegClass.contains(DestReg, SrcReg)) { BuildMI(MBB, MBBI, DL, get(Xtensa::OR), DestReg) .addReg(SrcReg, getKillRegState(KillSrc)) .addReg(SrcReg, getKillRegState(KillSrc)); + return; + } else if (STI.hasSingleFloat() && Xtensa::FPRRegClass.contains(SrcReg) && + Xtensa::FPRRegClass.contains(DestReg)) + Opcode = Xtensa::MOV_S; + else if (STI.hasSingleFloat() && Xtensa::FPRRegClass.contains(SrcReg) && + Xtensa::ARRegClass.contains(DestReg)) + Opcode = Xtensa::RFR; + else if (STI.hasSingleFloat() && Xtensa::ARRegClass.contains(SrcReg) && + Xtensa::FPRRegClass.contains(DestReg)) + Opcode = Xtensa::WFR; else report_fatal_error("Impossible reg-to-reg copy"); + + BuildMI(MBB, MBBI, DL, get(Opcode), DestReg) + .addReg(SrcReg, getKillRegState(KillSrc)); } void XtensaInstrInfo::storeRegToStackSlot( diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 22356a867e863..c80bc3b223ec8 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -851,6 +851,9 @@ def ORBC : RRR_Inst<0x00, 0x02, 0x03, (outs BR:$r), (ins BR:$s, BR:$t), def XORB : RRR_Inst<0x00, 0x02, 0x04, (outs BR:$r), (ins BR:$s, BR:$t), "xorb\t$r, $s, $t", []>, Requires<[HasBoolean]>; +def : Pat<(Xtensa_brcc_t BR:$b, bb:$target), (BT BR:$b, bb:$target)>; +def : Pat<(Xtensa_brcc_f BR:$b, bb:$target), (BF BR:$b, bb:$target)>; + //===----------------------------------------------------------------------===// // Floating-Point Instructions //===----------------------------------------------------------------------===// @@ -1048,3 +1051,16 @@ def SQRT0_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), "sqrt0.s\t$r, $s", []>, Requires<[HasSingleFloat]> { let t = 0x09; } + +// FP select operations +let usesCustomInserter = 1 in { + def SELECT_CC_FP_INT : Pseudo<(outs 
AR:$dst), (ins FPR:$lhs, FPR:$rhs, AR:$t, AR:$f, i32imm:$cond), + "!select_cc_fp_int $dst, $lhs, $rhs, $t, $f, $cond", + [(set AR:$dst, (Xtensa_select_cc_fp FPR:$lhs, FPR:$rhs, AR:$t, AR:$f, imm:$cond))]>; + def SELECT_CC_INT_FP : Pseudo<(outs FPR:$dst), (ins AR:$lhs, AR:$rhs, FPR:$t, FPR:$f, i32imm:$cond), + "!select_cc_int_fp $dst, $lhs, $rhs, $t, $f, $cond", + [(set FPR:$dst, (Xtensa_select_cc_fp AR:$lhs, AR:$rhs, FPR:$t, FPR:$f, imm:$cond))]>; + def SELECT_CC_FP_FP : Pseudo<(outs FPR:$dst), (ins FPR:$lhs, FPR:$rhs, FPR:$t, FPR:$f, i32imm:$cond), + "!select_cc_fp_fp $dst, $lhs, $rhs, $t, $f, $cond", + [(set FPR:$dst, (Xtensa_select_cc_fp FPR:$lhs, FPR:$rhs, FPR:$t, FPR:$f, imm:$cond))]>; +} diff --git a/llvm/lib/Target/Xtensa/XtensaOperators.td b/llvm/lib/Target/Xtensa/XtensaOperators.td index 5837e48573eb2..2c82a2bbfbb52 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperators.td +++ b/llvm/lib/Target/Xtensa/XtensaOperators.td @@ -26,9 +26,11 @@ def SDT_XtensaSelectCC : SDTypeProfile<1, 5, SDTCisVT<5, i32>]>; def SDT_XtensaMOVSP : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisVT<0, i32>]>; +def SDT_XtensaBrCC : SDTypeProfile<0, 2, [SDTCisVT<0, i1>, SDTCisVT<1, OtherVT>]>; def SDT_XtensaCmp : SDTypeProfile<1, 2, [SDTCisVT<0, i1>, SDTCisVT<1, f32>, SDTCisVT<2, f32>]>; def SDT_XtensaMADD : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisVT<0, f32>]>; def SDT_XtensaMOVS : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisVT<0, f32>]>; +def SDT_XtensaSelectCCFP : SDTypeProfile<1, 5, [SDTCisSameAs<0, 3>, SDTCisSameAs<1, 2>, SDTCisSameAs<3, 4>, SDTCisVT<5, i32>]>; def SDT_XtensaSRC : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, @@ -65,6 +67,8 @@ def Xtensa_brjt: SDNode<"XtensaISD::BR_JT", SDT_XtensaBrJT, [SDNPHasChain]>; def Xtensa_select_cc: SDNode<"XtensaISD::SELECT_CC", SDT_XtensaSelectCC, [SDNPInGlue]>; +def Xtensa_select_cc_fp: SDNode<"XtensaISD::SELECT_CC_FP", SDT_XtensaSelectCCFP, + [SDNPInGlue]>; def Xtensa_srcl: 
SDNode<"XtensaISD::SRCL", SDT_XtensaSRC>; @@ -75,6 +79,11 @@ def Xtensa_extui: SDNode<"XtensaISD::EXTUI", SDT_XtensaEXTUI>; def Xtensa_movsp: SDNode<"XtensaISD::MOVSP", SDT_XtensaMOVSP, [SDNPInGlue]>; +def Xtensa_brcc_t : SDNode<"XtensaISD::BR_CC_T", SDT_XtensaBrCC, + [SDNPHasChain, SDNPInGlue]>; +def Xtensa_brcc_f : SDNode<"XtensaISD::BR_CC_F", SDT_XtensaBrCC, + [SDNPHasChain, SDNPInGlue]>; + def Xtensa_cmpoeq : SDNode<"XtensaISD::CMPOEQ", SDT_XtensaCmp, [SDNPOutGlue]>; def Xtensa_cmpolt : SDNode<"XtensaISD::CMPOLT", SDT_XtensaCmp, [SDNPOutGlue]>; def Xtensa_cmpole : SDNode<"XtensaISD::CMPOLE", SDT_XtensaCmp, [SDNPOutGlue]>; From 6aabafc6e218d8a57cd17bcb37cab426921916d4 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 14:35:19 +0300 Subject: [PATCH 025/289] [Xtensa] Implement DAG Combine for FADD and FSUB operations. --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 72 +++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaISelLowering.h | 3 + 2 files changed, 75 insertions(+) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 90585cfd7cc87..89b66137c6f76 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -246,6 +246,8 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setCondCodeAction(ISD::SETUGE, MVT::f32, Expand); setCondCodeAction(ISD::SETUGT, MVT::f32, Expand); + setTargetDAGCombine(ISD::FADD); + setTargetDAGCombine(ISD::FSUB); setTargetDAGCombine(ISD::BRCOND); } @@ -280,6 +282,21 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, } } +bool XtensaTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, + EVT VT) const { + if (!VT.isSimple()) + return false; + + switch (VT.getSimpleVT().SimpleTy) { + case MVT::f32: + return Subtarget.hasSingleFloat(); + default: + break; + } + + return false; +} + bool XtensaTargetLowering::isOffsetFoldingLegal( const 
GlobalAddressSDNode *GA) const { // The Xtensa target isn't yet aware of offsets. @@ -376,6 +393,57 @@ void XtensaTargetLowering::LowerAsmOperandForConstraint( // DAG Combine functions //===----------------------------------------------------------------------===// +static SDValue performMADD_MSUBCombine(SDNode *ROOTNode, SelectionDAG &CurDAG, + const XtensaSubtarget &Subtarget) { + if (ROOTNode->getOperand(0).getValueType() != MVT::f32) + return SDValue(); + + if (ROOTNode->getOperand(0).getOpcode() != ISD::FMUL && + ROOTNode->getOperand(1).getOpcode() != ISD::FMUL) + return SDValue(); + + SDValue Mult = ROOTNode->getOperand(0).getOpcode() == ISD::FMUL + ? ROOTNode->getOperand(0) + : ROOTNode->getOperand(1); + + SDValue AddOperand = ROOTNode->getOperand(0).getOpcode() == ISD::FMUL + ? ROOTNode->getOperand(1) + : ROOTNode->getOperand(0); + + if (!Mult.hasOneUse()) + return SDValue(); + + SDLoc DL(ROOTNode); + + bool IsAdd = ROOTNode->getOpcode() == ISD::FADD; + unsigned Opcode = IsAdd ? 
XtensaISD::MADD : XtensaISD::MSUB; + SDValue MAddOps[3] = {AddOperand, Mult->getOperand(0), Mult->getOperand(1)}; + EVT VTs[3] = {MVT::f32, MVT::f32, MVT::f32}; + SDValue MAdd = CurDAG.getNode(Opcode, DL, VTs, MAddOps); + + return MAdd; +} + +static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const XtensaSubtarget &Subtarget) { + if (DCI.isBeforeLegalizeOps()) { + if (Subtarget.hasSingleFloat() && N->getValueType(0) == MVT::f32) + return performMADD_MSUBCombine(N, DAG, Subtarget); + } + return SDValue(); +} + +static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const XtensaSubtarget &Subtarget) { + if (DCI.isBeforeLegalizeOps()) { + if (Subtarget.hasSingleFloat() && N->getValueType(0) == MVT::f32) + return performMADD_MSUBCombine(N, DAG, Subtarget); + } + return SDValue(); +} + static SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const XtensaSubtarget &Subtarget) { @@ -409,6 +477,10 @@ SDValue XtensaTargetLowering::PerformDAGCombine(SDNode *N, switch (Opc) { default: break; + case ISD::FADD: + return performADDCombine(N, DAG, DCI, Subtarget); + case ISD::FSUB: + return performSUBCombine(N, DAG, DCI, Subtarget); case ISD::BRCOND: return PerformBRCONDCombine(N, DAG, DCI, Subtarget); } diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index 43c9781762f92..b4e5a9b093b61 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -98,6 +98,9 @@ class XtensaTargetLowering : public TargetLowering { return VT.changeVectorElementTypeToInteger(); } + bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, + EVT VT) const override; + bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override; const char *getTargetNodeName(unsigned Opcode) const override; From 5115e3e20dd832504562bfdfcc5a9402c2f18d8d Mon 
Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 14:53:05 +0300 Subject: [PATCH 026/289] [Xtensa] Implement Loop, SEXT and NSA features. --- .../Xtensa/AsmParser/XtensaAsmParser.cpp | 5 ++ .../Disassembler/XtensaDisassembler.cpp | 12 +++- .../Xtensa/MCTargetDesc/XtensaInstPrinter.cpp | 11 ++++ .../Xtensa/MCTargetDesc/XtensaInstPrinter.h | 1 + .../MCTargetDesc/XtensaMCCodeEmitter.cpp | 20 ++++++- llvm/lib/Target/Xtensa/Xtensa.td | 15 +++++ llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 18 ++++-- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 56 +++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaOperands.td | 8 +++ llvm/lib/Target/Xtensa/XtensaRegisterInfo.td | 8 ++- llvm/lib/Target/Xtensa/XtensaSubtarget.cpp | 3 + llvm/lib/Target/Xtensa/XtensaSubtarget.h | 15 +++++ 12 files changed, 163 insertions(+), 9 deletions(-) diff --git a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp index accd7758f93db..fbd168d304aa8 100644 --- a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp +++ b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp @@ -264,6 +264,8 @@ struct XtensaOperand : public MCParsedAsmOperand { return false; } + bool isseimm7_22() const { return isImm(7, 22); } + /// getStartLoc - Gets location of the first token of this operand SMLoc getStartLoc() const override { return StartLoc; } /// getEndLoc - Gets location of the last token of this operand @@ -530,6 +532,9 @@ bool XtensaAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, case Match_Invalidentry_imm12: return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), "expected immediate in range [0, 32760]"); + case Match_Invalidseimm7_22: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [7, 22]"); } report_fatal_error("Unknown match type detected!"); diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp 
index 26799128dd385..0e684250b7b31 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -108,7 +108,9 @@ static DecodeStatus DecodeBRRegisterClass(MCInst &Inst, uint64_t RegNo, } static const unsigned SRDecoderTable[] = { - Xtensa::SAR, 3, Xtensa::WINDOWBASE, 72, Xtensa::WINDOWSTART, 73}; + Xtensa::LBEG, 0, Xtensa::LEND, 1, Xtensa::LCOUNT, 2, + Xtensa::SAR, 3, Xtensa::BREG, 4, Xtensa ::WINDOWBASE, 72, + Xtensa::WINDOWSTART, 73}; static DecodeStatus DecodeSRRegisterClass(MCInst &Inst, uint64_t RegNo, uint64_t Address, @@ -301,6 +303,14 @@ static DecodeStatus decodeShimm1_31Operand(MCInst &Inst, uint64_t Imm, return MCDisassembler::Success; } +static DecodeStatus decodeSeimm7_22Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<4>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm + 7)); + return MCDisassembler::Success; +} + static int64_t TableB4const[16] = {-1, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256}; static DecodeStatus decodeB4constOperand(MCInst &Inst, uint64_t Imm, diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp index d1308ed56aa00..89343d203e9eb 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp @@ -413,3 +413,14 @@ void XtensaInstPrinter::printB4constu_AsmOperand(const MCInst *MI, int OpNum, } else printOperand(MI, OpNum, O); } + +void XtensaInstPrinter::printSeimm7_22_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 7 && Value <= 22) && + "Invalid argument, value must be in range <7,22>"); + O << Value; + } else + printOperand(MI, OpNum, O); +} diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h 
b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h index f8a9f592e0110..f6858b383cbf1 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h @@ -71,6 +71,7 @@ class XtensaInstPrinter : public MCInstPrinter { void printEntry_Imm12_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printB4const_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printB4constu_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printSeimm7_22_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); }; } // end namespace llvm diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp index 1d96955708f93..eadcae39255d5 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp @@ -134,6 +134,10 @@ class XtensaMCCodeEmitter : public MCCodeEmitter { uint32_t getB4constuOpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; + + uint32_t getSeimm7_22OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; }; } // namespace @@ -285,7 +289,7 @@ XtensaMCCodeEmitter::getMemRegEncoding(const MCInst &MI, unsigned OpNo, Res >>= 2; break; } - + switch (MI.getOpcode()) { case Xtensa::S32I_N: case Xtensa::L32I_N: @@ -551,4 +555,18 @@ XtensaMCCodeEmitter::getB4constuOpValue(const MCInst &MI, unsigned OpNo, return Res; } + +uint32_t +XtensaMCCodeEmitter::getSeimm7_22OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + uint32_t res = static_cast(MO.getImm()); + + res -= 7; + assert(((res & 0xf) == res) && "Unexpected operand value!"); + + return res; +} + #include "XtensaGenMCCodeEmitter.inc" diff --git a/llvm/lib/Target/Xtensa/Xtensa.td 
b/llvm/lib/Target/Xtensa/Xtensa.td index 7c240b52c5b84..cb4b86f1c4616 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -37,6 +37,21 @@ def FeatureBoolean : SubtargetFeature<"bool", "HasBoolean", "true", def HasBoolean : Predicate<"Subtarget->hasBoolean()">, AssemblerPredicate<(all_of FeatureBoolean)>; +def FeatureLoop : SubtargetFeature<"loop", "HasLoop", "true", + "Enable Xtensa Loop extension">; +def HasLoop : Predicate<"Subtarget->hasLoop()">, + AssemblerPredicate<(all_of FeatureLoop)>; + +def FeatureSEXT : SubtargetFeature<"sext", "HasSEXT", "true", + "Enable Xtensa Sign Extend option">; +def HasSEXT : Predicate<"Subtarget->hasSEXT()">, + AssemblerPredicate<(all_of FeatureSEXT)>; + +def FeatureNSA : SubtargetFeature<"nsa", "HasNSA", "true", + "Enable Xtensa NSA option">; +def HasNSA : Predicate<"Subtarget->hasNSA()">, + AssemblerPredicate<(all_of FeatureNSA)>; + //===----------------------------------------------------------------------===// // Xtensa supported processors. 
//===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 89b66137c6f76..76717b43c256d 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -1849,14 +1849,22 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( BuildMI(*MBB, MI, DL, TII.get(Xtensa::L8UI), R1).add(Op1).add(Op2); - unsigned R2 = MRI.createVirtualRegister(RC); - BuildMI(*MBB, MI, DL, TII.get(Xtensa::SLLI), R2).addReg(R1).addImm(24); - BuildMI(*MBB, MI, DL, TII.get(Xtensa::SRAI), R.getReg()) - .addReg(R2) - .addImm(24); + if (Subtarget.hasSEXT()) { + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SEXT), R.getReg()) + .addReg(R1) + .addImm(7); + } else { + unsigned R2 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SLLI), R2).addReg(R1).addImm(24); + BuildMI(*MBB, MI, DL, TII.get(Xtensa::SRAI), R.getReg()) + .addReg(R2) + .addImm(24); + } + MI.eraseFromParent(); return MBB; } + case Xtensa::S8I: case Xtensa::S16I: case Xtensa::S32I: diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index c80bc3b223ec8..12ed8e22276fa 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1064,3 +1064,59 @@ let usesCustomInserter = 1 in { "!select_cc_fp_fp $dst, $lhs, $rhs, $t, $f, $cond", [(set FPR:$dst, (Xtensa_select_cc_fp FPR:$lhs, FPR:$rhs, FPR:$t, FPR:$f, imm:$cond))]>; } + +//===----------------------------------------------------------------------===// +// Loop Instructions +//===----------------------------------------------------------------------===// + +def LOOP : RRI8_Inst<0x06, (outs), (ins AR:$s, mem8:$uimm8), + "loop\t$$s, $uimm8", []>, Requires<[HasLoop]> { + bits<8> uimm8; + + let r = 0x08; + let t = 0x07; + let imm8 = uimm8; +} + +def LOOPGTZ : RRI8_Inst<0x06, (outs), (ins 
AR:$s, mem8:$uimm8), + "loopgtz\t$$s, $uimm8", []>, Requires<[HasLoop]> { + bits<8> uimm8; + + let r = 0x0A; + let t = 0x07; + let imm8 = uimm8; +} + +def LOOPNEZ : RRI8_Inst<0x06, (outs), (ins AR:$s, mem8:$uimm8), + "loopnez\t$$s, $uimm8", []>, Requires<[HasLoop]> { + bits<8> uimm8; + + let r = 0x09; + let t = 0x07; + let imm8 = uimm8; +} + +//===----------------------------------------------------------------------===// +// SEXT Instructions +//===----------------------------------------------------------------------===// + +def SEXT : RRR_Inst<0x00, 0x03, 0x02, (outs AR:$r), (ins AR:$s, seimm7_22:$imm), + "sext\t$r, $s, $imm", []>, Requires<[HasSEXT]> { + bits<4> imm; + + let t = imm; +} + +//===----------------------------------------------------------------------===// +// NSA Instructions +//===----------------------------------------------------------------------===// + +def NSA : RRR_Inst<0x00, 0x00, 0x04, (outs AR:$t), (ins AR:$s), + "nsa\t$t, $s", []>, Requires<[HasNSA]> { + let r = 0xE; +} + +def NSAU : RRR_Inst<0x00, 0x00, 0x04, (outs AR:$t), (ins AR:$s), + "nsau\t$t, $s", []>, Requires<[HasNSA]> { + let r = 0xF; +} diff --git a/llvm/lib/Target/Xtensa/XtensaOperands.td b/llvm/lib/Target/Xtensa/XtensaOperands.td index dd12bd2390499..31ca787441f8e 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperands.td +++ b/llvm/lib/Target/Xtensa/XtensaOperands.td @@ -167,6 +167,14 @@ def b4constu: Immediate; +def seimm7_22: Immediate= 7 && Imm <= 22; }], "Seimm7_22_AsmOperand"> { + let EncoderMethod = "getSeimm7_22OpValue"; + let DecoderMethod = "decodeSeimm7_22Operand"; +} + //===----------------------------------------------------------------------===// // Memory address operands //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td index 18341287347d2..d7fbdcff09b98 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td +++ 
b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td @@ -72,6 +72,10 @@ class SRReg num, string n, list alt = []> : XtensaReg { let AltNames = alt; } +def LBEG : SRReg<0, "lbeg", ["LBEG", "0"]>; +def LEND : SRReg<1, "lend", ["LEND", "1"]>; +def LCOUNT : SRReg<2, "lcount", ["LCOUNT", "2"]>; + // Shift Amount Register def SAR : SRReg<3, "sar", ["SAR","3"]>; @@ -80,8 +84,8 @@ def BREG : SRReg<4, "br", ["BR", "4"]>; def WINDOWBASE : SRReg<72, "windowbase", ["WINDOWBASE", "72"]>; def WINDOWSTART : SRReg<73, "windowstart", ["WINDOWSTART", "73"]>; -def SR : RegisterClass<"Xtensa", [i32], 32, (add SAR, - BREG, WINDOWBASE, WINDOWSTART)>; +def SR : RegisterClass<"Xtensa", [i32], 32, (add LBEG, LEND, LCOUNT, + SAR, BREG, WINDOWBASE, WINDOWSTART)>; //===----------------------------------------------------------------------===// // USER registers diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp index eaa95c13ca9ec..23cf3528dc214 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp @@ -34,6 +34,9 @@ XtensaSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) { HasSingleFloat = false; HasWindowed = false; HasBoolean = false; + HasLoop = false; + HasSEXT = false; + HasNSA = false; // Parse features string. 
ParseSubtargetFeatures(CPUName, CPUName, FS); diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.h b/llvm/lib/Target/Xtensa/XtensaSubtarget.h index e0c8bb289b320..b942cfe3b2754 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.h +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.h @@ -48,6 +48,15 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { // Enabled Xtensa Boolean extension bool HasBoolean; + // Enabled Xtensa Loop extension + bool HasLoop; + + // Enable Xtensa Sign Extend option + bool HasSEXT; + + // Enable Xtensa NSA option + bool HasNSA; + XtensaSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS); public: @@ -81,6 +90,12 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { bool hasBoolean() const { return HasBoolean; } + bool hasLoop() const { return HasLoop; } + + bool hasSEXT() const { return HasSEXT; } + + bool hasNSA() const { return HasNSA; } + // Automatically generated by tblgen. void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS); }; From d34a88852ba757d056f53a6c8475711d332267e1 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 15:16:36 +0300 Subject: [PATCH 027/289] [Xtensa] Implement Mul32, Mul32High and Div32 features. 
--- llvm/lib/Target/Xtensa/Xtensa.td | 15 ++++++++++ llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 30 ++++++++++++++----- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 17 +++++++++++ llvm/lib/Target/Xtensa/XtensaSubtarget.cpp | 3 ++ llvm/lib/Target/Xtensa/XtensaSubtarget.h | 15 ++++++++++ 5 files changed, 73 insertions(+), 7 deletions(-) diff --git a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td index cb4b86f1c4616..6711bdd1baa0c 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -52,6 +52,21 @@ def FeatureNSA : SubtargetFeature<"nsa", "HasNSA", "true", def HasNSA : Predicate<"Subtarget->hasNSA()">, AssemblerPredicate<(all_of FeatureNSA)>; +def FeatureMul32 : SubtargetFeature<"mul32", "HasMul32", "true", + "Enable Xtensa Mul32 option">; +def HasMul32 : Predicate<"Subtarget->hasMul32()">, + AssemblerPredicate<(all_of FeatureMul32)>; + +def FeatureMul32High : SubtargetFeature<"mul32high", "HasMul32High", "true", + "Enable Xtensa Mul32High option">; +def HasMul32High : Predicate<"Subtarget->hasMul32High()">, + AssemblerPredicate<(all_of FeatureMul32High)>; + +def FeatureDiv32 : SubtargetFeature<"div32", "HasDiv32", "true", + "Enable Xtensa Div32 option">; +def HasDiv32 : Predicate<"Subtarget->hasDiv32()">, + AssemblerPredicate<(all_of FeatureDiv32)>; + //===----------------------------------------------------------------------===// // Xtensa supported processors. 
//===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 76717b43c256d..342cf54b02f52 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -129,16 +129,32 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setCondCodeAction(ISD::SETUGT, MVT::i32, Expand); setCondCodeAction(ISD::SETULE, MVT::i32, Expand); - setOperationAction(ISD::MUL, MVT::i32, Expand); - setOperationAction(ISD::MULHU, MVT::i32, Expand); - setOperationAction(ISD::MULHS, MVT::i32, Expand); + if (Subtarget.hasMul32()) + setOperationAction(ISD::MUL, MVT::i32, Legal); + else + setOperationAction(ISD::MUL, MVT::i32, Expand); + + if (Subtarget.hasMul32High()) { + setOperationAction(ISD::MULHU, MVT::i32, Legal); + setOperationAction(ISD::MULHS, MVT::i32, Legal); + } else { + setOperationAction(ISD::MULHU, MVT::i32, Expand); + setOperationAction(ISD::MULHS, MVT::i32, Expand); + } setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); - setOperationAction(ISD::SDIV, MVT::i32, Expand); - setOperationAction(ISD::UDIV, MVT::i32, Expand); - setOperationAction(ISD::SREM, MVT::i32, Expand); - setOperationAction(ISD::UREM, MVT::i32, Expand); + if (Subtarget.hasDiv32()) { + setOperationAction(ISD::SDIV, MVT::i32, Legal); + setOperationAction(ISD::UDIV, MVT::i32, Legal); + setOperationAction(ISD::SREM, MVT::i32, Legal); + setOperationAction(ISD::UREM, MVT::i32, Legal); + } else { + setOperationAction(ISD::SDIV, MVT::i32, Expand); + setOperationAction(ISD::UDIV, MVT::i32, Expand); + setOperationAction(ISD::SREM, MVT::i32, Expand); + setOperationAction(ISD::UREM, MVT::i32, Expand); + } setOperationAction(ISD::SDIVREM, MVT::i32, Expand); setOperationAction(ISD::UDIVREM, MVT::i32, Expand); diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td 
b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 12ed8e22276fa..f1e2066872652 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1120,3 +1120,20 @@ def NSAU : RRR_Inst<0x00, 0x00, 0x04, (outs AR:$t), (ins AR:$s), "nsau\t$t, $s", []>, Requires<[HasNSA]> { let r = 0xF; } + +//===----------------------------------------------------------------------===// +// Mul32 Instructions +//===----------------------------------------------------------------------===// + +def MULL : ArithLogic_RRR<0x08, 0x02, "mull", mul, 1>, Requires<[HasMul32]>; +def MULUH : ArithLogic_RRR<0x0A, 0x02, "muluh", mulhu, 1>, Requires<[HasMul32High]>; +def MULSH : ArithLogic_RRR<0x0B, 0x02, "mulsh", mulhs, 1>, Requires<[HasMul32High]>; + +//===----------------------------------------------------------------------===// +// Div32 Instructions +//===----------------------------------------------------------------------===// + +def QUOS : ArithLogic_RRR<0x0D, 0x02, "quos", sdiv>, Requires<[HasDiv32]>; +def QUOU : ArithLogic_RRR<0x0C, 0x02, "quou", udiv>, Requires<[HasDiv32]>; +def REMS : ArithLogic_RRR<0x0F, 0x02, "rems", srem>, Requires<[HasDiv32]>; +def REMU : ArithLogic_RRR<0x0E, 0x02, "remu", urem>, Requires<[HasDiv32]>; diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp index 23cf3528dc214..77971c75a94af 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp @@ -37,6 +37,9 @@ XtensaSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) { HasLoop = false; HasSEXT = false; HasNSA = false; + HasMul32 = false; + HasMul32High = false; + HasDiv32 = false; // Parse features string. 
ParseSubtargetFeatures(CPUName, CPUName, FS); diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.h b/llvm/lib/Target/Xtensa/XtensaSubtarget.h index b942cfe3b2754..23ae494cc456e 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.h +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.h @@ -57,6 +57,15 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { // Enable Xtensa NSA option bool HasNSA; + // Enable Xtensa Mul32 option + bool HasMul32; + + // Enable Xtensa Mul32High option + bool HasMul32High; + + // Enable Xtensa Div32 option + bool HasDiv32; + XtensaSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS); public: @@ -96,6 +105,12 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { bool hasNSA() const { return HasNSA; } + bool hasMul32() const { return HasMul32; } + + bool hasMul32High() const { return HasMul32High; } + + bool hasDiv32() const { return HasDiv32; } + // Automatically generated by tblgen. void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS); }; From 403ab84289b183f52396d7de78a91092936fcd9b Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 15:32:22 +0300 Subject: [PATCH 028/289] [Xtensa] Implement Mac16 feature and operations. 
--- .../Disassembler/XtensaDisassembler.cpp | 47 ++- llvm/lib/Target/Xtensa/Xtensa.td | 5 + llvm/lib/Target/Xtensa/XtensaDSPInstrInfo.td | 353 ++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 5 + llvm/lib/Target/Xtensa/XtensaRegisterInfo.td | 12 +- llvm/lib/Target/Xtensa/XtensaSubtarget.cpp | 1 + llvm/lib/Target/Xtensa/XtensaSubtarget.h | 5 + llvm/test/MC/Xtensa/xtensa-valid-mac16.s | 234 ++++++++++++ 8 files changed, 658 insertions(+), 4 deletions(-) create mode 100644 llvm/lib/Target/Xtensa/XtensaDSPInstrInfo.td create mode 100644 llvm/test/MC/Xtensa/xtensa-valid-mac16.s diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index 0e684250b7b31..5abb901f16f63 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -107,10 +107,51 @@ static DecodeStatus DecodeBRRegisterClass(MCInst &Inst, uint64_t RegNo, return MCDisassembler::Success; } +static const unsigned MRDecoderTable[] = {Xtensa::M0, Xtensa::M1, Xtensa::M2, + Xtensa::M3}; + +static DecodeStatus DecodeMRRegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo >= std::size(MRDecoderTable)) + return MCDisassembler::Fail; + + unsigned Reg = MRDecoderTable[RegNo]; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + +static const unsigned MR01DecoderTable[] = {Xtensa::M0, Xtensa::M1}; + +static DecodeStatus DecodeMR01RegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo > 2) + return MCDisassembler::Fail; + + unsigned Reg = MR01DecoderTable[RegNo]; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + +static const unsigned MR23DecoderTable[] = {Xtensa::M2, Xtensa::M3}; + +static DecodeStatus DecodeMR23RegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) 
{ + if ((RegNo < 2) || (RegNo > 3)) + return MCDisassembler::Fail; + + unsigned Reg = MR23DecoderTable[RegNo - 2]; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + static const unsigned SRDecoderTable[] = { - Xtensa::LBEG, 0, Xtensa::LEND, 1, Xtensa::LCOUNT, 2, - Xtensa::SAR, 3, Xtensa::BREG, 4, Xtensa ::WINDOWBASE, 72, - Xtensa::WINDOWSTART, 73}; + Xtensa::LEND, 1, Xtensa::LCOUNT, 2, Xtensa::SAR, 3, + Xtensa::BREG, 4, Xtensa::ACCLO, 16, Xtensa::ACCHI, 17, + Xtensa::M0, 32, Xtensa::M1, 33, Xtensa::M2, 34, + Xtensa::M3, 35, Xtensa ::WINDOWBASE, 72, Xtensa::WINDOWSTART, 73}; static DecodeStatus DecodeSRRegisterClass(MCInst &Inst, uint64_t RegNo, uint64_t Address, diff --git a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td index 6711bdd1baa0c..7cd4a2d89c3b5 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -67,6 +67,11 @@ def FeatureDiv32 : SubtargetFeature<"div32", "HasDiv32", "true", def HasDiv32 : Predicate<"Subtarget->hasDiv32()">, AssemblerPredicate<(all_of FeatureDiv32)>; +def FeatureMAC16 : SubtargetFeature<"mac16", "HasMAC16", "true", + "Enable Xtensa MAC16 instructions">; +def HasMAC16 : Predicate<"Subtarget->hasMAC16()">, + AssemblerPredicate<(all_of FeatureMAC16)>; + //===----------------------------------------------------------------------===// // Xtensa supported processors. //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaDSPInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaDSPInstrInfo.td new file mode 100644 index 0000000000000..d80df46320643 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaDSPInstrInfo.td @@ -0,0 +1,353 @@ +//===- XtensaDSPInstrInfo.td - Xtensa Target Description ---*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file describes the Xtensa DSP instructions in TableGen format. +// +//===----------------------------------------------------------------------===// + +// Multiply +class UMUL_AA oper1, string instrAsm> + : RRR_Inst<0x04, oper1, 0x07, (outs), (ins AR:$s, AR:$t), + instrAsm#"\t$s, $t", []>, Requires<[HasMAC16]> { + let r = 0; + let Defs = [M1, M2, ACCLO, ACCHI]; +} + +def UMUL_AA_LL : UMUL_AA<0x00, "umul.aa.ll">; +def UMUL_AA_HL : UMUL_AA<0x01, "umul.aa.hl">; +def UMUL_AA_LH : UMUL_AA<0x02, "umul.aa.lh">; +def UMUL_AA_HH : UMUL_AA<0x03, "umul.aa.hh">; + +class MUL_AA oper1, string instrAsm> + : RRR_Inst<0x04, oper1, 0x07, (outs), (ins AR:$s, AR:$t), + instrAsm#"\t$s, $t", []>, Requires<[HasMAC16]> { + let r = 0; + let Defs = [M1, M2, ACCLO, ACCHI]; +} + +def MUL_AA_LL : MUL_AA<0x04, "mul.aa.ll">; +def MUL_AA_HL : MUL_AA<0x05, "mul.aa.hl">; +def MUL_AA_LH : MUL_AA<0x06, "mul.aa.lh">; +def MUL_AA_HH : MUL_AA<0x07, "mul.aa.hh">; + +class MUL_AD oper1, string instrAsm> + : RRR_Inst<0x04, oper1, 0x03, (outs), (ins AR:$s, MR23:$y), + instrAsm#"\t$s, $y", []>, Requires<[HasMAC16]> { + bits<2> y; + + let r = 0; + let t{3} = 0; + let t{2} = y{0}; + let t{1-0} = 0; + let Defs = [M1, M2, ACCLO, ACCHI]; +} + +def MUL_AD_LL : MUL_AD<0x04, "mul.ad.ll">; +def MUL_AD_HL : MUL_AD<0x05, "mul.ad.hl">; +def MUL_AD_LH : MUL_AD<0x06, "mul.ad.lh">; +def MUL_AD_HH : MUL_AD<0x07, "mul.ad.hh">; + +class MUL_DA oper1, string instrAsm> + : RRR_Inst<0x04, oper1, 0x06, (outs), (ins MR01:$x, AR:$t), + instrAsm#"\t$x, $t", []>, Requires<[HasMAC16]> { + bits<2> x; + + let r{3} = 0; + let r{2} = x{0}; + let r{1-0} = 0; + let s = 0; + let Defs = [M1, M2, ACCLO, ACCHI]; +} + +def MUL_DA_LL : MUL_DA<0x04, "mul.da.ll">; +def MUL_DA_HL : MUL_DA<0x05, "mul.da.hl">; +def MUL_DA_LH : MUL_DA<0x06, "mul.da.lh">; +def MUL_DA_HH : MUL_DA<0x07, 
"mul.da.hh">; + +class MUL_DD oper1, string instrAsm> + : RRR_Inst<0x04, oper1, 0x02, (outs), (ins MR01:$x, MR23:$y), + instrAsm#"\t$x, $y", []>, Requires<[HasMAC16]> { + bits<2> x; + bits<2> y; + + let r{3} = 0; + let r{2} = x{0}; + let r{1-0} = 0; + let s = 0; + let t{3} = 0; + let t{2} = y{0}; + let t{1-0} = 0; + let Defs = [M1, M2, ACCLO, ACCHI]; +} + +def MUL_DD_LL : MUL_DD<0x04, "mul.dd.ll">; +def MUL_DD_HL : MUL_DD<0x05, "mul.dd.hl">; +def MUL_DD_LH : MUL_DD<0x06, "mul.dd.lh">; +def MUL_DD_HH : MUL_DD<0x07, "mul.dd.hh">; + +class MULA_AA oper1, string instrAsm> + : RRR_Inst<0x04, oper1, 0x07, (outs), (ins AR:$s, AR:$t), + instrAsm#"\t$s, $t", []>, Requires<[HasMAC16]> { + let r = 0; + let Defs = [M1, M2, ACCLO, ACCHI]; +} + +def MULA_AA_LL : MULA_AA<0x08, "mula.aa.ll">; +def MULA_AA_HL : MULA_AA<0x09, "mula.aa.hl">; +def MULA_AA_LH : MULA_AA<0x0A, "mula.aa.lh">; +def MULA_AA_HH : MULA_AA<0x0B, "mula.aa.hh">; + +class MULA_AD oper1, string instrAsm> + : RRR_Inst<0x04, oper1, 0x03, (outs), (ins AR:$s, MR23:$y), + instrAsm#"\t$s, $y", []>, Requires<[HasMAC16]> { + bits<2> y; + + let r = 0; + let t{3} = 0; + let t{2} = y{0}; + let t{1-0} = 0; + + let Uses = [ACCLO, ACCHI]; + let Defs = [M1, M2, ACCLO, ACCHI]; +} + +def MULA_AD_LL : MULA_AD<0x08, "mula.ad.ll">; +def MULA_AD_HL : MULA_AD<0x09, "mula.ad.hl">; +def MULA_AD_LH : MULA_AD<0x0A, "mula.ad.lh">; +def MULA_AD_HH : MULA_AD<0x0B, "mula.ad.hh">; + +class MULA_DA oper1, string instrAsm> + : RRR_Inst<0x04, oper1, 0x06, (outs), (ins MR01:$x, AR:$t), + instrAsm#"\t$x, $t", []>, Requires<[HasMAC16]> { + bits<2> x; + + let r{3} = 0; + let r{2} = x{0}; + let r{1-0} = 0; + let s = 0; + + let Uses = [ACCLO, ACCHI]; + let Defs = [M1, M2, ACCLO, ACCHI]; +} + +def MULA_DA_LL : MULA_DA<0x08, "mula.da.ll">; +def MULA_DA_HL : MULA_DA<0x09, "mula.da.hl">; +def MULA_DA_LH : MULA_DA<0x0A, "mula.da.lh">; +def MULA_DA_HH : MULA_DA<0x0B, "mula.da.hh">; + +class MULA_DD oper1, string instrAsm> + : RRR_Inst<0x04, oper1, 0x02, 
(outs), (ins MR01:$x, MR23:$y), + instrAsm#"\t$x, $y", []>, Requires<[HasMAC16]> { + bits<2> x; + bits<2> y; + + let r{3} = 0; + let r{2} = x{0}; + let r{1-0} = 0; + let s = 0; + let t{3} = 0; + let t{2} = y{0}; + let t{1-0} = 0; + + let Uses = [ACCLO, ACCHI]; + let Defs = [M1, M2, ACCLO, ACCHI]; +} + +def MULA_DD_LL : MULA_DD<0x08, "mula.dd.ll">; +def MULA_DD_HL : MULA_DD<0x09, "mula.dd.hl">; +def MULA_DD_LH : MULA_DD<0x0A, "mula.dd.lh">; +def MULA_DD_HH : MULA_DD<0x0B, "mula.dd.hh">; + +class MULS_AA oper1, string instrAsm> + : RRR_Inst<0x04, oper1, 0x07, (outs), (ins AR:$s, AR:$t), + instrAsm#"\t$s, $t", []>, Requires<[HasMAC16]> { + let r = 0; + let Uses = [ACCLO, ACCHI]; + let Defs = [M1, M2, ACCLO, ACCHI]; +} + +def MULS_AA_LL : MULS_AA<0x0C, "muls.aa.ll">; +def MULS_AA_HL : MULS_AA<0x0D, "muls.aa.hl">; +def MULS_AA_LH : MULS_AA<0x0E, "muls.aa.lh">; +def MULS_AA_HH : MULS_AA<0x0F, "muls.aa.hh">; + +class MULS_AD oper1, string instrAsm> + : RRR_Inst<0x04, oper1, 0x03, (outs), (ins AR:$s, MR23:$y), + instrAsm#"\t$s, $y", []>, Requires<[HasMAC16]> { + bits<2> y; + + let r = 0; + let t{3} = 0; + let t{2} = y{0}; + let t{1-0} = 0; + + let Uses = [ACCLO, ACCHI]; + let Defs = [M1, M2, ACCLO, ACCHI]; +} + +def MULS_AD_LL : MULS_AD<0x0C, "muls.ad.ll">; +def MULS_AD_HL : MULS_AD<0x0D, "muls.ad.hl">; +def MULS_AD_LH : MULS_AD<0x0E, "muls.ad.lh">; +def MULS_AD_HH : MULS_AD<0x0F, "muls.ad.hh">; + +class MULS_DA oper1, string instrAsm> + : RRR_Inst<0x04, oper1, 0x06, (outs), (ins MR01:$x, AR:$t), + instrAsm#"\t$x, $t", []>, Requires<[HasMAC16]> { + bits<2> x; + + let r{3} = 0; + let r{2} = x{0}; + let r{1-0} = 0; + let s = 0; + + let Uses = [ACCLO, ACCHI]; + let Defs = [M1, M2, ACCLO, ACCHI]; +} + +def MULS_DA_LL : MULS_DA<0x0C, "muls.da.ll">; +def MULS_DA_HL : MULS_DA<0x0D, "muls.da.hl">; +def MULS_DA_LH : MULS_DA<0x0E, "muls.da.lh">; +def MULS_DA_HH : MULS_DA<0x0F, "muls.da.hh">; + +class MULS_DD oper1, string instrAsm> + : RRR_Inst<0x04, oper1, 0x02, (outs), (ins 
MR01:$x, MR23:$y), + instrAsm#"\t$x, $y", []>, Requires<[HasMAC16]> { + bits<2> x; + bits<2> y; + + let r{3} = 0; + let r{2} = x{0}; + let r{1-0} = 0; + let s = 0; + let t{3} = 0; + let t{2} = y{0}; + let t{1-0} = 0; + + let Uses = [ACCLO, ACCHI]; + let Defs = [M1, M2, ACCLO, ACCHI]; +} + +def MULS_DD_LL : MULS_DD<0x0C, "muls.dd.ll">; +def MULS_DD_HL : MULS_DD<0x0D, "muls.dd.hl">; +def MULS_DD_LH : MULS_DD<0x0E, "muls.dd.lh">; +def MULS_DD_HH : MULS_DD<0x0F, "muls.dd.hh">; + +//===----------------------------------------------------------------------===// +// Multiply-accumulate with load + +class MULA_DA_LDDEC oper1, string instrAsm> + : RRR_Inst<0x04, oper1, 0x05, (outs MR:$w, AR:$d), (ins AR:$s, MR01:$x, AR:$t), + instrAsm#"\t $w, $s, $x, $t", []>, Requires<[HasMAC16]> { + bits<2> x; + bits<2> w; + + let Constraints = "$s = $d"; + let mayLoad = 1; + let r{3} = 0; + let r{2} = x{0}; + let r{1-0} = w{1-0}; + let Uses = [ACCLO, ACCHI]; + let Defs = [M1, M2, ACCLO, ACCHI]; +} + +def MULA_DA_LL_LDDEC : MULA_DA_LDDEC<0x08, "mula.da.ll.lddec">; +def MULA_DA_HL_LDDEC : MULA_DA_LDDEC<0x09, "mula.da.hl.lddec">; +def MULA_DA_LH_LDDEC : MULA_DA_LDDEC<0x0A, "mula.da.lh.lddec">; +def MULA_DA_HH_LDDEC : MULA_DA_LDDEC<0x0B, "mula.da.hh.lddec">; + +class MULA_DA_LDINC oper1, string instrAsm> + : RRR_Inst<0x04, oper1, 0x04, (outs MR:$w, AR:$d), (ins AR:$s, MR:$x, AR:$t), + instrAsm#"\t $w, $s, $x, $t", []>, Requires<[HasMAC16]> { + bits<1> x; + bits<2> w; + + let Constraints = "$s = $d"; + let mayLoad = 1; + let r{3} = 0; + let r{2} = x{0}; + let r{1-0} = w{1-0}; + let Uses = [ACCLO, ACCHI]; + let Defs = [M1, M2, ACCLO, ACCHI]; +} + +def MULA_DA_LL_LDINC: MULA_DA_LDINC<0x08, "mula.da.ll.ldinc">; +def MULA_DA_HL_LDINC: MULA_DA_LDINC<0x09, "mula.da.hl.ldinc">; +def MULA_DA_LH_LDINC: MULA_DA_LDINC<0x0A, "mula.da.lh.ldinc">; +def MULA_DA_HH_LDINC: MULA_DA_LDINC<0x0B, "mula.da.hh.ldinc">; + +class MULA_DD_LDDEC oper1, string instrAsm> + : RRR_Inst<0x04, oper1, 0x01, (outs MR:$w, 
AR:$d), (ins AR:$s, MR01:$x, MR23:$y), + instrAsm#"\t $w, $s, $x, $y", []>, Requires<[HasMAC16]> { + bits<2> x; + bits<2> y; + bits<2> w; + + let Constraints = "$s = $d"; + let mayLoad = 1; + let r{3} = 0; + let r{2} = x{0}; + let r{1-0} = w{1-0}; + let t{3} = 0; + let t{2} = y{0}; + let t{1-0} = 0; + let Uses = [ACCLO, ACCHI]; + let Defs = [M1, M2, ACCLO, ACCHI]; +} + +def MULA_DD_LL_LDDEC : MULA_DD_LDDEC<0x08, "mula.dd.ll.lddec">; +def MULA_DD_HL_LDDEC : MULA_DD_LDDEC<0x09, "mula.dd.hl.lddec">; +def MULA_DD_LH_LDDEC : MULA_DD_LDDEC<0x0A, "mula.dd.lh.lddec">; +def MULA_DD_HH_LDDEC : MULA_DD_LDDEC<0x0B, "mula.dd.hh.lddec">; + +class MULA_DD_LDINC oper1, string instrAsm> + : RRR_Inst<0x04, oper1, 0x00, (outs MR:$w, AR:$d), (ins AR:$s, MR01:$x, MR23:$y), + instrAsm#"\t $w, $s, $x, $y", []>, Requires<[HasMAC16]> { + bits<2> x; + bits<2> y; + bits<2> w; + + let Constraints = "$s = $d"; + let mayLoad = 1; + let r{3} = 0; + let r{2} = x{0}; + let r{1-0} = w{1-0}; + let t{3} = 0; + let t{2} = y{0}; + let t{1-0} = 0; + let Uses = [ACCLO, ACCHI]; + let Defs = [M1, M2, ACCLO, ACCHI]; +} + +def MULA_DD_LL_LDINC : MULA_DD_LDINC<0x08, "mula.dd.ll.ldinc">; +def MULA_DD_HL_LDINC : MULA_DD_LDINC<0x09, "mula.dd.hl.ldinc">; +def MULA_DD_LH_LDINC : MULA_DD_LDINC<0x0A, "mula.dd.lh.ldinc">; +def MULA_DD_HH_LDINC : MULA_DD_LDINC<0x0B, "mula.dd.hh.ldinc">; + +def LDDEC : RRR_Inst<0x04, 0x00, 0x09, (outs MR:$w, AR:$d), (ins AR:$s), + "lddec\t $w, $s", []>, Requires<[HasMAC16]> { + bits<2> w; + + let Constraints = "$s = $d"; + let mayLoad = 1; + let r{3-2} = 0; + let r{1-0} = w{1-0}; + let t = 0x00; +} + +def LDINC : RRR_Inst<0x04, 0x00, 0x08, (outs MR:$w, AR:$d), (ins AR:$s), + "ldinc\t $w, $s", []>, Requires<[HasMAC16]> { + bits<2> w; + + let Constraints = "$s = $d"; + let mayLoad = 1; + let r{3-2} = 0; + let r{1-0} = w{1-0}; + let t = 0; +} diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index f1e2066872652..90c4b0eba1221 100644 --- 
a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1137,3 +1137,8 @@ def QUOS : ArithLogic_RRR<0x0D, 0x02, "quos", sdiv>, Requires<[HasDiv32]>; def QUOU : ArithLogic_RRR<0x0C, 0x02, "quou", udiv>, Requires<[HasDiv32]>; def REMS : ArithLogic_RRR<0x0F, 0x02, "rems", srem>, Requires<[HasDiv32]>; def REMU : ArithLogic_RRR<0x0E, 0x02, "remu", urem>, Requires<[HasDiv32]>; + +//===----------------------------------------------------------------------===// +// DSP Instructions +//===----------------------------------------------------------------------===// +include "XtensaDSPInstrInfo.td" diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td index d7fbdcff09b98..4e953f7bfe159 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td @@ -81,11 +81,21 @@ def SAR : SRReg<3, "sar", ["SAR","3"]>; def BREG : SRReg<4, "br", ["BR", "4"]>; +def ACCLO : SRReg<16, "acclo", ["ACCLO", "16"]>; +def ACCHI : SRReg<17, "acchi", ["ACCHI", "17"]>; +def M0 : SRReg<32, "m0", ["M0", "32"]>; +def M1 : SRReg<33, "m1", ["M1", "33"]>; +def M2 : SRReg<34, "m2", ["M2", "34"]>; +def M3 : SRReg<35, "m3", ["M3", "35"]>; def WINDOWBASE : SRReg<72, "windowbase", ["WINDOWBASE", "72"]>; def WINDOWSTART : SRReg<73, "windowstart", ["WINDOWSTART", "73"]>; +def MR01 : RegisterClass<"Xtensa", [i32], 32, (add M0, M1)>; +def MR23 : RegisterClass<"Xtensa", [i32], 32, (add M2, M3)>; +def MR : RegisterClass<"Xtensa", [i32], 32, (add MR01, MR23)>; + def SR : RegisterClass<"Xtensa", [i32], 32, (add LBEG, LEND, LCOUNT, - SAR, BREG, WINDOWBASE, WINDOWSTART)>; + SAR, BREG, MR, WINDOWBASE, WINDOWSTART)>; //===----------------------------------------------------------------------===// // USER registers diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp index 77971c75a94af..d87b6201e1531 100644 --- 
a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp @@ -40,6 +40,7 @@ XtensaSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) { HasMul32 = false; HasMul32High = false; HasDiv32 = false; + HasMAC16 = false; // Parse features string. ParseSubtargetFeatures(CPUName, CPUName, FS); diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.h b/llvm/lib/Target/Xtensa/XtensaSubtarget.h index 23ae494cc456e..67218ab8b8f3c 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.h +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.h @@ -66,6 +66,9 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { // Enable Xtensa Div32 option bool HasDiv32; + // Enabled Xtensa MAC16 instructions + bool HasMAC16; + XtensaSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS); public: @@ -111,6 +114,8 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { bool hasDiv32() const { return HasDiv32; } + bool hasMAC16() const { return HasMAC16; } + // Automatically generated by tblgen. 
void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS); }; diff --git a/llvm/test/MC/Xtensa/xtensa-valid-mac16.s b/llvm/test/MC/Xtensa/xtensa-valid-mac16.s new file mode 100644 index 0000000000000..dee79da7755d9 --- /dev/null +++ b/llvm/test/MC/Xtensa/xtensa-valid-mac16.s @@ -0,0 +1,234 @@ +# RUN: llvm-mc %s -triple=xtensa -mattr=+mac16 -show-encoding \ +# RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s + +.align 4 +LBL0: + +# CHECK-INST: umul.aa.ll a2, a3 +# CHECK: encoding: [0x34,0x02,0x70] + umul.aa.ll a2, a3 +# CHECK-INST: umul.aa.lh a2, a3 +# CHECK: encoding: [0x34,0x02,0x72] + umul.aa.lh a2, a3 +# CHECK-INST: umul.aa.hl a2, a3 +# CHECK: encoding: [0x34,0x02,0x71] + umul.aa.hl a2, a3 +# CHECK-INST: umul.aa.hh a2, a3 +# CHECK: encoding: [0x34,0x02,0x73] + umul.aa.hh a2, a3 + +# CHECK-INST: mul.aa.ll a2, a3 +# CHECK: encoding: [0x34,0x02,0x74] + mul.aa.ll a2, a3 +# CHECK-INST: mul.aa.lh a2, a3 +# CHECK: encoding: [0x34,0x02,0x76] + mul.aa.lh a2, a3 +# CHECK-INST: mul.aa.hl a2, a3 +# CHECK: encoding: [0x34,0x02,0x75] + mul.aa.hl a2, a3 +# CHECK-INST: mul.aa.hh a2, a3 +# CHECK: encoding: [0x34,0x02,0x77] + mul.aa.hh a2, a3 + +# CHECK-INST: mul.ad.ll a2, m2 +# CHECK: encoding: [0x04,0x02,0x34] + mul.ad.ll a2, m2 +# CHECK-INST: mul.ad.lh a2, m2 +# CHECK: encoding: [0x04,0x02,0x36] + mul.ad.lh a2, m2 +# CHECK-INST: mul.ad.hl a2, m2 +# CHECK: encoding: [0x04,0x02,0x35] + mul.ad.hl a2, m2 +# CHECK-INST: mul.ad.hh a2, m2 +# CHECK: encoding: [0x04,0x02,0x37] + mul.ad.hh a2, m2 + +# CHECK-INST: mul.da.ll m1, a3 +# CHECK: encoding: [0x34,0x40,0x64] + mul.da.ll m1, a3 +# CHECK-INST: mul.da.lh m1, a3 +# CHECK: encoding: [0x34,0x40,0x66] + mul.da.lh m1, a3 +# CHECK-INST: mul.da.hl m1, a3 +# CHECK: encoding: [0x34,0x40,0x65] + mul.da.hl m1, a3 +# CHECK-INST: mul.da.hh m1, a3 +# CHECK: encoding: [0x34,0x40,0x67] + mul.da.hh m1, a3 + +# CHECK-INST: mul.dd.ll m1, m2 +# CHECK: encoding: [0x04,0x40,0x24] + mul.dd.ll m1, m2 +# CHECK-INST: mul.dd.lh m1, m2 
+# CHECK: encoding: [0x04,0x40,0x26] + mul.dd.lh m1, m2 +# CHECK-INST: mul.dd.hl m1, m2 +# CHECK: encoding: [0x04,0x40,0x25] + mul.dd.hl m1, m2 +# CHECK-INST: mul.dd.hh m1, m2 +# CHECK: encoding: [0x04,0x40,0x27] + mul.dd.hh m1, m2 + +# CHECK-INST: mula.aa.ll a2, a3 +# CHECK: encoding: [0x34,0x02,0x78] + mula.aa.ll a2, a3 +# CHECK-INST: mula.aa.lh a2, a3 +# CHECK: encoding: [0x34,0x02,0x7a] + mula.aa.lh a2, a3 +# CHECK-INST: mula.aa.hl a2, a3 +# CHECK: encoding: [0x34,0x02,0x79] + mula.aa.hl a2, a3 +# CHECK-INST: mula.aa.hh a2, a3 +# CHECK: encoding: [0x34,0x02,0x7b] + mula.aa.hh a2, a3 + +# CHECK-INST: mula.ad.ll a2, m2 +# CHECK: encoding: [0x04,0x02,0x38] + mula.ad.ll a2, m2 +# CHECK-INST: mula.ad.lh a2, m2 +# CHECK: encoding: [0x04,0x02,0x3a] + mula.ad.lh a2, m2 +# CHECK-INST: mula.ad.hl a2, m2 +# CHECK: encoding: [0x04,0x02,0x39] + mula.ad.hl a2, m2 +# CHECK-INST: mula.ad.hh a2, m2 +# CHECK: encoding: [0x04,0x02,0x3b] + mula.ad.hh a2, m2 + +# CHECK-INST: mula.da.ll m1, a3 +# CHECK: encoding: [0x34,0x40,0x68] + mula.da.ll m1, a3 +# CHECK-INST: mula.da.lh m1, a3 +# CHECK: encoding: [0x34,0x40,0x6a] + mula.da.lh m1, a3 +# CHECK-INST: mula.da.hl m1, a3 +# CHECK: encoding: [0x34,0x40,0x69] + mula.da.hl m1, a3 +# CHECK-INST: mula.da.hh m1, a3 +# CHECK: encoding: [0x34,0x40,0x6b] + mula.da.hh m1, a3 + +# CHECK-INST: mula.dd.ll m1, m2 +# CHECK: encoding: [0x04,0x40,0x28] + mula.dd.ll m1, m2 +# CHECK-INST: mula.dd.lh m1, m2 +# CHECK: encoding: [0x04,0x40,0x2a] + mula.dd.lh m1, m2 +# CHECK-INST: mula.dd.hl m1, m2 +# CHECK: encoding: [0x04,0x40,0x29] + mula.dd.hl m1, m2 +# CHECK-INST: mula.dd.hh m1, m2 +# CHECK: encoding: [0x04,0x40,0x2b] + mula.dd.hh m1, m2 + +# CHECK-INST: muls.aa.ll a2, a3 +# CHECK: encoding: [0x34,0x02,0x7c] + muls.aa.ll a2, a3 +# CHECK-INST: muls.aa.lh a2, a3 +# CHECK: encoding: [0x34,0x02,0x7e] + muls.aa.lh a2, a3 +# CHECK-INST: muls.aa.hl a2, a3 +# CHECK: encoding: [0x34,0x02,0x7d] + muls.aa.hl a2, a3 +# CHECK-INST: muls.aa.hh a2, a3 +# CHECK: 
encoding: [0x34,0x02,0x7f] + muls.aa.hh a2, a3 + +# CHECK-INST: muls.ad.ll a2, m2 +# CHECK: encoding: [0x04,0x02,0x3c] + muls.ad.ll a2, m2 +# CHECK-INST: muls.ad.lh a2, m2 +# CHECK: encoding: [0x04,0x02,0x3e] + muls.ad.lh a2, m2 +# CHECK-INST: muls.ad.hl a2, m2 +# CHECK: encoding: [0x04,0x02,0x3d] + muls.ad.hl a2, m2 +# CHECK-INST: muls.ad.hh a2, m2 +# CHECK: encoding: [0x04,0x02,0x3f] + muls.ad.hh a2, m2 + +# CHECK-INST: muls.da.ll m1, a3 +# CHECK: encoding: [0x34,0x40,0x6c] + muls.da.ll m1, a3 +# CHECK-INST: muls.da.lh m1, a3 +# CHECK: encoding: [0x34,0x40,0x6e] + muls.da.lh m1, a3 +# CHECK-INST: muls.da.hl m1, a3 +# CHECK: encoding: [0x34,0x40,0x6d] + muls.da.hl m1, a3 +# CHECK-INST: muls.da.hh m1, a3 +# CHECK: encoding: [0x34,0x40,0x6f] + muls.da.hh m1, a3 + +# CHECK-INST: muls.dd.ll m1, m2 +# CHECK: encoding: [0x04,0x40,0x2c] + muls.dd.ll m1, m2 +# CHECK-INST: muls.dd.lh m1, m2 +# CHECK: encoding: [0x04,0x40,0x2e] + muls.dd.lh m1, m2 +# CHECK-INST: muls.dd.hl m1, m2 +# CHECK: encoding: [0x04,0x40,0x2d] + muls.dd.hl m1, m2 +# CHECK-INST: muls.dd.hh m1, m2 +# CHECK: encoding: [0x04,0x40,0x2f] + muls.dd.hh m1, m2 + +# CHECK-INST: mula.da.ll.lddec m1, a8, m0, a3 +# CHECK: encoding: [0x34,0x18,0x58] + mula.da.ll.lddec m1, a8, m0, a3 +# CHECK-INST: mula.da.hl.lddec m1, a8, m0, a3 +# CHECK: encoding: [0x34,0x18,0x59] + mula.da.hl.lddec m1, a8, m0, a3 +# CHECK-INST: mula.da.lh.lddec m1, a8, m0, a3 +# CHECK: encoding: [0x34,0x18,0x5a] + mula.da.lh.lddec m1, a8, m0, a3 +# CHECK-INST: mula.da.hh.lddec m1, a8, m0, a3 +# CHECK: encoding: [0x34,0x18,0x5b] + mula.da.hh.lddec m1, a8, m0, a3 + +# CHECK-INST: mula.dd.ll.lddec m1, a8, m0, m2 +# CHECK: encoding: [0x04,0x18,0x18] + mula.dd.ll.lddec m1, a8, m0, m2 +# CHECK-INST: mula.dd.hl.lddec m1, a8, m0, m2 +# CHECK: encoding: [0x04,0x18,0x19] + mula.dd.hl.lddec m1, a8, m0, m2 +# CHECK-INST: mula.dd.lh.lddec m1, a8, m0, m2 +# CHECK: encoding: [0x04,0x18,0x1a] + mula.dd.lh.lddec m1, a8, m0, m2 +# CHECK-INST: mula.dd.hh.lddec m1, 
a8, m0, m2 +# CHECK: encoding: [0x04,0x18,0x1b] + mula.dd.hh.lddec m1, a8, m0, m2 + +# CHECK-INST: mula.da.ll.ldinc m1, a8, m0, a3 +# CHECK: encoding: [0x34,0x18,0x48] + mula.da.ll.ldinc m1, a8, m0, a3 +# CHECK-INST: mula.da.hl.ldinc m1, a8, m0, a3 +# CHECK: encoding: [0x34,0x18,0x49] + mula.da.hl.ldinc m1, a8, m0, a3 +# CHECK-INST: mula.da.lh.ldinc m1, a8, m0, a3 +# CHECK: encoding: [0x34,0x18,0x4a] + mula.da.lh.ldinc m1, a8, m0, a3 +# CHECK-INST: mula.da.hh.ldinc m1, a8, m0, a3 +# CHECK: encoding: [0x34,0x18,0x4b] + mula.da.hh.ldinc m1, a8, m0, a3 + +# CHECK-INST: mula.dd.ll.ldinc m1, a8, m0, m2 +# CHECK: encoding: [0x04,0x18,0x08] + mula.dd.ll.ldinc m1, a8, m0, m2 +# CHECK-INST: mula.dd.hl.ldinc m1, a8, m0, m2 +# CHECK: encoding: [0x04,0x18,0x09] + mula.dd.hl.ldinc m1, a8, m0, m2 +# CHECK-INST: mula.dd.lh.ldinc m1, a8, m0, m2 +# CHECK: encoding: [0x04,0x18,0x0a] + mula.dd.lh.ldinc m1, a8, m0, m2 +# CHECK-INST: mula.dd.hh.ldinc m1, a8, m0, m2 +# CHECK: encoding: [0x04,0x18,0x0b] + mula.dd.hh.ldinc m1, a8, m0, m2 + +# CHECK-INST: lddec m0, a8 +# CHECK: encoding: [0x04,0x08,0x90] + lddec m0, a8 +# CHECK-INST: ldinc m0, a8 +# CHECK: encoding: [0x04,0x08,0x80] + ldinc m0, a8 + From 66c182abb79df1c19ff292f8e573bddc238d9a07 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 15:35:56 +0300 Subject: [PATCH 029/289] [Xtensa] Implement Xtensa features and operations. Implement Debug, DFPAccel, S32C1I, THREADPTR, Extended L32R, ATOMCTL, MEMCTL features. 
--- .../Disassembler/XtensaDisassembler.cpp | 21 ++++++-- .../MCTargetDesc/XtensaMCCodeEmitter.cpp | 1 + llvm/lib/Target/Xtensa/Xtensa.td | 35 +++++++++++++ llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 2 + llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 39 +++++++++++++++ llvm/lib/Target/Xtensa/XtensaRegisterInfo.td | 49 ++++++++++++++++++- llvm/lib/Target/Xtensa/XtensaSubtarget.cpp | 7 +++ llvm/lib/Target/Xtensa/XtensaSubtarget.h | 35 +++++++++++++ llvm/test/MC/Xtensa/xtensa-valid-dbg.s | 9 ++++ 9 files changed, 191 insertions(+), 7 deletions(-) create mode 100644 llvm/test/MC/Xtensa/xtensa-valid-dbg.s diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index 5abb901f16f63..b13468e583571 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -148,10 +148,19 @@ static DecodeStatus DecodeMR23RegisterClass(MCInst &Inst, uint64_t RegNo, } static const unsigned SRDecoderTable[] = { - Xtensa::LEND, 1, Xtensa::LCOUNT, 2, Xtensa::SAR, 3, - Xtensa::BREG, 4, Xtensa::ACCLO, 16, Xtensa::ACCHI, 17, - Xtensa::M0, 32, Xtensa::M1, 33, Xtensa::M2, 34, - Xtensa::M3, 35, Xtensa ::WINDOWBASE, 72, Xtensa::WINDOWSTART, 73}; + Xtensa::LBEG, 0, Xtensa::LEND, 1, + Xtensa::LCOUNT, 2, Xtensa::SAR, 3, + Xtensa::BREG, 4, Xtensa::LITBASE, 5, + Xtensa::ACCLO, 16, Xtensa::ACCHI, 17, + Xtensa::M0, 32, Xtensa::M1, 33, + Xtensa::M2, 34, Xtensa::M3, 35, + Xtensa::WINDOWBASE, 72, Xtensa::WINDOWSTART, 73, + Xtensa::IBREAKENABLE, 96, Xtensa::MEMCTL, 97, + Xtensa::ATOMCTL, 99, Xtensa::IBREAKA0, 128, + Xtensa::IBREAKA1, 129, Xtensa::DBREAKA0, 144, + Xtensa::DBREAKA1, 145, Xtensa::DBREAKC0, 160, + Xtensa::DBREAKC1, 161, Xtensa::DEBUGCAUSE, 233, + Xtensa::ICOUNT, 236, Xtensa::ICOUNTLEVEL, 237}; static DecodeStatus DecodeSRRegisterClass(MCInst &Inst, uint64_t RegNo, uint64_t Address, @@ -170,7 +179,9 @@ static DecodeStatus 
DecodeSRRegisterClass(MCInst &Inst, uint64_t RegNo, return MCDisassembler::Fail; } -static const unsigned URDecoderTable[] = {Xtensa::FCR, 232, Xtensa::FSR, 233}; +static const unsigned URDecoderTable[] = { + Xtensa::THREADPTR, 231, Xtensa::FCR, 232, Xtensa::FSR, 233, + Xtensa::F64R_LO, 234, Xtensa::F64R_HI, 235, Xtensa::F64S, 236}; static DecodeStatus DecodeURRegisterClass(MCInst &Inst, uint64_t RegNo, uint64_t Address, diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp index eadcae39255d5..01c7bd4bd3185 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp @@ -283,6 +283,7 @@ XtensaMCCodeEmitter::getMemRegEncoding(const MCInst &MI, unsigned OpNo, case Xtensa::L32I_N: case Xtensa::S32F: case Xtensa::L32F: + case Xtensa::S32C1I: if (Res & 0x3) { report_fatal_error("Unexpected operand value!"); } diff --git a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td index 7cd4a2d89c3b5..b793af66ef387 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -72,6 +72,41 @@ def FeatureMAC16 : SubtargetFeature<"mac16", "HasMAC16", "true", def HasMAC16 : Predicate<"Subtarget->hasMAC16()">, AssemblerPredicate<(all_of FeatureMAC16)>; +def FeatureDFPAccel : SubtargetFeature<"dfpaccel", "HasDFPAccel", "true", + "Enable Xtensa Double Precision FP acceleration">; +def HasDFPAccel : Predicate<"Subtarget->hasDFPAccel()">, + AssemblerPredicate<(all_of FeatureDFPAccel)>; + +def FeatureS32C1I : SubtargetFeature<"s32c1i", "HasS32C1I", "true", + "Enable Xtensa S32C1I option">; +def HasS32C1I : Predicate<"Subtarget->hasS32C1I()">, + AssemblerPredicate<(all_of FeatureS32C1I)>; + +def FeatureTHREADPTR : SubtargetFeature<"threadptr", "HasTHREADPTR", "true", + "Enable Xtensa THREADPTR option">; +def HasTHREADPTR : Predicate<"Subtarget->hasTHREADPTR()">, + AssemblerPredicate<(all_of 
FeatureTHREADPTR)>; + +def FeatureExtendedL32R : SubtargetFeature<"extendedl32r", "HasExtendedL32R", "true", + "Enable Xtensa Extended L32R option">; +def HasExtendedL32R : Predicate<"Subtarget->hasExtendedL32R()">, + AssemblerPredicate<(all_of FeatureExtendedL32R)>; + +def FeatureATOMCTL : SubtargetFeature<"atomctl", "HasATOMCTL", "true", + "Enable Xtensa ATOMCTL option">; +def HasATOMCTL : Predicate<"Subtarget->hasATOMCTL()">, + AssemblerPredicate<(all_of FeatureATOMCTL)>; + +def FeatureMEMCTL : SubtargetFeature<"memctl", "HasMEMCTL", "true", + "Enable Xtensa MEMCTL option">; +def HasMEMCTL : Predicate<"Subtarget->hasMEMCTL()">, + AssemblerPredicate<(all_of FeatureMEMCTL)>; + +def FeatureDebug : SubtargetFeature<"debug", "HasDebug", "true", + "Enable Xtensa Debug option">; +def HasDebug : Predicate<"Subtarget->hasDebug()">, + AssemblerPredicate<(all_of FeatureDebug)>; + //===----------------------------------------------------------------------===// // Xtensa supported processors. //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 342cf54b02f52..194c5f073c5db 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -290,6 +290,8 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setOperationAction(ISD::VACOPY, MVT::Other, Custom); setOperationAction(ISD::VAEND, MVT::Other, Expand); + setOperationAction(ISD::TRAP, MVT::Other, Legal); + // Compute derived properties from the register classes computeRegisterProperties(STI.getRegisterInfo()); diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 90c4b0eba1221..a590d11a85ca6 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1138,6 +1138,45 @@ def QUOU : ArithLogic_RRR<0x0C, 0x02, "quou", udiv>, 
Requires<[HasDiv32]>; def REMS : ArithLogic_RRR<0x0F, 0x02, "rems", srem>, Requires<[HasDiv32]>; def REMU : ArithLogic_RRR<0x0E, 0x02, "remu", urem>, Requires<[HasDiv32]>; +//===----------------------------------------------------------------------===// +// S32C1I +//===----------------------------------------------------------------------===// + +let mayStore = 1, mayLoad = 1, Predicates = [HasS32C1I] in { + def S32C1I : RRI8_Inst<0x02, (outs AR:$a), (ins AR:$t, mem32:$addr), + "s32c1i\t$t, $addr", []> { + bits<12> addr; + + let r = 0x0e; + let Uses = [SCOMPARE1]; + let Constraints = "$a = $t"; + let imm8{7-0} = addr{11-4}; + let s{3-0} = addr{3-0}; + } +} + +//===----------------------------------------------------------------------===// +// Debug instructions +//===----------------------------------------------------------------------===// + +let isBarrier = 1, isTerminator = 1 in { + def BREAK : RRR_Inst<0x00, 0x00, 0x00, (outs), (ins uimm4:$s, uimm4:$t), + "break\t$s, $t", []>, Requires<[HasDebug]> { + let r = 0x04; + } + + def BREAK_N : RRRN_Inst<0x0C, (outs), (ins uimm4:$imm), + "break.n\t$imm", []>, Requires<[HasDensity, HasDebug]> { + bits<4> imm; + + let r = 0xf; + let s = imm; + let t = 0x2; + } +} + +def : Pat<(trap), (BREAK (i32 1), (i32 15))>; + //===----------------------------------------------------------------------===// // DSP Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td index 4e953f7bfe159..b5d371c2e315a 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td @@ -80,6 +80,10 @@ def LCOUNT : SRReg<2, "lcount", ["LCOUNT", "2"]>; def SAR : SRReg<3, "sar", ["SAR","3"]>; def BREG : SRReg<4, "br", ["BR", "4"]>; +def LITBASE : SRReg<5, "litbase", ["LITBASE", "5"]>; + +// Expected data value for S32C1I operation +def SCOMPARE1 : SRReg<12, 
"scompare1", ["SCOMPARE1", "12"]>; def ACCLO : SRReg<16, "acclo", ["ACCLO", "16"]>; def ACCHI : SRReg<17, "acchi", ["ACCHI", "17"]>; @@ -90,12 +94,46 @@ def M3 : SRReg<35, "m3", ["M3", "35"]>; def WINDOWBASE : SRReg<72, "windowbase", ["WINDOWBASE", "72"]>; def WINDOWSTART : SRReg<73, "windowstart", ["WINDOWSTART", "73"]>; +// Instuction breakpoint enable register +def IBREAKENABLE : SRReg<96, "ibreakenable", ["IBREAKENABLE", "96"]>; + +// Memory Control Register +def MEMCTL : SRReg<97, "memctl", ["MEMCTL", "97"]>; + +def ATOMCTL : SRReg<99, "atomctl", ["ATOMCTL", "99"]>; + +// Instuction break address register 0 +def IBREAKA0 : SRReg<128, "ibreaka0", ["IBREAKA0", "128"]>; + +// Instuction break address register 1 +def IBREAKA1 : SRReg<129, "ibreaka1", ["IBREAKA1", "129"]>; + +// Data break address register 0 +def DBREAKA0 : SRReg<144, "dbreaka0", ["DBREAKA0", "144"]>; + +// Data break address register 1 +def DBREAKA1 : SRReg<145, "dbreaka1", ["DBREAKA1", "145"]>; + +// Data breakpoint control register 0 +def DBREAKC0 : SRReg<160, "dbreakc0", ["DBREAKC0", "160"]>; + +// Data breakpoint control register 1 +def DBREAKC1 : SRReg<161, "dbreakc1", ["DBREAKC1", "161"]>; + +// Cause of last debug exception register +def DEBUGCAUSE : SRReg<233, "debugcause", ["DEBUGCAUSE", "233"]>; + +def ICOUNT : SRReg<236, "icount", ["ICOUNT", "236"]>; +def ICOUNTLEVEL : SRReg<237, "icountlevel", ["ICOUNTLEVEL", "237"]>; + def MR01 : RegisterClass<"Xtensa", [i32], 32, (add M0, M1)>; def MR23 : RegisterClass<"Xtensa", [i32], 32, (add M2, M3)>; def MR : RegisterClass<"Xtensa", [i32], 32, (add MR01, MR23)>; def SR : RegisterClass<"Xtensa", [i32], 32, (add LBEG, LEND, LCOUNT, - SAR, BREG, MR, WINDOWBASE, WINDOWSTART)>; + SAR, BREG, LITBASE, SCOMPARE1, ACCLO, ACCHI, MR, WINDOWBASE, WINDOWSTART, + IBREAKENABLE, MEMCTL, ATOMCTL, IBREAKA0, IBREAKA1, DBREAKA0, DBREAKA1, + DBREAKC0, DBREAKC1, DEBUGCAUSE, ICOUNT, ICOUNTLEVEL)>; 
//===----------------------------------------------------------------------===// // USER registers @@ -105,10 +143,17 @@ class URReg num, string n, list alt = []> : XtensaReg { let AltNames = alt; } +// Thread Pointer register +def THREADPTR : URReg<231, "threadptr", ["THREADPTR"]>; + def FCR : URReg<232, "fcr", ["FCR"]>; def FSR : URReg<233, "fsr", ["FSR"]>; +def F64R_LO : URReg<234, "f64r_lo", ["F64R_LO"]>; +def F64R_HI : URReg<235, "f64r_hi", ["F64R_HI"]>; +def F64S : URReg<236, "f64s", ["F64S"]>; -def UR : RegisterClass<"Xtensa", [i32], 32, (add FCR, FSR)>; +def UR : RegisterClass<"Xtensa", [i32], 32, (add THREADPTR, FCR, + FSR, F64R_LO, F64R_HI, F64S)>; //===----------------------------------------------------------------------===// // Floating-Point registers diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp index d87b6201e1531..91d1c9a1c2ee9 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp @@ -41,6 +41,13 @@ XtensaSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) { HasMul32High = false; HasDiv32 = false; HasMAC16 = false; + HasDFPAccel = false; + HasS32C1I = false; + HasTHREADPTR = false; + HasExtendedL32R = false; + HasATOMCTL = false; + HasMEMCTL = false; + HasDebug = false; // Parse features string. 
ParseSubtargetFeatures(CPUName, CPUName, FS); diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.h b/llvm/lib/Target/Xtensa/XtensaSubtarget.h index 67218ab8b8f3c..a437f8c8104bc 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.h +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.h @@ -69,6 +69,27 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { // Enabled Xtensa MAC16 instructions bool HasMAC16; + // Enable Xtensa Xtensa Double Precision FP acceleration + bool HasDFPAccel; + + // Enable Xtensa S32C1I option + bool HasS32C1I; + + // Enable Xtensa THREADPTR option + bool HasTHREADPTR; + + // Enable Xtensa Extended L32R option + bool HasExtendedL32R; + + // Enable Xtensa ATOMCTL option + bool HasATOMCTL; + + // Enable Xtensa ATOMCTL option + bool HasMEMCTL; + + // Enable Xtensa Debug option + bool HasDebug; + XtensaSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS); public: @@ -116,6 +137,20 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { bool hasMAC16() const { return HasMAC16; } + bool hasDFPAccel() const { return HasDFPAccel; } + + bool hasS32C1I() const { return HasS32C1I; } + + bool hasTHREADPTR() const { return HasTHREADPTR; } + + bool hasExtendedL32R() const { return HasExtendedL32R; } + + bool hasATOMCTL() const { return HasATOMCTL; } + + bool hasMEMCTL() const { return HasMEMCTL; } + + bool hasDebug() const { return HasDebug; } + // Automatically generated by tblgen. 
void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS); }; diff --git a/llvm/test/MC/Xtensa/xtensa-valid-dbg.s b/llvm/test/MC/Xtensa/xtensa-valid-dbg.s new file mode 100644 index 0000000000000..9391c60e43f69 --- /dev/null +++ b/llvm/test/MC/Xtensa/xtensa-valid-dbg.s @@ -0,0 +1,9 @@ +# RUN: llvm-mc %s -triple=xtensa -mattr=+debug -show-encoding \ +# RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s + +.align 4 +LBL0: + +# CHECK-INST: break 1, 2 +# CHECK: encoding: [0x20,0x41,0x00] + break 1, 2 From b30f4ab72596bb2217db76fb8ec2e6efca575e69 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 15:40:19 +0300 Subject: [PATCH 030/289] [Xtensa] Implement Xtensa features and operations. Implement Exception, HighPriInterrupts, Coprocessor, Interrupt, RelocatableVector, TimerInt, PRID, RegionProtection and MiscSR features. Implement instructions for Exception, Interrupt and RegionProtection features with tests. --- .../Disassembler/XtensaDisassembler.cpp | 52 +++++++--- llvm/lib/Target/Xtensa/Xtensa.td | 46 +++++++++ llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 77 +++++++++++++++ llvm/lib/Target/Xtensa/XtensaRegisterInfo.td | 94 ++++++++++++++++++- llvm/lib/Target/Xtensa/XtensaSubtarget.cpp | 9 ++ llvm/lib/Target/Xtensa/XtensaSubtarget.h | 45 +++++++++ llvm/test/MC/Xtensa/xtensa-valid-exc.s | 21 +++++ llvm/test/MC/Xtensa/xtensa-valid-int.s | 18 ++++ llvm/test/MC/Xtensa/xtensa-valid-regprotect.s | 14 +++ 9 files changed, 356 insertions(+), 20 deletions(-) create mode 100644 llvm/test/MC/Xtensa/xtensa-valid-exc.s create mode 100644 llvm/test/MC/Xtensa/xtensa-valid-int.s create mode 100644 llvm/test/MC/Xtensa/xtensa-valid-regprotect.s diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index b13468e583571..d4d84b47f96ca 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ 
-148,19 +148,40 @@ static DecodeStatus DecodeMR23RegisterClass(MCInst &Inst, uint64_t RegNo, } static const unsigned SRDecoderTable[] = { - Xtensa::LBEG, 0, Xtensa::LEND, 1, - Xtensa::LCOUNT, 2, Xtensa::SAR, 3, - Xtensa::BREG, 4, Xtensa::LITBASE, 5, - Xtensa::ACCLO, 16, Xtensa::ACCHI, 17, - Xtensa::M0, 32, Xtensa::M1, 33, - Xtensa::M2, 34, Xtensa::M3, 35, - Xtensa::WINDOWBASE, 72, Xtensa::WINDOWSTART, 73, - Xtensa::IBREAKENABLE, 96, Xtensa::MEMCTL, 97, - Xtensa::ATOMCTL, 99, Xtensa::IBREAKA0, 128, - Xtensa::IBREAKA1, 129, Xtensa::DBREAKA0, 144, - Xtensa::DBREAKA1, 145, Xtensa::DBREAKC0, 160, - Xtensa::DBREAKC1, 161, Xtensa::DEBUGCAUSE, 233, - Xtensa::ICOUNT, 236, Xtensa::ICOUNTLEVEL, 237}; + Xtensa::LBEG, 0, Xtensa::LEND, 1, + Xtensa::LCOUNT, 2, Xtensa::SAR, 3, + Xtensa::BREG, 4, Xtensa::LITBASE, 5, + Xtensa::SCOMPARE1, 12, Xtensa::ACCLO, 16, + Xtensa::ACCHI, 17, Xtensa::M0, 32, + Xtensa::M1, 33, Xtensa::M2, 34, + Xtensa::M3, 35, Xtensa::WINDOWBASE, 72, + Xtensa::WINDOWSTART, 73, Xtensa::IBREAKENABLE, 96, + Xtensa::MEMCTL, 97, Xtensa::ATOMCTL, 99, + Xtensa::DDR, 104, Xtensa::IBREAKA0, 128, + Xtensa::IBREAKA1, 129, Xtensa::DBREAKA0, 144, + Xtensa::DBREAKA1, 145, Xtensa::DBREAKC0, 160, + Xtensa::DBREAKC1, 161, Xtensa::CONFIGID0, 176, + Xtensa::EPC1, 177, Xtensa::EPC2, 178, + Xtensa::EPC3, 179, Xtensa::EPC4, 180, + Xtensa::EPC5, 181, Xtensa::EPC6, 182, + Xtensa::EPC7, 183, Xtensa::DEPC, 192, + Xtensa::EPS2, 194, Xtensa::EPS3, 195, + Xtensa::EPS4, 196, Xtensa::EPS5, 197, + Xtensa::EPS6, 198, Xtensa::EPS7, 199, + Xtensa::CONFIGID1, 208, Xtensa::EXCSAVE1, 209, + Xtensa::EXCSAVE2, 210, Xtensa::EXCSAVE3, 211, + Xtensa::EXCSAVE4, 212, Xtensa::EXCSAVE5, 213, + Xtensa::EXCSAVE6, 214, Xtensa::EXCSAVE7, 215, + Xtensa::CPENABLE, 224, Xtensa::INTSET, 226, + Xtensa::INTCLEAR, 227, Xtensa::INTENABLE, 228, + Xtensa::PS, 230, Xtensa::VECBASE, 231, + Xtensa::EXCCAUSE, 232, Xtensa::DEBUGCAUSE, 233, + Xtensa::CCOUNT, 234, Xtensa::PRID, 235, + Xtensa::ICOUNT, 236, Xtensa::ICOUNTLEVEL, 
237, + Xtensa::EXCVADDR, 238, Xtensa::CCOMPARE0, 240, + Xtensa::CCOMPARE1, 241, Xtensa::CCOMPARE2, 242, + Xtensa::MISC0, 244, Xtensa::MISC1, 245, + Xtensa::MISC2, 246, Xtensa::MISC3, 247}; static DecodeStatus DecodeSRRegisterClass(MCInst &Inst, uint64_t RegNo, uint64_t Address, @@ -180,8 +201,9 @@ static DecodeStatus DecodeSRRegisterClass(MCInst &Inst, uint64_t RegNo, } static const unsigned URDecoderTable[] = { - Xtensa::THREADPTR, 231, Xtensa::FCR, 232, Xtensa::FSR, 233, - Xtensa::F64R_LO, 234, Xtensa::F64R_HI, 235, Xtensa::F64S, 236}; + Xtensa::EXPSTATE, 230, Xtensa::THREADPTR, 231, Xtensa::FCR, 232, + Xtensa::FSR, 233, Xtensa::F64R_LO, 234, Xtensa::F64R_HI, 235, + Xtensa::F64S, 236}; static DecodeStatus DecodeURRegisterClass(MCInst &Inst, uint64_t RegNo, uint64_t Address, diff --git a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td index b793af66ef387..f758dc6cd90fe 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -107,6 +107,52 @@ def FeatureDebug : SubtargetFeature<"debug", "HasDebug", "true", def HasDebug : Predicate<"Subtarget->hasDebug()">, AssemblerPredicate<(all_of FeatureDebug)>; +def FeatureException : SubtargetFeature<"exception", "HasException", "true", + "Enable Xtensa Exception option">; +def HasException : Predicate<"Subtarget->hasException()">, + AssemblerPredicate<(all_of FeatureException)>; + +def FeatureHighPriInterrupts : SubtargetFeature<"highpriinterrupts", + "HasHighPriInterrupts", "true", + "Enable Xtensa HighPriInterrupts option">; +def HasHighPriInterrupts : Predicate<"Subtarget->hasHighPriInterrupts()">, + AssemblerPredicate<(all_of FeatureHighPriInterrupts)>; + +def FeatureCoprocessor : SubtargetFeature<"coprocessor", "HasCoprocessor", "true", + "Enable Xtensa Coprocessor option">; +def HasCoprocessor : Predicate<"Subtarget->hasCoprocessor()">, + AssemblerPredicate<(all_of FeatureCoprocessor)>; + +def FeatureInterrupt : SubtargetFeature<"interrupt", "HasInterrupt", "true", + 
"Enable Xtensa Interrupt option">; +def HasInterrupt : Predicate<"Subtarget->hasInterrupt()">, + AssemblerPredicate<(all_of FeatureInterrupt)>; + +def FeatureRelocatableVector : SubtargetFeature<"rvector", "HasRelocatableVector", "true", + "Enable Xtensa Relocatable Vector option">; +def HasRelocatableVector : Predicate<"Subtarget->hasRelocatableVector()">, + AssemblerPredicate<(all_of FeatureRelocatableVector)>; + +def FeatureTimerInt : SubtargetFeature<"timerint", "HasTimerInt", "true", + "Enable Xtensa Timer Interrupt option">; +def HasTimerInt : Predicate<"Subtarget->hasTimerInt()">, + AssemblerPredicate<(all_of FeatureTimerInt)>; + +def FeaturePRID : SubtargetFeature<"prid", "HasPRID", "true", + "Enable Xtensa Processor ID option">; +def HasPRID : Predicate<"Subtarget->hasPRID()">, + AssemblerPredicate<(all_of FeaturePRID)>; + +def FeatureRegionProtection : SubtargetFeature<"regprotect", "HasRegionProtection", "true", + "Enable Xtensa Region Protection option">; +def HasRegionProtection : Predicate<"Subtarget->hasRegionProtection()">, + AssemblerPredicate<(all_of FeatureRegionProtection)>; + +def FeatureMiscSR : SubtargetFeature<"miscsr", "HasMiscSR", "true", + "Enable Xtensa Miscellaneous SR option">; +def HasMiscSR : Predicate<"Subtarget->hasMiscSR()">, + AssemblerPredicate<(all_of FeatureMiscSR)>; + //===----------------------------------------------------------------------===// // Xtensa supported processors. 
//===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index a590d11a85ca6..671c11cb58110 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1177,6 +1177,83 @@ let isBarrier = 1, isTerminator = 1 in { def : Pat<(trap), (BREAK (i32 1), (i32 15))>; +//===----------------------------------------------------------------------===// +// Exception feature instructions +//===----------------------------------------------------------------------===// + +def EXCW : RRR_Inst<0x00, 0x00, 0x00, (outs), (ins), + "excw", []>, Requires<[HasException]> { + let r = 0x2; + let s = 0x0; + let t = 0x8; +} + +def RFDE : RRR_Inst<0x00, 0x00, 0x00, (outs), (ins), + "rfde", []>, Requires<[HasException]> { + let r = 0x3; + let s = 0x2; + let t = 0x0; +} + + +def RFE : RRR_Inst<0x00, 0x00, 0x00, (outs), (ins), + "rfe", []>, Requires<[HasException]> { + let r = 0x3; + let s = 0x0; + let t = 0x0; +} + +def SYSCALL : RRR_Inst<0x00, 0x00, 0x00, (outs), (ins), + "syscall", []>, Requires<[HasException]> { + let r = 0x5; + let s = 0x0; + let t = 0x0; +} + +//===----------------------------------------------------------------------===// +// Interrupt feature instructions +//===----------------------------------------------------------------------===// + +def RSIL : RRR_Inst<0x00, 0x00, 0x00, (outs AR:$t), (ins uimm4:$imm), + "rsil\t$t, $imm", []>, Requires<[HasInterrupt]> { + bits<4> imm; + + let r = 0x6; + let s = imm{3-0}; +} + +def WAITI : RRR_Inst<0x00, 0x00, 0x00, (outs), (ins uimm4:$imm), + "waiti\t$imm", []>, Requires<[HasInterrupt]> { + bits<4> imm; + + let r = 0x7; + let s = imm{3-0}; + let t = 0; +} + +def RFI : RRR_Inst<0x00, 0x00, 0x00, (outs), (ins uimm4:$imm), + "rfi\t$imm", []>, Requires<[HasInterrupt]> { + bits<4> imm; + + let r = 0x3; + let s = imm{3-0}; + let t = 0x1; +} + 
+//===----------------------------------------------------------------------===// +// Region Protection feature instructions +//===----------------------------------------------------------------------===// + +def WDTLB : RRR_Inst<0x00, 0x00, 0x05, (outs AR:$t), (ins AR:$s), + "wdtlb\t$t, $s", []>, Requires<[HasRegionProtection]> { + let r = 0xE; +} + +def WITLB : RRR_Inst<0x00, 0x00, 0x05, (outs AR:$t), (ins AR:$s), + "witlb\t$t, $s", []>, Requires<[HasRegionProtection]> { + let r = 0x6; +} + //===----------------------------------------------------------------------===// // DSP Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td index b5d371c2e315a..2fb153d065aca 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td @@ -102,6 +102,8 @@ def MEMCTL : SRReg<97, "memctl", ["MEMCTL", "97"]>; def ATOMCTL : SRReg<99, "atomctl", ["ATOMCTL", "99"]>; +def DDR : SRReg<104, "ddr", ["DDR", "104"]>; + // Instuction break address register 0 def IBREAKA0 : SRReg<128, "ibreaka0", ["IBREAKA0", "128"]>; @@ -120,20 +122,100 @@ def DBREAKC0 : SRReg<160, "dbreakc0", ["DBREAKC0", "160"]>; // Data breakpoint control register 1 def DBREAKC1 : SRReg<161, "dbreakc1", ["DBREAKC1", "161"]>; +def CONFIGID0 : SRReg<176, "configid0", ["CONFIGID0", "176"]>; + +// Exception PC1 +def EPC1 : SRReg<177, "epc1", ["EPC1", "177"]>; + +// Exception PC2 +def EPC2 : SRReg<178, "epc2", ["EPC2", "178"]>; + +// Exception PC3 +def EPC3 : SRReg<179, "epc3", ["EPC3", "179"]>; + +// Exception PC4 +def EPC4 : SRReg<180, "epc4", ["EPC4", "180"]>; + +// Exception PC5 +def EPC5 : SRReg<181, "epc5", ["EPC5", "181"]>; + +// Exception PC6 +def EPC6 : SRReg<182, "epc6", ["EPC6", "182"]>; + +// Exception PC7 +def EPC7 : SRReg<183, "epc7", ["EPC7", "183"]>; + +def DEPC : SRReg<192, "depc", ["DEPC", "192"]>; +def EPS2 : 
SRReg<194, "eps2", ["EPS2", "194"]>; +def EPS3 : SRReg<195, "eps3", ["EPS3", "195"]>; +def EPS4 : SRReg<196, "eps4", ["EPS4", "196"]>; +def EPS5 : SRReg<197, "eps5", ["EPS5", "197"]>; +def EPS6 : SRReg<198, "eps6", ["EPS6", "198"]>; +def EPS7 : SRReg<199, "eps7", ["EPS7", "199"]>; +def CONFIGID1 : SRReg<208, "configid1", ["CONFIGID1", "208"]>; +def EXCSAVE1 : SRReg<209, "excsave1", ["EXCSAVE1", "209"]>; +def EXCSAVE2 : SRReg<210, "excsave2", ["EXCSAVE2", "210"]>; +def EXCSAVE3 : SRReg<211, "excsave3", ["EXCSAVE3", "211"]>; +def EXCSAVE4 : SRReg<212, "excsave4", ["EXCSAVE4", "212"]>; +def EXCSAVE5 : SRReg<213, "excsave5", ["EXCSAVE5", "213"]>; +def EXCSAVE6 : SRReg<214, "excsave6", ["EXCSAVE6", "214"]>; +def EXCSAVE7 : SRReg<215, "excsave7", ["EXCSAVE7", "215"]>; +def CPENABLE : SRReg<224, "cpenable", ["CPENABLE", "224"]>; + +// Interrupt enable mask register +def INTSET : SRReg<226, "interrupt", ["INTERRUPT", "226"]>; + +def INTCLEAR : SRReg<227, "intclear", ["INTCLEAR", "227"]>; + +def INTENABLE : SRReg<228, "intenable", ["INTENABLE", "228"]>; + +// Processor State +def PS : SRReg<230, "ps", ["PS", "230"]>; + +// Vector base register +def VECBASE : SRReg<231, "vecbase", ["VECBASE", "231"]>; + +def EXCCAUSE : SRReg<232, "exccause", ["EXCCAUSE", "232"]>; + // Cause of last debug exception register def DEBUGCAUSE : SRReg<233, "debugcause", ["DEBUGCAUSE", "233"]>; +// Processor Clock Count Register +def CCOUNT : SRReg<234, "ccount", ["CCOUNT", "234"]>; + +// Processor ID Register +def PRID : SRReg<235, "prid", ["PRID", "235"]>; + def ICOUNT : SRReg<236, "icount", ["ICOUNT", "236"]>; def ICOUNTLEVEL : SRReg<237, "icountlevel", ["ICOUNTLEVEL", "237"]>; +def EXCVADDR : SRReg<238, "excvaddr", ["EXCVADDR", "238"]>; + +// Cycle number to interrupt register 0 +def CCOMPARE0 : SRReg<240, "ccompare0", ["CCOMPARE0", "240"]>; + +// Cycle number to interrupt register 1 +def CCOMPARE1 : SRReg<241, "ccompare1", ["CCOMPARE1", "241"]>; + +// Cycle number to interrupt register 2 +def 
CCOMPARE2 : SRReg<242, "ccompare2", ["CCOMPARE2", "242"]>; + +def MISC0 : SRReg<244, "misc0", ["MISC0", "244"]>; +def MISC1 : SRReg<245, "misc1", ["MISC1", "245"]>; +def MISC2 : SRReg<246, "misc2", ["MISC2", "246"]>; +def MISC3 : SRReg<247, "misc3", ["MISC3", "247"]>; def MR01 : RegisterClass<"Xtensa", [i32], 32, (add M0, M1)>; def MR23 : RegisterClass<"Xtensa", [i32], 32, (add M2, M3)>; def MR : RegisterClass<"Xtensa", [i32], 32, (add MR01, MR23)>; -def SR : RegisterClass<"Xtensa", [i32], 32, (add LBEG, LEND, LCOUNT, - SAR, BREG, LITBASE, SCOMPARE1, ACCLO, ACCHI, MR, WINDOWBASE, WINDOWSTART, - IBREAKENABLE, MEMCTL, ATOMCTL, IBREAKA0, IBREAKA1, DBREAKA0, DBREAKA1, - DBREAKC0, DBREAKC1, DEBUGCAUSE, ICOUNT, ICOUNTLEVEL)>; +def SR : RegisterClass<"Xtensa", [i32], 32, (add + LBEG, LEND, LCOUNT, SAR, BREG, LITBASE, SCOMPARE1, ACCLO, ACCHI, MR, + WINDOWBASE, WINDOWSTART, IBREAKENABLE, MEMCTL, ATOMCTL, DDR, IBREAKA0, IBREAKA1, + DBREAKA0, DBREAKA1, DBREAKC0, DBREAKC1, CONFIGID0, EPC1, EPC2, EPC3, EPC4, EPC5, + EPC6, EPC7, DEPC, EPS2, EPS3, EPS4, EPS5, EPS6, EPS7, CONFIGID1, EXCSAVE1, EXCSAVE2, + EXCSAVE3, EXCSAVE4, EXCSAVE5, EXCSAVE6, EXCSAVE7, CPENABLE, INTSET, INTCLEAR, INTENABLE, PS, + VECBASE, EXCCAUSE, DEBUGCAUSE, CCOUNT, PRID, ICOUNT, ICOUNTLEVEL, EXCVADDR, CCOMPARE0, + CCOMPARE1, CCOMPARE2, MISC0, MISC1, MISC2, MISC3)>; //===----------------------------------------------------------------------===// // USER registers @@ -143,6 +225,8 @@ class URReg num, string n, list alt = []> : XtensaReg { let AltNames = alt; } +def EXPSTATE : URReg<230, "expstate", ["EXPSTATE"]>; + // Thread Pointer register def THREADPTR : URReg<231, "threadptr", ["THREADPTR"]>; @@ -152,7 +236,7 @@ def F64R_LO : URReg<234, "f64r_lo", ["F64R_LO"]>; def F64R_HI : URReg<235, "f64r_hi", ["F64R_HI"]>; def F64S : URReg<236, "f64s", ["F64S"]>; -def UR : RegisterClass<"Xtensa", [i32], 32, (add THREADPTR, FCR, +def UR : RegisterClass<"Xtensa", [i32], 32, (add EXPSTATE, THREADPTR, FCR, FSR, F64R_LO, 
F64R_HI, F64S)>; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp index 91d1c9a1c2ee9..8b1b1098f1668 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp @@ -48,6 +48,15 @@ XtensaSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) { HasATOMCTL = false; HasMEMCTL = false; HasDebug = false; + HasException = false; + HasHighPriInterrupts = false; + HasCoprocessor = false; + HasInterrupt = false; + HasRelocatableVector = false; + HasTimerInt = false; + HasPRID = false; + HasRegionProtection = false; + HasMiscSR = false; // Parse features string. ParseSubtargetFeatures(CPUName, CPUName, FS); diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.h b/llvm/lib/Target/Xtensa/XtensaSubtarget.h index a437f8c8104bc..b5f48e547dea7 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.h +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.h @@ -90,6 +90,33 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { // Enable Xtensa Debug option bool HasDebug; + // Enable Xtensa Exceptions option + bool HasException; + + // Enable Xtensa High Priority Interrupt option + bool HasHighPriInterrupts; + + // Enable Xtensa Coprocessor option + bool HasCoprocessor; + + // Enable Xtensa Interrupt option + bool HasInterrupt; + + // Enable Xtensa Relocatable Vector option + bool HasRelocatableVector; + + // Enable Xtensa Timer Interrupt option + bool HasTimerInt; + + // Enable Xtensa Processor ID option + bool HasPRID; + + // Enable Xtensa Region Protection option + bool HasRegionProtection; + + // Enable Xtensa Miscellaneous Special Reigsiters option + bool HasMiscSR; + XtensaSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS); public: @@ -151,6 +178,24 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { bool hasDebug() const { return HasDebug; } + bool hasException() const 
{ return HasException; } + + bool hasHighPriInterrupts() const { return HasHighPriInterrupts; } + + bool hasCoprocessor() const { return HasCoprocessor; } + + bool hasInterrupt() const { return HasInterrupt; } + + bool hasRelocatableVector() const { return HasRelocatableVector; } + + bool hasTimerInt() const { return HasTimerInt; } + + bool hasPRID() const { return HasPRID; } + + bool hasRegionProtection() const { return HasRegionProtection; } + + bool hasMiscSR() const { return HasMiscSR; } + // Automatically generated by tblgen. void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS); }; diff --git a/llvm/test/MC/Xtensa/xtensa-valid-exc.s b/llvm/test/MC/Xtensa/xtensa-valid-exc.s new file mode 100644 index 0000000000000..4d1e9198bd9ad --- /dev/null +++ b/llvm/test/MC/Xtensa/xtensa-valid-exc.s @@ -0,0 +1,21 @@ +# RUN: llvm-mc %s -triple=xtensa -mattr=+exception -show-encoding \ +# RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s + +.align 4 +LBL0: + +# CHECK-INST: excw +# CHECK: encoding: [0x80,0x20,0x00] + excw + +# CHECK-INST: rfde +# CHECK: encoding: [0x00,0x32,0x00] + rfde + +# CHECK-INST: rfe +# CHECK: encoding: [0x00,0x30,0x00] + rfe + +# CHECK-INST: syscall +# CHECK: encoding: [0x00,0x50,0x00] + syscall diff --git a/llvm/test/MC/Xtensa/xtensa-valid-int.s b/llvm/test/MC/Xtensa/xtensa-valid-int.s new file mode 100644 index 0000000000000..a24191ef4aa5a --- /dev/null +++ b/llvm/test/MC/Xtensa/xtensa-valid-int.s @@ -0,0 +1,18 @@ +# RUN: llvm-mc %s -triple=xtensa -mattr=+interrupt -show-encoding \ +# RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s + + +.align 4 +LBL0: + +# CHECK-INST: rfi 1 +# CHECK: encoding: [0x10,0x31,0x00] + rfi 1 + +# CHECK-INST: rsil a3, 1 +# CHECK: encoding: [0x30,0x61,0x00] + rsil a3, 1 + +# CHECK-INST: waiti 1 +# CHECK: encoding: [0x00,0x71,0x00] + waiti 1 \ No newline at end of file diff --git a/llvm/test/MC/Xtensa/xtensa-valid-regprotect.s b/llvm/test/MC/Xtensa/xtensa-valid-regprotect.s new file mode 100644 
index 0000000000000..b3504eef1d557 --- /dev/null +++ b/llvm/test/MC/Xtensa/xtensa-valid-regprotect.s @@ -0,0 +1,14 @@ +# RUN: llvm-mc %s -triple=xtensa -mattr=+regprotect -show-encoding \ +# RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s + + +.align 4 +LBL0: + +# CHECK-INST: wdtlb a3, a4 +# CHECK: encoding: [0x30,0xe4,0x50] + wdtlb a3, a4 + +# CHECK-INST: witlb a3, a4 +# CHECK: encoding: [0x30,0x64,0x50] + witlb a3, a4 From 967c0d08d85553ded8bc5679d808eaa9f3a220de Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 16:18:16 +0300 Subject: [PATCH 031/289] [Xtensa] Add the Xtensa target. --- clang/include/clang/Basic/TargetInfo.h | 5 +- clang/lib/AST/ASTContext.cpp | 47 +++++++++ clang/lib/Basic/CMakeLists.txt | 1 + clang/lib/Basic/Targets.cpp | 4 + clang/lib/Basic/Targets/Xtensa.cpp | 29 ++++++ clang/lib/Basic/Targets/Xtensa.h | 108 +++++++++++++++++++++ clang/lib/Driver/ToolChains/CommonArgs.cpp | 5 + clang/lib/Driver/ToolChains/Gnu.cpp | 7 ++ 8 files changed, 205 insertions(+), 1 deletion(-) create mode 100644 clang/lib/Basic/Targets/Xtensa.cpp create mode 100644 clang/lib/Basic/Targets/Xtensa.h diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h index a58fb5f979272..41805070e5f41 100644 --- a/clang/include/clang/Basic/TargetInfo.h +++ b/clang/include/clang/Basic/TargetInfo.h @@ -359,7 +359,10 @@ class TargetInfo : public TransferrableTargetInfo, // void *__saved_reg_area_end_pointer; // void *__overflow_area_pointer; //} va_list; - HexagonBuiltinVaList + HexagonBuiltinVaList, + + // Tensilica Xtensa + XtensaABIBuiltinVaList }; protected: diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp index 1064507f34616..501a9ca5d0519 100644 --- a/clang/lib/AST/ASTContext.cpp +++ b/clang/lib/AST/ASTContext.cpp @@ -9542,6 +9542,51 @@ static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) { return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); } 
+static TypedefDecl * +CreateXtensaABIBuiltinVaListDecl(const ASTContext *Context) { + // typedef struct __va_list_tag { + RecordDecl *VaListTagDecl; + + VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); + VaListTagDecl->startDefinition(); + + const size_t NumFields = 3; + QualType FieldTypes[NumFields]; + const char *FieldNames[NumFields]; + + // int* __va_stk; + FieldTypes[0] = Context->getPointerType(Context->IntTy); + FieldNames[0] = "__va_stk"; + + // int* __va_reg; + FieldTypes[1] = Context->getPointerType(Context->IntTy); + FieldNames[1] = "__va_reg"; + + // int __va_ndx; + FieldTypes[2] = Context->IntTy; + FieldNames[2] = "__va_ndx"; + + // Create fields + for (unsigned i = 0; i < NumFields; ++i) { + FieldDecl *Field = FieldDecl::Create( + *Context, VaListTagDecl, SourceLocation(), SourceLocation(), + &Context->Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, + /*BitWidth=*/nullptr, + /*Mutable=*/false, ICIS_NoInit); + Field->setAccess(AS_public); + VaListTagDecl->addDecl(Field); + } + VaListTagDecl->completeDefinition(); + Context->VaListTagDecl = VaListTagDecl; + QualType VaListTagType = Context->getRecordType(VaListTagDecl); + + // } __va_list_tag; + TypedefDecl *VaListTagTypedefDecl = + Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list"); + + return VaListTagTypedefDecl; +} + static TypedefDecl *CreateVaListDecl(const ASTContext *Context, TargetInfo::BuiltinVaListKind Kind) { switch (Kind) { @@ -9563,6 +9608,8 @@ static TypedefDecl *CreateVaListDecl(const ASTContext *Context, return CreateSystemZBuiltinVaListDecl(Context); case TargetInfo::HexagonBuiltinVaList: return CreateHexagonBuiltinVaListDecl(Context); + case TargetInfo::XtensaABIBuiltinVaList: + return CreateXtensaABIBuiltinVaListDecl(Context); } llvm_unreachable("Unhandled __builtin_va_list type kind"); diff --git a/clang/lib/Basic/CMakeLists.txt b/clang/lib/Basic/CMakeLists.txt index f30680552e0f5..79bf06b0c9d95 100644 --- a/clang/lib/Basic/CMakeLists.txt 
+++ b/clang/lib/Basic/CMakeLists.txt @@ -120,6 +120,7 @@ add_clang_library(clangBasic Targets/WebAssembly.cpp Targets/X86.cpp Targets/XCore.cpp + Targets/Xtensa.cpp TokenKinds.cpp TypeTraits.cpp Version.cpp diff --git a/clang/lib/Basic/Targets.cpp b/clang/lib/Basic/Targets.cpp index 29133f9ee8fce..7702bc27a6ba1 100644 --- a/clang/lib/Basic/Targets.cpp +++ b/clang/lib/Basic/Targets.cpp @@ -41,6 +41,7 @@ #include "Targets/WebAssembly.h" #include "Targets/X86.h" #include "Targets/XCore.h" +#include "Targets/Xtensa.h" #include "clang/Basic/Diagnostic.h" #include "clang/Basic/DiagnosticFrontend.h" #include "llvm/ADT/StringExtras.h" @@ -752,6 +753,9 @@ std::unique_ptr AllocateTarget(const llvm::Triple &Triple, default: return std::make_unique(Triple, Opts); } + + case llvm::Triple::xtensa : + return std::make_unique(Triple, Opts); } } } // namespace targets diff --git a/clang/lib/Basic/Targets/Xtensa.cpp b/clang/lib/Basic/Targets/Xtensa.cpp new file mode 100644 index 0000000000000..270af0a05cfdc --- /dev/null +++ b/clang/lib/Basic/Targets/Xtensa.cpp @@ -0,0 +1,29 @@ +//===--- Xtensa.cpp - Implement Xtensa target feature support -------------===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements Xtensa TargetInfo objects. 
+// +//===----------------------------------------------------------------------===// + +#include "Xtensa.h" +#include "clang/Basic/Builtins.h" +#include "clang/Basic/MacroBuilder.h" +#include "clang/Basic/TargetBuiltins.h" + +using namespace clang; +using namespace clang::targets; + +void XtensaTargetInfo::getTargetDefines(const LangOptions &Opts, + MacroBuilder &Builder) const { + Builder.defineMacro("__Xtensa__"); + Builder.defineMacro("__xtensa__"); + Builder.defineMacro("__XTENSA__"); + Builder.defineMacro("__XTENSA_EL__"); +} diff --git a/clang/lib/Basic/Targets/Xtensa.h b/clang/lib/Basic/Targets/Xtensa.h new file mode 100644 index 0000000000000..6a3f5441fc61a --- /dev/null +++ b/clang/lib/Basic/Targets/Xtensa.h @@ -0,0 +1,108 @@ +//===--- Xtensa.h - Declare Xtensa target feature support -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares Xtensa TargetInfo objects. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_XTENSA_H +#define LLVM_CLANG_LIB_BASIC_TARGETS_XTENSA_H + +#include "clang/Basic/TargetInfo.h" +#include "clang/Basic/TargetOptions.h" +#include "llvm/ADT/StringSwitch.h" +#include "llvm/TargetParser/Triple.h" +#include "llvm/Support/Compiler.h" + +#include "clang/Basic/Builtins.h" +#include "clang/Basic/MacroBuilder.h" +#include "clang/Basic/TargetBuiltins.h" + +namespace clang { +namespace targets { + +class LLVM_LIBRARY_VISIBILITY XtensaTargetInfo : public TargetInfo { + static const Builtin::Info BuiltinInfo[]; + std::string CPU; + +public: + XtensaTargetInfo(const llvm::Triple &Triple, const TargetOptions &) + : TargetInfo(Triple) { + BigEndian = false; + NoAsmVariants = true; + LongLongAlign = 64; + SuitableAlign = 32; + DoubleAlign = LongDoubleAlign = 64; + SizeType = UnsignedInt; + PtrDiffType = SignedInt; + IntPtrType = SignedInt; + WCharType = UnsignedChar; + WIntType = UnsignedInt; + UseZeroLengthBitfieldAlignment = true; + MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32; + resetDataLayout("e-m:e-p:32:32-i8:8:32-i16:16:32-i64:64-n32"); + } + + void getTargetDefines(const LangOptions &Opts, + MacroBuilder &Builder) const override; + + ArrayRef getTargetBuiltins() const override { + return std::nullopt; + } + + BuiltinVaListKind getBuiltinVaListKind() const override { + + return TargetInfo::XtensaABIBuiltinVaList; + } + + std::string_view getClobbers() const override { return ""; } + + ArrayRef getGCCRegNames() const override { + static const char *const GCCRegNames[] = { + // General register name + "a0", "sp", "a1", "a2", "a3", "a4", "a5", "a6", "a7", "a8", "a9", "a10", + "a11", "a12", "a13", "a14", "a15", + // Special register name + "sar"}; + return llvm::ArrayRef(GCCRegNames); + } + + ArrayRef getGCCRegAliases() const override { + return std::nullopt; + } + + bool validateAsmConstraint(const char *&Name, + 
TargetInfo::ConstraintInfo &Info) const override { + switch (*Name) { + default: + return false; + case 'a': + Info.setAllowsRegister(); + return true; + } + return false; + } + + int getEHDataRegisterNumber(unsigned RegNo) const override { + return (RegNo < 2) ? RegNo : -1; + } + + bool isValidCPUName(StringRef Name) const override { + return llvm::StringSwitch(Name).Case("generic", true).Default(false); + } + + bool setCPU(const std::string &Name) override { + CPU = Name; + return isValidCPUName(Name); + } +}; +} // namespace targets +} // namespace clang +#endif // LLVM_CLANG_LIB_BASIC_TARGETS_XTENSA_H diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp index 019df16a909f4..3de3b30995d8a 100644 --- a/clang/lib/Driver/ToolChains/CommonArgs.cpp +++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp @@ -667,6 +667,11 @@ std::string tools::getCPUName(const Driver &D, const ArgList &Args, case llvm::Triple::loongarch32: case llvm::Triple::loongarch64: return loongarch::getLoongArchTargetCPU(Args, T); + + case llvm::Triple::xtensa: + if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) + return A->getValue(); + return ""; } } diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp index 543f3965dfd4f..3961c8e7e35de 100644 --- a/clang/lib/Driver/ToolChains/Gnu.cpp +++ b/clang/lib/Driver/ToolChains/Gnu.cpp @@ -2576,6 +2576,9 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes( "s390x-unknown-linux-gnu", "s390x-ibm-linux-gnu", "s390x-suse-linux", "s390x-redhat-linux"}; + static const char *const XtensaLibDirs[] = {"/lib"}; + static const char *const XtensaTriples[] = {"xtensa-unknown-elf"}; + using std::begin; using std::end; @@ -2847,6 +2850,10 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes( LibDirs.append(begin(SystemZLibDirs), end(SystemZLibDirs)); TripleAliases.append(begin(SystemZTriples), end(SystemZTriples)); break; + case llvm::Triple::xtensa: + 
LibDirs.append(begin(XtensaLibDirs), end(XtensaLibDirs)); + TripleAliases.append(begin(XtensaTriples), end(XtensaTriples)); + break; default: // By default, just rely on the standard lib directories and the original // triple. From b72a9de6108f5cd24677de49097384e0db4a206d Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 17:14:19 +0300 Subject: [PATCH 032/289] [Xtensa] Implement Xtensa ABI lowering. --- clang/lib/CodeGen/CMakeLists.txt | 1 + clang/lib/CodeGen/CodeGenModule.cpp | 2 + clang/lib/CodeGen/TargetInfo.h | 3 + clang/lib/CodeGen/Targets/Xtensa.cpp | 241 +++++++++++++++++++++++++++ 4 files changed, 247 insertions(+) create mode 100644 clang/lib/CodeGen/Targets/Xtensa.cpp diff --git a/clang/lib/CodeGen/CMakeLists.txt b/clang/lib/CodeGen/CMakeLists.txt index 2a179deddcc31..20549ce0c9499 100644 --- a/clang/lib/CodeGen/CMakeLists.txt +++ b/clang/lib/CodeGen/CMakeLists.txt @@ -140,6 +140,7 @@ add_clang_library(clangCodeGen Targets/WebAssembly.cpp Targets/X86.cpp Targets/XCore.cpp + Targets/Xtensa.cpp VarBypassDetector.cpp DEPENDS diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp index cf5e29e5a3db8..0f20b4c2dc894 100644 --- a/clang/lib/CodeGen/CodeGenModule.cpp +++ b/clang/lib/CodeGen/CodeGenModule.cpp @@ -324,6 +324,8 @@ createTargetCodeGenInfo(CodeGenModule &CGM) { return createLoongArchTargetCodeGenInfo( CGM, Target.getPointerWidth(LangAS::Default), ABIFRLen); } + case llvm::Triple::xtensa: + return createXtensaTargetCodeGenInfo(CGM); } } diff --git a/clang/lib/CodeGen/TargetInfo.h b/clang/lib/CodeGen/TargetInfo.h index 156b4ff4353be..56713bb077118 100644 --- a/clang/lib/CodeGen/TargetInfo.h +++ b/clang/lib/CodeGen/TargetInfo.h @@ -579,6 +579,9 @@ createWinX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel); std::unique_ptr createXCoreTargetCodeGenInfo(CodeGenModule &CGM); +std::unique_ptr +createXtensaTargetCodeGenInfo(CodeGenModule &CGM); + } // namespace CodeGen } // namespace clang 
diff --git a/clang/lib/CodeGen/Targets/Xtensa.cpp b/clang/lib/CodeGen/Targets/Xtensa.cpp new file mode 100644 index 0000000000000..37be0962e97ca --- /dev/null +++ b/clang/lib/CodeGen/Targets/Xtensa.cpp @@ -0,0 +1,241 @@ +//===- Xtensa.cpp ---------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "ABIInfoImpl.h" +#include "TargetInfo.h" + +using namespace clang; +using namespace clang::CodeGen; +//===----------------------------------------------------------------------===// +// Xtensa ABI Implementation +//===----------------------------------------------------------------------===// + +namespace { +class XtensaABIInfo : public DefaultABIInfo { +private: + static const int MaxNumArgGPRs = 6; + static const int MaxNumRetGPRs = 4; + +public: + XtensaABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} + + // DefaultABIInfo's classifyReturnType and classifyArgumentType are + // non-virtual, but computeInfo is virtual, so we overload it. 
+ void computeInfo(CGFunctionInfo &FI) const override; + + ABIArgInfo classifyArgumentType(QualType Ty, int &ArgGPRsLeft) const; + + ABIArgInfo classifyReturnType(QualType RetTy) const; + + RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty, + AggValueSlot Slot) const override; + + ABIArgInfo extendType(QualType Ty) const; +}; +} // end anonymous namespace + +void XtensaABIInfo::computeInfo(CGFunctionInfo &FI) const { + QualType RetTy = FI.getReturnType(); + if (!getCXXABI().classifyReturnType(FI)) + FI.getReturnInfo() = classifyReturnType(RetTy); + + int ArgGPRsLeft = MaxNumArgGPRs; + for (auto &ArgInfo : FI.arguments()) { + ArgInfo.info = classifyArgumentType(ArgInfo.type, ArgGPRsLeft); + } +} + +ABIArgInfo XtensaABIInfo::classifyArgumentType(QualType Ty, + int &ArgGPRsLeft) const { + assert(ArgGPRsLeft <= MaxNumArgGPRs && "Arg GPR tracking underflow"); + Ty = useFirstFieldIfTransparentUnion(Ty); + // Structures with either a non-trivial destructor or a non-trivial + // copy constructor are always passed indirectly. + if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { + if (ArgGPRsLeft) + ArgGPRsLeft -= 1; + return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA == + CGCXXABI::RAA_DirectInMemory); + } + + // Ignore empty structs/unions. + if (isEmptyRecord(getContext(), Ty, true)) + return ABIArgInfo::getIgnore(); + + uint64_t Size = getContext().getTypeSize(Ty); + uint64_t NeededAlign = getContext().getTypeAlign(Ty); + bool MustUseStack = false; + int NeededArgGPRs = (Size + 31) / 32; + + if (NeededAlign == (2 * 32)) + NeededArgGPRs += (ArgGPRsLeft % 2); + + // Put on stack objects which are not fit to 6 registers, + // also on stack object which alignment more then 16 bytes and + // object with 16-byte alignment if it isn't the first argument. 
+ if ((NeededArgGPRs > ArgGPRsLeft) || (NeededAlign > (4 * 32)) || + ((ArgGPRsLeft < 6) && (NeededAlign == (4 * 32)))) { + MustUseStack = true; + NeededArgGPRs = ArgGPRsLeft; + } + ArgGPRsLeft -= NeededArgGPRs; + + if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType() && !MustUseStack) { + // Treat an enum type as its underlying type. + if (const EnumType *EnumTy = Ty->getAs()) + Ty = EnumTy->getDecl()->getIntegerType(); + // All integral types are promoted to XLen width, unless passed on the + // stack. + if (Size < 32 && Ty->isIntegralOrEnumerationType() && !MustUseStack) { + return extendType(Ty); + } + if (Size == 64) + return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64)); + return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 32)); + } + + // Aggregates which are <= 6*32 will be passed in registers if possible, + // so coerce to integers. + if ((Size <= (MaxNumArgGPRs * 32)) && (!MustUseStack)) { + if (Size <= 32) { + return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 32)); + } else if (NeededAlign == (2 * 32)) { + return ABIArgInfo::getDirect(llvm::ArrayType::get( + llvm::IntegerType::get(getVMContext(), 64), NeededArgGPRs / 2)); + } else { + return ABIArgInfo::getDirect(llvm::ArrayType::get( + llvm::IntegerType::get(getVMContext(), 32), NeededArgGPRs)); + } + } +#undef MAX_STRUCT_IN_REGS_SIZE + return getNaturalAlignIndirect(Ty, /*ByVal=*/true); +} + +ABIArgInfo XtensaABIInfo::classifyReturnType(QualType RetTy) const { + if (RetTy->isVoidType()) + return ABIArgInfo::getIgnore(); + int ArgGPRsLeft = MaxNumRetGPRs; + // The rules for return and argument types are the same, so defer to + // classifyArgumentType. 
+ return classifyArgumentType(RetTy, ArgGPRsLeft); +} + +RValue XtensaABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, + QualType Ty, AggValueSlot Slot) const { + // The va_list structure memory layout: + // struct __va_list_tag { + // int32_t *va_stk; + // int32_t *va_reg; + // int32_t va_ndx; + // }; + CGBuilderTy &Builder = CGF.Builder; + + Address OverflowAreaPtr = Builder.CreateStructGEP(VAListAddr, 0, "__va_stk"); + Address OverflowArea = Address(Builder.CreateLoad(OverflowAreaPtr, ""), + CGF.Int32Ty, CharUnits::fromQuantity(4)); + Address RegSaveAreaPtr = Builder.CreateStructGEP(VAListAddr, 1, "__va_reg"); + Address RegSaveArea = Address(Builder.CreateLoad(RegSaveAreaPtr, ""), + CGF.Int32Ty, CharUnits::fromQuantity(4)); + Address ARAreaPtr = Builder.CreateStructGEP(VAListAddr, 2, "__va_ndx"); + llvm::Value *ARIndex = Builder.CreateLoad(ARAreaPtr, ""); + + ARIndex = Builder.CreateLShr(ARIndex, Builder.getInt32(2)); + + unsigned Align = getContext().getTypeAlign(Ty) / 32; + unsigned Size = (getContext().getTypeSize(Ty) + 31) / 32; + + if (Align > 1) { + ARIndex = Builder.CreateAdd(ARIndex, Builder.getInt32(Align - 1)); + ARIndex = + Builder.CreateAnd(ARIndex, Builder.getInt32((uint32_t) ~(Align - 1))); + } + + llvm::Value *ARIndexNext = Builder.CreateAdd(ARIndex, Builder.getInt32(Size)); + Builder.CreateStore(Builder.CreateShl(ARIndexNext, Builder.getInt32(2)), + ARAreaPtr); + + const unsigned OverflowLimit = 6; + llvm::Value *CC = Builder.CreateICmpULE( + ARIndexNext, Builder.getInt32(OverflowLimit), "cond"); + + llvm::BasicBlock *UsingRegSaveArea = + CGF.createBasicBlock("using_regsavearea"); + llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow"); + llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); + + Builder.CreateCondBr(CC, UsingRegSaveArea, UsingOverflow); + + llvm::Type *DirectTy = CGF.ConvertType(Ty); + + // Case 1: consume registers. 
+ Address RegAddr = Address::invalid(); + { + CGF.EmitBlock(UsingRegSaveArea); + + CharUnits RegSize = CharUnits::fromQuantity(4); + RegSaveArea = + Address(Builder.CreateInBoundsGEP(CGF.Int32Ty, RegSaveArea.emitRawPointer(CGF), + ARIndex), + CGF.Int32Ty, RegSaveArea.getAlignment().alignmentOfArrayElement(RegSize)); + RegAddr = RegSaveArea.withElementType(DirectTy); + CGF.EmitBranch(Cont); + } + + // Case 2: consume space in the overflow area. + Address MemAddr = Address::invalid(); + { + CGF.EmitBlock(UsingOverflow); + llvm::Value *CC1 = Builder.CreateICmpULE( + ARIndex, Builder.getInt32(OverflowLimit), "cond_overflow"); + + llvm::Value *ARIndexOff = Builder.CreateSelect( + CC1, Builder.CreateSub(Builder.getInt32(8), ARIndex), + Builder.getInt32(0)); + + llvm::Value *ARIndexCorr = Builder.CreateAdd(ARIndex, ARIndexOff); + llvm::Value *ARIndexNextCorr = Builder.CreateAdd(ARIndexNext, ARIndexOff); + Builder.CreateStore(Builder.CreateShl(ARIndexNextCorr, Builder.getInt32(2)), + ARAreaPtr); + + CharUnits RegSize = CharUnits::fromQuantity(4); + OverflowArea = + Address(Builder.CreateInBoundsGEP( + CGF.Int32Ty, OverflowArea.emitRawPointer(CGF), ARIndexCorr), + CGF.Int32Ty, OverflowArea.getAlignment().alignmentOfArrayElement(RegSize)); + MemAddr = OverflowArea.withElementType(DirectTy); + CGF.EmitBranch(Cont); + } + + CGF.EmitBlock(Cont); + + // Merge the cases with a phi. 
+ Address Result = + emitMergePHI(CGF, RegAddr, UsingRegSaveArea, MemAddr, UsingOverflow, ""); + + return CGF.EmitLoadOfAnyValue(CGF.MakeAddrLValue(Result, Ty), Slot); +} + +ABIArgInfo XtensaABIInfo::extendType(QualType Ty) const { + return ABIArgInfo::getExtend(Ty); +} + +namespace { +class XtensaTargetCodeGenInfo : public TargetCodeGenInfo { +public: + XtensaTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) + : TargetCodeGenInfo(std::make_unique(CGT)) {} +}; +} // namespace + + +std::unique_ptr +CodeGen::createXtensaTargetCodeGenInfo(CodeGenModule &CGM) { + return std::make_unique(CGM.getTypes()); +} + + From 78cec6fc0faa4143de191832f44bec7b36afccc8 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 17:17:40 +0300 Subject: [PATCH 033/289] [Xtensa] Add subtargets ESP32. ESP8266 and ESP32-S2. Make ESP32 default subtarget. --- llvm/lib/Target/Xtensa/Xtensa.td | 12 ++++++++++++ llvm/lib/Target/Xtensa/XtensaSubtarget.cpp | 2 +- llvm/test/CodeGen/Xtensa/blockaddress.ll | 2 +- llvm/test/CodeGen/Xtensa/brcc.ll | 2 +- llvm/test/CodeGen/Xtensa/bswap.ll | 2 +- llvm/test/CodeGen/Xtensa/call.ll | 2 +- llvm/test/CodeGen/Xtensa/calling-conv.ll | 2 +- llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll | 2 +- llvm/test/CodeGen/Xtensa/div.ll | 2 +- llvm/test/CodeGen/Xtensa/dynamic-alloc.ll | 2 +- llvm/test/CodeGen/Xtensa/indirectbr.ll | 2 +- llvm/test/CodeGen/Xtensa/jumpt.ll | 2 +- llvm/test/CodeGen/Xtensa/mul.ll | 2 +- llvm/test/CodeGen/Xtensa/rotl-rotr.ll | 2 +- llvm/test/CodeGen/Xtensa/saverestore.ll | 2 +- llvm/test/CodeGen/Xtensa/select-cc.ll | 2 +- llvm/test/CodeGen/Xtensa/setcc.ll | 2 +- llvm/test/CodeGen/Xtensa/shift.ll | 2 +- llvm/test/CodeGen/Xtensa/stack-access.ll | 2 +- 19 files changed, 30 insertions(+), 18 deletions(-) diff --git a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td index f758dc6cd90fe..3d4c21f45af30 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -161,6 +161,18 @@ class Proc Features> 
def : Proc<"generic", []>; +def : Proc<"esp32", [FeatureDensity, FeatureSingleFloat, FeatureLoop, FeatureMAC16, FeatureWindowed, FeatureBoolean, + FeatureSEXT, FeatureNSA, FeatureMul32, FeatureMul32High, FeatureDFPAccel, FeatureS32C1I, FeatureTHREADPTR, FeatureDiv32, + FeatureATOMCTL, FeatureMEMCTL, FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, + FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR]>; + +def : Proc<"esp8266", [FeatureDensity, FeatureNSA, FeatureMul32, FeatureExtendedL32R, FeatureDebug, FeatureException, FeatureHighPriInterrupts, + FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeatureRegionProtection, FeaturePRID]>; + +def : Proc<"esp32-s2", [FeatureDensity, FeatureWindowed, FeatureSEXT, FeatureNSA, FeatureMul32, FeatureMul32High, FeatureTHREADPTR, FeatureDiv32, + FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, FeatureInterrupt, FeatureRelocatableVector, + FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR]>; + //===----------------------------------------------------------------------===// // Register File Description //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp index 8b1b1098f1668..2856860756757 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp @@ -27,7 +27,7 @@ XtensaSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) { StringRef CPUName = CPU; if (CPUName.empty()) { // set default cpu name - CPUName = "generic"; + CPUName = "esp32"; } HasDensity = false; diff --git a/llvm/test/CodeGen/Xtensa/blockaddress.ll b/llvm/test/CodeGen/Xtensa/blockaddress.ll index debcdbc049330..4fd6de288e267 100644 --- a/llvm/test/CodeGen/Xtensa/blockaddress.ll +++ b/llvm/test/CodeGen/Xtensa/blockaddress.ll @@ 
-1,4 +1,4 @@ -; RUN: llc --mtriple=xtensa < %s | FileCheck %s +; RUN: llc --mtriple=xtensa --mcpu=generic < %s | FileCheck %s @addr = global ptr null diff --git a/llvm/test/CodeGen/Xtensa/brcc.ll b/llvm/test/CodeGen/Xtensa/brcc.ll index 6d542f637cf65..b869e9a23d9b1 100644 --- a/llvm/test/CodeGen/Xtensa/brcc.ll +++ b/llvm/test/CodeGen/Xtensa/brcc.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=xtensa -disable-block-placement -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa --mcpu=generic -disable-block-placement -verify-machineinstrs < %s \ ; RUN: | FileCheck %s define i32 @brcc_sgt(i32 %a, i32 %b) nounwind { diff --git a/llvm/test/CodeGen/Xtensa/bswap.ll b/llvm/test/CodeGen/Xtensa/bswap.ll index 6a87aa84351cf..cf3718173d9e7 100644 --- a/llvm/test/CodeGen/Xtensa/bswap.ll +++ b/llvm/test/CodeGen/Xtensa/bswap.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa --mcpu=generic -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=XTENSA %s declare i16 @llvm.bswap.i16(i16) diff --git a/llvm/test/CodeGen/Xtensa/call.ll b/llvm/test/CodeGen/Xtensa/call.ll index 921f89a81b2e4..1326eff629682 100644 --- a/llvm/test/CodeGen/Xtensa/call.ll +++ b/llvm/test/CodeGen/Xtensa/call.ll @@ -1,4 +1,4 @@ -; RUN: llc --mtriple=xtensa < %s | FileCheck %s +; RUN: llc --mtriple=xtensa --mcpu=generic < %s | FileCheck %s declare i32 @external_function(i32) diff --git a/llvm/test/CodeGen/Xtensa/calling-conv.ll b/llvm/test/CodeGen/Xtensa/calling-conv.ll index 41ae4220145c2..684fc80a7e8a5 100644 --- a/llvm/test/CodeGen/Xtensa/calling-conv.ll +++ b/llvm/test/CodeGen/Xtensa/calling-conv.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=xtensa -O1 -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa --mcpu=generic -O1 -verify-machineinstrs < %s \ ; 
RUN: | FileCheck %s -check-prefix=XTENSA ; Check placement of first 6 arguments in registers and 7th argument on stack diff --git a/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll index 6030323538625..50d3414b05535 100644 --- a/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll +++ b/llvm/test/CodeGen/Xtensa/ctlz-cttz-ctpop.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa --mcpu=generic -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=XTENSA %s declare i32 @llvm.cttz.i32(i32, i1) diff --git a/llvm/test/CodeGen/Xtensa/div.ll b/llvm/test/CodeGen/Xtensa/div.ll index e10e976fb1b38..5583906dd560f 100644 --- a/llvm/test/CodeGen/Xtensa/div.ll +++ b/llvm/test/CodeGen/Xtensa/div.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa --mcpu=generic -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=XTENSA %s define i32 @udiv(i32 %a, i32 %b) nounwind { diff --git a/llvm/test/CodeGen/Xtensa/dynamic-alloc.ll b/llvm/test/CodeGen/Xtensa/dynamic-alloc.ll index 2b28f3559d775..1e1d11615cf9d 100644 --- a/llvm/test/CodeGen/Xtensa/dynamic-alloc.ll +++ b/llvm/test/CodeGen/Xtensa/dynamic-alloc.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=xtensa -disable-block-placement -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa --mcpu=generic -disable-block-placement -verify-machineinstrs < %s \ ; RUN: | FileCheck %s define ptr @test_simple_alloca(i32 %numelts) { diff --git a/llvm/test/CodeGen/Xtensa/indirectbr.ll b/llvm/test/CodeGen/Xtensa/indirectbr.ll index c4181c28826f4..ce89ef8feaa15 100644 --- a/llvm/test/CodeGen/Xtensa/indirectbr.ll +++ b/llvm/test/CodeGen/Xtensa/indirectbr.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been 
autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 -; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa --mcpu=generic -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=XTENSA %s define i32 @indirectbr(i8* %target) nounwind { diff --git a/llvm/test/CodeGen/Xtensa/jumpt.ll b/llvm/test/CodeGen/Xtensa/jumpt.ll index 66c2fc39e3952..1c4a5bf265f03 100644 --- a/llvm/test/CodeGen/Xtensa/jumpt.ll +++ b/llvm/test/CodeGen/Xtensa/jumpt.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa --mcpu=generic -verify-machineinstrs < %s \ ; RUN: | FileCheck %s define void @switch_4_xtensa(i32 %in, ptr %out) nounwind { diff --git a/llvm/test/CodeGen/Xtensa/mul.ll b/llvm/test/CodeGen/Xtensa/mul.ll index 9b13897293dc1..7aaa2e1d00af4 100644 --- a/llvm/test/CodeGen/Xtensa/mul.ll +++ b/llvm/test/CodeGen/Xtensa/mul.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa --mcpu=generic -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=XTENSA %s define signext i32 @square(i32 %a) nounwind { diff --git a/llvm/test/CodeGen/Xtensa/rotl-rotr.ll b/llvm/test/CodeGen/Xtensa/rotl-rotr.ll index 350315e9aefda..9b704e0ac2177 100644 --- a/llvm/test/CodeGen/Xtensa/rotl-rotr.ll +++ b/llvm/test/CodeGen/Xtensa/rotl-rotr.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa --mcpu=generic -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=XTENSA %s define i32 @rotl_32(i32 %x, i32 %y) nounwind { diff --git a/llvm/test/CodeGen/Xtensa/saverestore.ll b/llvm/test/CodeGen/Xtensa/saverestore.ll index 69c8b16ab601d..669b639be92ee 100644 --- a/llvm/test/CodeGen/Xtensa/saverestore.ll +++ 
b/llvm/test/CodeGen/Xtensa/saverestore.ll @@ -1,4 +1,4 @@ -; RUN: llc --mtriple=xtensa < %s | FileCheck %s +; RUN: llc --mtriple=xtensa --mcpu=generic < %s | FileCheck %s declare ptr @llvm.stacksave() diff --git a/llvm/test/CodeGen/Xtensa/select-cc.ll b/llvm/test/CodeGen/Xtensa/select-cc.ll index c86aa9f33ca36..31fb11d6855df 100644 --- a/llvm/test/CodeGen/Xtensa/select-cc.ll +++ b/llvm/test/CodeGen/Xtensa/select-cc.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=xtensa -disable-block-placement -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa --mcpu=generic -disable-block-placement -verify-machineinstrs < %s \ ; RUN: | FileCheck %s define i32 @f_eq(i32 %a, ptr %b) nounwind { diff --git a/llvm/test/CodeGen/Xtensa/setcc.ll b/llvm/test/CodeGen/Xtensa/setcc.ll index 05eb80e041fbe..a335b54d2658e 100644 --- a/llvm/test/CodeGen/Xtensa/setcc.ll +++ b/llvm/test/CodeGen/Xtensa/setcc.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple=xtensa -O0 | FileCheck %s +; RUN: llc < %s -mtriple=xtensa --mcpu=generic -O0 | FileCheck %s define i32 @f_eq(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: f_eq: diff --git a/llvm/test/CodeGen/Xtensa/shift.ll b/llvm/test/CodeGen/Xtensa/shift.ll index 729b66b12ab20..b48fc52a4387a 100644 --- a/llvm/test/CodeGen/Xtensa/shift.ll +++ b/llvm/test/CodeGen/Xtensa/shift.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa --mcpu=generic -verify-machineinstrs < %s \ ; RUN: | FileCheck %s define i32 @lshl(i32 %x, i32 %y) nounwind { diff --git a/llvm/test/CodeGen/Xtensa/stack-access.ll b/llvm/test/CodeGen/Xtensa/stack-access.ll index 1590d24f228f2..3f53296eb737e 100644 --- a/llvm/test/CodeGen/Xtensa/stack-access.ll +++ b/llvm/test/CodeGen/Xtensa/stack-access.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=xtensa -O0 
bool isValidCPUName(StringRef Name) const override { - return llvm::StringSwitch<bool>(Name).Case("generic", true).Default(false); + return llvm::StringSwitch<bool>(Name) + .Case("esp32", true)
--- .../Xtensa/AsmParser/XtensaAsmParser.cpp | 315 +++++++++++++++++- .../Disassembler/XtensaDisassembler.cpp | 180 +++++++++- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 35 ++ llvm/lib/Target/Xtensa/XtensaRegisterInfo.td | 3 +- llvm/test/MC/Xtensa/Core/processor-control.s | 8 + 5 files changed, 523 insertions(+), 18 deletions(-) diff --git a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp index fbd168d304aa8..8e8090cc79e6e 100644 --- a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp +++ b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp @@ -62,11 +62,12 @@ class XtensaAsmParser : public MCTargetAsmParser { #include "XtensaGenAsmMatcher.inc" ParseStatus parseImmediate(OperandVector &Operands); - ParseStatus parseRegister(OperandVector &Operands, bool AllowParens = false, - bool SR = false); + ParseStatus parseRegister(OperandVector &Operands, + bool AllowParens = false, bool SR = false, + bool UR = false); ParseStatus parseOperandWithModifier(OperandVector &Operands); bool parseOperand(OperandVector &Operands, StringRef Mnemonic, - bool SR = false); + bool SR = false, bool UR = false); bool ParseInstructionWithSR(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, OperandVector &Operands); ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc, @@ -76,6 +77,8 @@ class XtensaAsmParser : public MCTargetAsmParser { ParseStatus parsePCRelTarget(OperandVector &Operands); bool parseLiteralDirective(SMLoc L); + bool checkRegister(unsigned RegNo); + public: enum XtensaMatchResultTy { Match_Dummy = FIRST_TARGET_MATCH_RESULT_TY, @@ -89,6 +92,86 @@ class XtensaAsmParser : public MCTargetAsmParser { : MCTargetAsmParser(Options, STI, MII) { setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); } + + bool hasWindowed() const { + return getSTI().getFeatureBits()[Xtensa::FeatureWindowed]; + }; + + bool hasSingleFloat() const { + return 
getSTI().getFeatureBits()[Xtensa::FeatureSingleFloat]; + }; + + bool hasLoop() const { + return getSTI().getFeatureBits()[Xtensa::FeatureLoop]; + }; + + bool hasMAC16() const { + return getSTI().getFeatureBits()[Xtensa::FeatureMAC16]; + }; + + bool hasBoolean() const { + return getSTI().getFeatureBits()[Xtensa::FeatureBoolean]; + }; + + bool hasDFPAccel() const { + return getSTI().getFeatureBits()[Xtensa::FeatureDFPAccel]; + }; + + bool hasS32C1I() const { + return getSTI().getFeatureBits()[Xtensa::FeatureS32C1I]; + }; + + bool hasTHREADPTR() const { + return getSTI().getFeatureBits()[Xtensa::FeatureTHREADPTR]; + }; + + bool hasExtendedL32R() const { + return getSTI().getFeatureBits()[Xtensa::FeatureExtendedL32R]; + } + + bool hasATOMCTL() const { + return getSTI().getFeatureBits()[Xtensa::FeatureATOMCTL]; + } + + bool hasMEMCTL() const { + return getSTI().getFeatureBits()[Xtensa::FeatureMEMCTL]; + } + + bool hasDebug() const { + return getSTI().getFeatureBits()[Xtensa::FeatureDebug]; + } + + bool hasException() const { + return getSTI().getFeatureBits()[Xtensa::FeatureException]; + } + + bool hasHighPriInterrupts() const { + return getSTI().getFeatureBits()[Xtensa::FeatureHighPriInterrupts]; + } + + bool hasCoprocessor() const { + return getSTI().getFeatureBits()[Xtensa::FeatureCoprocessor]; + } + + bool hasInterrupt() const { + return getSTI().getFeatureBits()[Xtensa::FeatureInterrupt]; + } + + bool hasRelocatableVector() const { + return getSTI().getFeatureBits()[Xtensa::FeatureRelocatableVector]; + } + + bool hasTimerInt() const { + return getSTI().getFeatureBits()[Xtensa::FeatureTimerInt]; + } + + bool hasPRID() const { + return getSTI().getFeatureBits()[Xtensa::FeaturePRID]; + } + + bool hasMiscSR() const { + return getSTI().getFeatureBits()[Xtensa::FeatureMiscSR]; + } }; // Return true if Expr is in the range [MinValue, MaxValue]. 
and such a situation may lead to a conflict
XtensaAsmParser::parseRegister(OperandVector &Operands, getLexer().UnLex(Buf[0]); return ParseStatus::NoMatch; } + + if (!checkRegister(RegNo)) { + return ParseStatus::NoMatch; + } + if (HadParens) Operands.push_back(XtensaOperand::createToken("(", FirstS)); SMLoc S = getLoc(); @@ -679,7 +792,7 @@ ParseStatus XtensaAsmParser::parseOperandWithModifier(OperandVector &Operands) { /// from this information, adding to Operands. /// If operand was parsed, returns false, else true. bool XtensaAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic, - bool SR) { + bool SR, bool UR) { // Check if the current operand has a custom associated parser, if so, try to // custom parse the operand, or fallback to the general approach. ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic); @@ -693,7 +806,7 @@ bool XtensaAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic, return true; // Attempt to parse token as register - if (parseRegister(Operands, true, SR).isSuccess()) + if (parseRegister(Operands, true, SR, UR).isSuccess()) return false; // Attempt to parse token as an immediate @@ -707,8 +820,13 @@ bool XtensaAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic, bool XtensaAsmParser::ParseInstructionWithSR(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, OperandVector &Operands) { + bool IsSR = Name.starts_with("wsr") || Name.starts_with("rsr") || + Name.starts_with("xsr"); + bool IsUR = Name.starts_with("wur") || Name.starts_with("rur"); + if ((Name.starts_with("wsr.") || Name.starts_with("rsr.") || - Name.starts_with("xsr.")) && + Name.starts_with("xsr.") || Name.starts_with("rur.") || + Name.starts_with("wur.")) && (Name.size() > 4)) { // Parse case when instruction name is concatenated with SR register // name, like "wsr.sar a1" @@ -725,6 +843,11 @@ bool XtensaAsmParser::ParseInstructionWithSR(ParseInstructionInfo &Info, if (RegNo == 0) return Error(NameLoc, "invalid register name"); + if (!checkRegister(RegNo)) { 
+ Error(NameLoc, "invalid register name"); + return true; + } + // Parse operand if (parseOperand(Operands, Name)) return true; @@ -747,7 +870,7 @@ bool XtensaAsmParser::ParseInstructionWithSR(ParseInstructionInfo &Info, } // Parse second operand - if (parseOperand(Operands, Name, true)) + if (parseOperand(Operands, Name, IsSR, IsUR)) return true; } @@ -765,7 +888,8 @@ bool XtensaAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, OperandVector &Operands) { if (Name.starts_with("wsr") || Name.starts_with("rsr") || - Name.starts_with("xsr")) { + Name.starts_with("xsr") || Name.starts_with("rur") || + Name.starts_with("wur")) { return ParseInstructionWithSR(Info, Name, NameLoc, Operands); } @@ -845,6 +969,169 @@ ParseStatus XtensaAsmParser::parseDirective(AsmToken DirectiveID) { return ParseStatus::NoMatch; } + +// Verify SR and UR +bool XtensaAsmParser::checkRegister(unsigned RegNo) { + StringRef CPU = getSTI().getCPU(); + unsigned NumIntLevels = 0; + unsigned NumTimers = 0; + unsigned NumMiscSR = 0; + bool IsESP32 = false; + bool IsESP32_S2 = false; + bool Res = true; + + // Assume that CPU is esp32 by default + if ((CPU == "esp32") || (CPU == "")) { + NumIntLevels = 6; + NumTimers = 3; + NumMiscSR = 4; + IsESP32 = true; + } else if (CPU == "esp32-s2") { + NumIntLevels = 6; + NumTimers = 3; + NumMiscSR = 4; + IsESP32_S2 = true; + } else if (CPU == "esp8266") { + NumIntLevels = 2; + NumTimers = 1; + } + + switch (RegNo) { + case Xtensa::LBEG: + case Xtensa::LEND: + case Xtensa::LCOUNT: + Res = hasLoop(); + break; + case Xtensa::BREG: + Res = hasBoolean(); + break; + case Xtensa::LITBASE: + Res = hasExtendedL32R(); + break; + case Xtensa::SCOMPARE1: + Res = hasS32C1I(); + break; + case Xtensa::ACCLO: + case Xtensa::ACCHI: + case Xtensa::M0: + case Xtensa::M1: + case Xtensa::M2: + case Xtensa::M3: + Res = hasMAC16(); + break; + case Xtensa::WINDOWBASE: + case Xtensa::WINDOWSTART: + Res = hasWindowed(); + break; + case 
Xtensa::IBREAKENABLE: + case Xtensa::IBREAKA0: + case Xtensa::IBREAKA1: + case Xtensa::DBREAKA0: + case Xtensa::DBREAKA1: + case Xtensa::DBREAKC0: + case Xtensa::DBREAKC1: + case Xtensa::DEBUGCAUSE: + case Xtensa::ICOUNT: + case Xtensa::ICOUNTLEVEL: + Res = hasDebug(); + break; + case Xtensa::ATOMCTL: + Res = hasATOMCTL(); + break; + case Xtensa::MEMCTL: + Res = hasMEMCTL(); + break; + case Xtensa::EPC1: + Res = hasException(); + break; + case Xtensa::EPC2: + case Xtensa::EPC3: + case Xtensa::EPC4: + case Xtensa::EPC5: + case Xtensa::EPC6: + case Xtensa::EPC7: + Res = hasHighPriInterrupts(); + Res = Res & (NumIntLevels >= (RegNo - Xtensa::EPC1)); + break; + case Xtensa::EPS2: + case Xtensa::EPS3: + case Xtensa::EPS4: + case Xtensa::EPS5: + case Xtensa::EPS6: + case Xtensa::EPS7: + Res = hasHighPriInterrupts(); + Res = Res & (NumIntLevels > (RegNo - Xtensa::EPS2)); + break; + case Xtensa::EXCSAVE1: + Res = hasException(); + break; + case Xtensa::EXCSAVE2: + case Xtensa::EXCSAVE3: + case Xtensa::EXCSAVE4: + case Xtensa::EXCSAVE5: + case Xtensa::EXCSAVE6: + case Xtensa::EXCSAVE7: + Res = hasHighPriInterrupts(); + Res = Res & (NumIntLevels >= (RegNo - Xtensa::EXCSAVE1)); + break; + case Xtensa::DEPC: + case Xtensa::EXCCAUSE: + case Xtensa::EXCVADDR: + Res = hasException(); + break; + case Xtensa::CPENABLE: + Res = hasCoprocessor(); + break; + case Xtensa::VECBASE: + Res = hasRelocatableVector(); + break; + case Xtensa::CCOUNT: + Res = hasTimerInt(); + Res &= (NumTimers > 0); + break; + case Xtensa::CCOMPARE0: + case Xtensa::CCOMPARE1: + case Xtensa::CCOMPARE2: + Res = hasTimerInt(); + Res &= (NumTimers > (RegNo - Xtensa::CCOMPARE0)); + break; + case Xtensa::PRID: + Res = hasPRID(); + break; + case Xtensa::INTSET: + case Xtensa::INTCLEAR: + case Xtensa::INTENABLE: + Res = hasInterrupt(); + break; + case Xtensa::MISC0: + case Xtensa::MISC1: + case Xtensa::MISC2: + case Xtensa::MISC3: + Res = hasMiscSR(); + Res &= (NumMiscSR > (RegNo - Xtensa::MISC0)); + break; + case 
Xtensa::THREADPTR: + Res = hasTHREADPTR(); + break; + case Xtensa::GPIO_OUT: + Res = IsESP32_S2; + break; + case Xtensa::EXPSTATE: + Res = IsESP32; + break; + case Xtensa::FCR: + case Xtensa::FSR: + Res = hasSingleFloat(); + break; + case Xtensa::F64R_LO: + case Xtensa::F64R_HI: + case Xtensa::F64S: + Res = hasDFPAccel(); + break; + } + + return Res; +} // Force static initialization. extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeXtensaAsmParser() { diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index d4d84b47f96ca..c10c31d4f0164 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -147,6 +147,169 @@ static DecodeStatus DecodeMR23RegisterClass(MCInst &Inst, uint64_t RegNo, return MCDisassembler::Success; } +// Verify SR and UR +bool CheckRegister(unsigned RegNo, MCSubtargetInfo STI) { + StringRef CPU = STI.getCPU(); + unsigned NumIntLevels = 0; + unsigned NumTimers = 0; + unsigned NumMiscSR = 0; + bool IsESP32 = false; + bool IsESP32_S2 = false; + bool Res = true; + + // Assume that CPU is esp32 by default + if ((CPU == "esp32") || (CPU == "")) { + NumIntLevels = 6; + NumTimers = 3; + NumMiscSR = 4; + IsESP32 = true; + } else if (CPU == "esp32-s2") { + NumIntLevels = 6; + NumTimers = 3; + NumMiscSR = 4; + IsESP32_S2 = true; + } else if (CPU == "esp8266") { + NumIntLevels = 2; + NumTimers = 1; + } + + switch (RegNo) { + case Xtensa::LBEG: + case Xtensa::LEND: + case Xtensa::LCOUNT: + Res = STI.getFeatureBits()[Xtensa::FeatureLoop]; + break; + case Xtensa::BREG: + Res = STI.getFeatureBits()[Xtensa::FeatureBoolean]; + break; + case Xtensa::LITBASE: + Res = STI.getFeatureBits()[Xtensa::FeatureExtendedL32R]; + break; + case Xtensa::SCOMPARE1: + Res = STI.getFeatureBits()[Xtensa::FeatureS32C1I]; + break; + case Xtensa::ACCLO: + case Xtensa::ACCHI: + case Xtensa::M0: + case Xtensa::M1: + case 
Xtensa::M2: + case Xtensa::M3: + Res = STI.getFeatureBits()[Xtensa::FeatureMAC16]; + break; + case Xtensa::WINDOWBASE: + case Xtensa::WINDOWSTART: + Res = STI.getFeatureBits()[Xtensa::FeatureWindowed]; + break; + case Xtensa::IBREAKENABLE: + case Xtensa::IBREAKA0: + case Xtensa::IBREAKA1: + case Xtensa::DBREAKA0: + case Xtensa::DBREAKA1: + case Xtensa::DBREAKC0: + case Xtensa::DBREAKC1: + case Xtensa::DEBUGCAUSE: + case Xtensa::ICOUNT: + case Xtensa::ICOUNTLEVEL: + Res = STI.getFeatureBits()[Xtensa::FeatureDebug]; + break; + case Xtensa::ATOMCTL: + Res = STI.getFeatureBits()[Xtensa::FeatureATOMCTL]; + break; + case Xtensa::MEMCTL: + Res = STI.getFeatureBits()[Xtensa::FeatureMEMCTL]; + break; + case Xtensa::EPC1: + Res = STI.getFeatureBits()[Xtensa::FeatureException]; + break; + case Xtensa::EPC2: + case Xtensa::EPC3: + case Xtensa::EPC4: + case Xtensa::EPC5: + case Xtensa::EPC6: + case Xtensa::EPC7: + Res = STI.getFeatureBits()[Xtensa::FeatureHighPriInterrupts]; + Res = Res & (NumIntLevels >= (RegNo - Xtensa::EPC1)); + break; + case Xtensa::EPS2: + case Xtensa::EPS3: + case Xtensa::EPS4: + case Xtensa::EPS5: + case Xtensa::EPS6: + case Xtensa::EPS7: + Res = STI.getFeatureBits()[Xtensa::FeatureHighPriInterrupts]; + Res = Res & (NumIntLevels > (RegNo - Xtensa::EPS2)); + break; + case Xtensa::EXCSAVE1: + Res = STI.getFeatureBits()[Xtensa::FeatureException]; + break; + case Xtensa::EXCSAVE2: + case Xtensa::EXCSAVE3: + case Xtensa::EXCSAVE4: + case Xtensa::EXCSAVE5: + case Xtensa::EXCSAVE6: + case Xtensa::EXCSAVE7: + Res = STI.getFeatureBits()[Xtensa::FeatureHighPriInterrupts]; + Res = Res & (NumIntLevels >= (RegNo - Xtensa::EXCSAVE1)); + break; + case Xtensa::DEPC: + case Xtensa::EXCCAUSE: + case Xtensa::EXCVADDR: + Res = STI.getFeatureBits()[Xtensa::FeatureException]; + break; + case Xtensa::CPENABLE: + Res = STI.getFeatureBits()[Xtensa::FeatureCoprocessor]; + break; + case Xtensa::VECBASE: + Res = STI.getFeatureBits()[Xtensa::FeatureRelocatableVector]; + break; + 
case Xtensa::CCOUNT: + Res = STI.getFeatureBits()[Xtensa::FeatureTimerInt]; + Res &= (NumTimers > 0); + break; + case Xtensa::CCOMPARE0: + case Xtensa::CCOMPARE1: + case Xtensa::CCOMPARE2: + Res = STI.getFeatureBits()[Xtensa::FeatureTimerInt]; + Res &= (NumTimers > (RegNo - Xtensa::CCOMPARE0)); + break; + case Xtensa::PRID: + Res = STI.getFeatureBits()[Xtensa::FeaturePRID]; + break; + case Xtensa::INTSET: + case Xtensa::INTCLEAR: + case Xtensa::INTENABLE: + Res = STI.getFeatureBits()[Xtensa::FeatureInterrupt]; + break; + case Xtensa::MISC0: + case Xtensa::MISC1: + case Xtensa::MISC2: + case Xtensa::MISC3: + Res = STI.getFeatureBits()[Xtensa::FeatureMiscSR]; + Res &= (NumMiscSR > (RegNo - Xtensa::MISC0)); + break; + case Xtensa::THREADPTR: + Res = STI.getFeatureBits()[Xtensa::FeatureTHREADPTR]; + break; + case Xtensa::GPIO_OUT: + Res = IsESP32_S2; + break; + case Xtensa::EXPSTATE: + Res = IsESP32; + break; + case Xtensa::FCR: + case Xtensa::FSR: + Res = STI.getFeatureBits()[Xtensa::FeatureSingleFloat]; + break; + case Xtensa::F64R_LO: + case Xtensa::F64R_HI: + case Xtensa::F64S: + Res = STI.getFeatureBits()[Xtensa::FeatureDFPAccel]; + break; + } + + return Res; +} + static const unsigned SRDecoderTable[] = { Xtensa::LBEG, 0, Xtensa::LEND, 1, Xtensa::LCOUNT, 2, Xtensa::SAR, 3, @@ -186,12 +349,19 @@ static const unsigned SRDecoderTable[] = { static DecodeStatus DecodeSRRegisterClass(MCInst &Inst, uint64_t RegNo, uint64_t Address, const void *Decoder) { + const llvm::MCSubtargetInfo STI = + ((const MCDisassembler *)Decoder)->getSubtargetInfo(); + if (RegNo > 255) return MCDisassembler::Fail; for (unsigned i = 0; i < std::size(SRDecoderTable); i += 2) { if (SRDecoderTable[i + 1] == RegNo) { unsigned Reg = SRDecoderTable[i]; + + if (!CheckRegister(Reg, STI)) + return MCDisassembler::Fail; + Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } @@ -201,9 +371,9 @@ static DecodeStatus DecodeSRRegisterClass(MCInst &Inst, uint64_t RegNo, } static 
const unsigned URDecoderTable[] = { - Xtensa::EXPSTATE, 230, Xtensa::THREADPTR, 231, Xtensa::FCR, 232, - Xtensa::FSR, 233, Xtensa::F64R_LO, 234, Xtensa::F64R_HI, 235, - Xtensa::F64S, 236}; + Xtensa::GPIO_OUT, 0, Xtensa::EXPSTATE, 230, Xtensa::THREADPTR, 231, + Xtensa::FCR, 232, Xtensa::FSR, 233, Xtensa::F64R_LO, 234, + Xtensa::F64R_HI, 235, Xtensa::F64S, 236}; static DecodeStatus DecodeURRegisterClass(MCInst &Inst, uint64_t RegNo, uint64_t Address, @@ -217,6 +387,10 @@ static DecodeStatus DecodeURRegisterClass(MCInst &Inst, uint64_t RegNo, for (unsigned i = 0; i < std::size(URDecoderTable); i += 2) { if (URDecoderTable[i + 1] == RegNo) { unsigned Reg = URDecoderTable[i]; + + if (!CheckRegister(Reg, STI)) + return MCDisassembler::Fail; + Inst.addOperand(MCOperand::createReg(Reg)); return MCDisassembler::Success; } diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 671c11cb58110..e030844149ce8 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -558,6 +558,41 @@ def XSR : RSR_Inst<0x00, 0x01, 0x06, (outs AR:$ard, SR:$srd), (ins AR:$t, SR:$sr let Constraints = "$ard = $t, $srd = $sr"; } +//===----------------------------------------------------------------------===// +// User Registers read/write instructions +//===----------------------------------------------------------------------===// + +def WUR : RRR_Inst<0x00, 0x03, 0x0F, (outs UR:$ur), (ins AR:$t), + "wur\t$t, $ur", []> { + bits<8> ur; + + let r = ur{7-4}; + let s = ur{3-0}; +} + +def RUR : RRR_Inst<0x00, 0x03, 0x0E, (outs AR:$r), (ins UR:$ur), + "rur\t$r, $ur", []> { + bits<8> ur; + + let s = ur{7-4}; + let t = ur{3-0}; +} + +//===----------------------------------------------------------------------===// +// External Registers read/write instructions +//===----------------------------------------------------------------------===// + +def RER : RRR_Inst<0x00, 0x00, 0x04, (outs AR:$t), (ins AR:$s), + 
"rer\t$t, $s", []> { + let r = 0x6; +} + +def WER : RRR_Inst<0x00, 0x00, 0x04, (outs), (ins AR:$t, AR:$s), + "wer\t$t, $s", []> { + let r = 0x7; + let hasSideEffects = 1; +} + //===----------------------------------------------------------------------===// // Stack allocation //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td index 2fb153d065aca..93e67af82fc86 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td @@ -225,6 +225,7 @@ class URReg num, string n, list alt = []> : XtensaReg { let AltNames = alt; } +def GPIO_OUT : URReg<0, "gpio_out", ["GPIO_OUT"]>; def EXPSTATE : URReg<230, "expstate", ["EXPSTATE"]>; // Thread Pointer register @@ -236,7 +237,7 @@ def F64R_LO : URReg<234, "f64r_lo", ["F64R_LO"]>; def F64R_HI : URReg<235, "f64r_hi", ["F64R_HI"]>; def F64S : URReg<236, "f64s", ["F64S"]>; -def UR : RegisterClass<"Xtensa", [i32], 32, (add EXPSTATE, THREADPTR, FCR, +def UR : RegisterClass<"Xtensa", [i32], 32, (add GPIO_OUT, EXPSTATE, THREADPTR, FCR, FSR, F64R_LO, F64R_HI, F64S)>; //===----------------------------------------------------------------------===// diff --git a/llvm/test/MC/Xtensa/Core/processor-control.s b/llvm/test/MC/Xtensa/Core/processor-control.s index 5b648356fc68b..1e7bac5789276 100644 --- a/llvm/test/MC/Xtensa/Core/processor-control.s +++ b/llvm/test/MC/Xtensa/Core/processor-control.s @@ -25,6 +25,10 @@ isync # CHECK: encoding: [0xf0,0x20,0x00] nop +# CHECK-INST: rer a3, a4 +# CHECK: encoding: [0x30,0x64,0x40] +rer a3, a4 + # Instruction format RSR # CHECK-INST: rsr a8, sar # CHECK: encoding: [0x80,0x03,0x03] @@ -43,6 +47,10 @@ rsr a8, 3 # CHECK: encoding: [0x10,0x20,0x00] rsync +# CHECK-INST: wer a3, a4 +# CHECK: encoding: [0x30,0x74,0x40] +wer a3, a4 + # Instruction format RSR # CHECK-INST: wsr a8, sar # CHECK: encoding: [0x80,0x03,0x13] From 
ff643752d07f9c02efcaf96ff40f3302c1e9ab25 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 18:12:11 +0300 Subject: [PATCH 036/289] [Xtensa] Improve CFA support. --- llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp index 2653c293dc0c4..d80b98e6f56dd 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp @@ -14,6 +14,7 @@ #include "XtensaTargetStreamer.h" #include "llvm/ADT/STLExtras.h" #include "llvm/MC/MCAsmInfo.h" +#include "llvm/MC/MCDwarf.h" #include "llvm/MC/MCInstrInfo.h" #include "llvm/MC/MCRegisterInfo.h" #include "llvm/MC/MCStreamer.h" @@ -36,6 +37,9 @@ static MCAsmInfo *createXtensaMCAsmInfo(const MCRegisterInfo &MRI, const Triple &TT, const MCTargetOptions &Options) { MCAsmInfo *MAI = new XtensaMCAsmInfo(TT); + MCCFIInstruction Inst = MCCFIInstruction::cfiDefCfa( + nullptr, MRI.getDwarfRegNum(Xtensa::SP, true), 0); + MAI->addInitialFrameState(Inst); return MAI; } From fc8e75f2282d6127beb9511562f1d0fb4287eb1f Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 18:15:35 +0300 Subject: [PATCH 037/289] [Xtensa] Lowering Exception Selector and Pointer Registers. 
--- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 14 ++++++++++++++ llvm/lib/Target/Xtensa/XtensaISelLowering.h | 9 +++++++++ 2 files changed, 23 insertions(+) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 194c5f073c5db..4d8817b1ad5c0 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -315,6 +315,20 @@ bool XtensaTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, return false; } +/// If a physical register, this returns the register that receives the +/// exception address on entry to an EH pad. +Register XtensaTargetLowering::getExceptionPointerRegister( + const Constant *PersonalityFn) const { + return Xtensa::A2; +} + +/// If a physical register, this returns the register that receives the +/// exception typeid on entry to a landing pad. +Register XtensaTargetLowering::getExceptionSelectorRegister( + const Constant *PersonalityFn) const { + return Xtensa::A3; +} + bool XtensaTargetLowering::isOffsetFoldingLegal( const GlobalAddressSDNode *GA) const { // The Xtensa target isn't yet aware of offsets. diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index b4e5a9b093b61..ab915062765b3 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -101,6 +101,15 @@ class XtensaTargetLowering : public TargetLowering { bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override; + /// If a physical register, this returns the register that receives the + /// exception address on entry to an EH pad. + Register + getExceptionPointerRegister(const Constant *PersonalityFn) const override; + /// If a physical register, this returns the register that receives the + /// exception typeid on entry to a landing pad. 
+ Register + getExceptionSelectorRegister(const Constant *PersonalityFn) const override; + bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override; const char *getTargetNodeName(unsigned Opcode) const override; From 1bb2171599f5f0e88483dbc88145c033c3d291b1 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 18:45:06 +0300 Subject: [PATCH 038/289] [Xtensa] Implement lowering GlobalTLSAddress operation. --- .../MCTargetDesc/XtensaELFObjectWriter.cpp | 6 ++- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 43 +++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaISelLowering.h | 4 ++ llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 2 +- llvm/lib/Target/Xtensa/XtensaOperators.td | 6 ++- 5 files changed, 58 insertions(+), 3 deletions(-) diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaELFObjectWriter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaELFObjectWriter.cpp index 7472371932f11..1422802fb9bca 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaELFObjectWriter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaELFObjectWriter.cpp @@ -46,10 +46,14 @@ XtensaObjectWriter::~XtensaObjectWriter() {} unsigned XtensaObjectWriter::getRelocType(MCContext &Ctx, const MCValue &Target, const MCFixup &Fixup, bool IsPCRel) const { + MCSymbolRefExpr::VariantKind Modifier = Target.getAccessVariant(); switch ((unsigned)Fixup.getKind()) { case FK_Data_4: - return ELF::R_XTENSA_32; + if (Modifier == MCSymbolRefExpr::VariantKind::VK_TPOFF) + return ELF::R_XTENSA_TLS_TPOFF; + else + return ELF::R_XTENSA_32; default: return ELF::R_XTENSA_SLOT0_OP; } diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 4d8817b1ad5c0..601ccde1b108e 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -68,6 +68,8 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setSchedulingPreference(Sched::RegPressure); + 
setBooleanVectorContents(ZeroOrOneBooleanContent); + setMinFunctionAlignment(Align(4)); setOperationAction(ISD::Constant, MVT::i32, Custom); @@ -97,6 +99,7 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setOperationAction(ISD::ConstantPool, PtrVT, Custom); setOperationAction(ISD::GlobalAddress, PtrVT, Custom); + setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom); setOperationAction(ISD::BlockAddress, PtrVT, Custom); setOperationAction(ISD::JumpTable, PtrVT, Custom); @@ -1254,6 +1257,44 @@ SDValue XtensaTargetLowering::LowerGlobalAddress(SDValue Op, return CPWrap; } +SDValue XtensaTargetLowering::LowerGlobalTLSAddress(GlobalAddressSDNode *GA, + SelectionDAG &DAG) const { + SDLoc DL(GA); + const GlobalValue *GV = GA->getGlobal(); + EVT PtrVT = getPointerTy(DAG.getDataLayout()); + + if (DAG.getTarget().useEmulatedTLS()) + return LowerToTLSEmulatedModel(GA, DAG); + + TLSModel::Model model = getTargetMachine().getTLSModel(GV); + + if (!Subtarget.hasTHREADPTR()) { + llvm_unreachable("only emulated TLS supported"); + } + + if ((model == TLSModel::LocalExec) || (model == TLSModel::InitialExec)) { + auto PtrVt = getPointerTy(DAG.getDataLayout()); + + bool Priv = GV->isPrivateLinkage(GV->getLinkage()); + // Create a constant pool entry for the callee address + XtensaConstantPoolValue *CPV = XtensaConstantPoolSymbol::Create( + *DAG.getContext(), GV->getName().str().c_str() /* Sym */, + 0 /* XtensaCLabelIndex */, Priv, XtensaCP::TPOFF); + + // Get the address of the callee into a register + SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4)); + SDValue CPWrap = getAddrPCRel(CPAddr, DAG); + + SDValue TPRegister = DAG.getRegister(Xtensa::THREADPTR, MVT::i32); + SDValue ThreadPointer = + DAG.getNode(XtensaISD::RUR, DL, MVT::i32, TPRegister); + return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadPointer, CPWrap); + } else + llvm_unreachable("only local-exec and initial-exec TLS mode supported"); + + return SDValue(); +} + SDValue 
XtensaTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { BlockAddressSDNode *Node = cast(Op); @@ -1624,6 +1665,8 @@ SDValue XtensaTargetLowering::LowerOperation(SDValue Op, return LowerRETURNADDR(Op, DAG); case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); + case ISD::GlobalTLSAddress: + return LowerGlobalTLSAddress(cast(Op), DAG); case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); case ISD::JumpTable: diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index ab915062765b3..c41cf99dd7bb6 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -65,6 +65,8 @@ enum { // WinABI Return RETW, + RUR, + // Select with condition operator - This selects between a true value and // a false value (ops #2 and #3) based on the boolean result of comparing // the lhs and rhs (ops #0 and #1) of a conditional expression with the @@ -177,6 +179,8 @@ class XtensaTargetLowering : public TargetLowering { SDValue LowerImmediateFP(SDValue Op, SelectionDAG &DAG) const; SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerGlobalTLSAddress(GlobalAddressSDNode *Node, + SelectionDAG &DAG) const; SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const; diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index e030844149ce8..e980029155d2c 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -571,7 +571,7 @@ def WUR : RRR_Inst<0x00, 0x03, 0x0F, (outs UR:$ur), (ins AR:$t), } def RUR : RRR_Inst<0x00, 0x03, 0x0E, (outs AR:$r), (ins UR:$ur), - "rur\t$r, $ur", []> { + "rur\t$r, $ur", [(set AR:$r, (Xtensa_rur UR:$ur))]> { bits<8> ur; let s = ur{7-4}; diff --git a/llvm/lib/Target/Xtensa/XtensaOperators.td b/llvm/lib/Target/Xtensa/XtensaOperators.td index 2c82a2bbfbb52..60a836c065fd5 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperators.td 
+++ b/llvm/lib/Target/Xtensa/XtensaOperators.td @@ -32,6 +32,7 @@ def SDT_XtensaMADD : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDT def SDT_XtensaMOVS : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisVT<0, f32>]>; def SDT_XtensaSelectCCFP : SDTypeProfile<1, 5, [SDTCisSameAs<0, 3>, SDTCisSameAs<1, 2>, SDTCisSameAs<3, 4>, SDTCisVT<5, i32>]>; +def SDT_XtensaRUR : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>; def SDT_XtensaSRC : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, SDTCisVT<2, i32>, SDTCisVT<3, i32>]>; @@ -94,4 +95,7 @@ def Xtensa_cmpuo : SDNode<"XtensaISD::CMPUO", SDT_XtensaCmp, [SDNPOutGlue]> def Xtensa_madd: SDNode<"XtensaISD::MADD", SDT_XtensaMADD, [SDNPInGlue]>; def Xtensa_msub: SDNode<"XtensaISD::MSUB", SDT_XtensaMADD, [SDNPInGlue]>; -def Xtensa_movs: SDNode<"XtensaISD::MOVS", SDT_XtensaMOVS, [SDNPInGlue]>; +def Xtensa_movs: SDNode<"XtensaISD::MOVS", SDT_XtensaMOVS, [SDNPInGlue]>; + +def Xtensa_rur: SDNode<"XtensaISD::RUR", SDT_XtensaRUR, + [SDNPInGlue]>; From 8da4565419d15446ef70b374f284c251adee8342 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 19:40:26 +0300 Subject: [PATCH 039/289] [Xtensa] Lower ATOMIC_FENCE. Add Atomic Expand pass. 
--- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 15 +++++++++++++++ llvm/lib/Target/Xtensa/XtensaISelLowering.h | 8 ++++++++ llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 14 ++++++++++++++ llvm/lib/Target/Xtensa/XtensaOperators.td | 7 ++++++- llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp | 3 +++ 5 files changed, 46 insertions(+), 1 deletion(-) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 601ccde1b108e..79ab1057d5a81 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -295,6 +295,10 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setOperationAction(ISD::TRAP, MVT::Other, Legal); + // to have the best chance and doing something good with fences custom lower + // them + setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); + // Compute derived properties from the register classes computeRegisterProperties(STI.getRegisterInfo()); @@ -1650,6 +1654,13 @@ bool XtensaTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT, return false; } +SDValue XtensaTargetLowering::LowerATOMIC_FENCE(SDValue Op, + SelectionDAG &DAG) const { + SDLoc DL(Op); + SDValue Chain = Op.getOperand(0); + return DAG.getNode(XtensaISD::MEMW, DL, MVT::Other, Chain); +} + SDValue XtensaTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { switch (Op.getOpcode()) { @@ -1691,6 +1702,8 @@ SDValue XtensaTargetLowering::LowerOperation(SDValue Op, return LowerVASTART(Op, DAG); case ISD::VACOPY: return LowerVACOPY(Op, DAG); + case ISD::ATOMIC_FENCE: + return LowerATOMIC_FENCE(Op, DAG); case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); case ISD::SRA_PARTS: @@ -1752,6 +1765,8 @@ const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const { return "XtensaISD::MSUB"; case XtensaISD::MOVS: return "XtensaISD::MOVS"; + case XtensaISD::MEMW: + return "XtensaISD::MEMW"; } return nullptr; } diff --git 
a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index c41cf99dd7bb6..96ebf5494a9d6 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -55,6 +55,8 @@ enum { // FP move MOVS, + MEMW, + MOVSP, // Wraps a TargetGlobalAddress that should be loaded using PC-relative @@ -160,6 +162,10 @@ class XtensaTargetLowering : public TargetLowering { const SmallVectorImpl &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override; + bool shouldInsertFencesForAtomic(const Instruction *I) const override { + return true; + } + bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const override; @@ -212,6 +218,8 @@ class XtensaTargetLowering : public TargetLowering { SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const; + SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const; + SDValue getAddrPCRel(SDValue Op, SelectionDAG &DAG) const; CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const; diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index e980029155d2c..6096ebf955f12 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -504,6 +504,8 @@ def EXTW : RRR_Inst<0x00, 0x00, 0x00, (outs), (ins), let hasSideEffects = 1; } +def : Pat<(Xtensa_mem_barrier), (MEMW)>; + //===----------------------------------------------------------------------===// // Processor control instructions //===----------------------------------------------------------------------===// @@ -1289,6 +1291,18 @@ def WITLB : RRR_Inst<0x00, 0x00, 0x05, (outs AR:$t), (ins AR:$s), let r = 0x6; } +//===----------------------------------------------------------------------===// +// Atomic patterns +//===----------------------------------------------------------------------===// + +def : Pat<(i32 (atomic_load_8 addr_ish1:$addr)), (L8UI addr_ish1:$addr)>; +def 
: Pat<(i32 (atomic_load_16 addr_ish2:$addr)), (L16UI addr_ish2:$addr)>; +def : Pat<(i32 (atomic_load_32 addr_ish4:$addr)), (L32I addr_ish4:$addr)>; + +def : Pat<(atomic_store_8 addr_ish1:$addr, AR:$t), (S8I AR:$t, addr_ish1:$addr)>; +def : Pat<(atomic_store_16 addr_ish2:$addr, AR:$t), (S16I AR:$t, addr_ish2:$addr)>; +def : Pat<(atomic_store_32 addr_ish4:$addr, AR:$t), (S32I AR:$t, addr_ish4:$addr)>; + //===----------------------------------------------------------------------===// // DSP Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaOperators.td b/llvm/lib/Target/Xtensa/XtensaOperators.td index 60a836c065fd5..01871a44569e8 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperators.td +++ b/llvm/lib/Target/Xtensa/XtensaOperators.td @@ -32,6 +32,7 @@ def SDT_XtensaMADD : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDT def SDT_XtensaMOVS : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisVT<0, f32>]>; def SDT_XtensaSelectCCFP : SDTypeProfile<1, 5, [SDTCisSameAs<0, 3>, SDTCisSameAs<1, 2>, SDTCisSameAs<3, 4>, SDTCisVT<5, i32>]>; +def SDT_XtensaMEMBARRIER : SDTypeProfile<0, 0, []>; def SDT_XtensaRUR : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>; def SDT_XtensaSRC : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, @@ -97,5 +98,9 @@ def Xtensa_madd: SDNode<"XtensaISD::MADD", SDT_XtensaMADD, [SDNPInGlue]>; def Xtensa_msub: SDNode<"XtensaISD::MSUB", SDT_XtensaMADD, [SDNPInGlue]>; def Xtensa_movs: SDNode<"XtensaISD::MOVS", SDT_XtensaMOVS, [SDNPInGlue]>; + +def Xtensa_mem_barrier: SDNode<"XtensaISD::MEMW", SDT_XtensaMEMBARRIER, + [SDNPHasChain, SDNPSideEffect]>; + def Xtensa_rur: SDNode<"XtensaISD::RUR", SDT_XtensaRUR, - [SDNPInGlue]>; + [SDNPInGlue]>; diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp index 7ef350a6008a1..dfe04ee2f4dcc 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp +++ 
b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp @@ -101,6 +101,7 @@ class XtensaPassConfig : public TargetPassConfig { return getTM(); } + void addIRPasses() override; bool addInstSelector() override; void addPreEmitPass() override; }; @@ -111,6 +112,8 @@ bool XtensaPassConfig::addInstSelector() { return false; } +void XtensaPassConfig::addIRPasses() { addPass(createAtomicExpandLegacyPass()); } + void XtensaPassConfig::addPreEmitPass() { addPass(createXtensaSizeReductionPass()); addPass(&BranchRelaxationPassID); From 1b5dbdad1ae9555d0904a04c5053ddb33bdc046e Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 19:55:19 +0300 Subject: [PATCH 040/289] [Xtensa] Lower atomic_cmp_swap_(8/16/32) operations. --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 173 ++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaISelLowering.h | 2 + llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 12 ++ 3 files changed, 187 insertions(+) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 79ab1057d5a81..1567ff04fccf0 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -299,6 +299,16 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, // them setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); + if (!Subtarget.hasS32C1I()) { + for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE; + I <= MVT::LAST_INTEGER_VALUETYPE; ++I) { + MVT VT = MVT::SimpleValueType(I); + if (isTypeLegal(VT)) { + setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Expand); + } + } + } + // Compute derived properties from the register classes computeRegisterProperties(STI.getRegisterInfo()); @@ -1911,6 +1921,143 @@ XtensaTargetLowering::emitSelectCC(MachineInstr &MI, return SinkMBB; } +// Emit instructions for atomic_cmp_swap node for 8/16 bit operands +MachineBasicBlock * +XtensaTargetLowering::emitAtomicCmpSwap(MachineInstr &MI, MachineBasicBlock *BB, + int isByteOperand) 
const { + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + DebugLoc DL = MI.getDebugLoc(); + + const BasicBlock *LLVM_BB = BB->getBasicBlock(); + MachineFunction::iterator It = ++BB->getIterator(); + + MachineBasicBlock *thisBB = BB; + MachineFunction *F = BB->getParent(); + MachineBasicBlock *BBLoop = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *BBExit = F->CreateMachineBasicBlock(LLVM_BB); + + F->insert(It, BBLoop); + F->insert(It, BBExit); + + // Transfer the remainder of BB and its successor edges to BBExit. + BBExit->splice(BBExit->begin(), BB, + std::next(MachineBasicBlock::iterator(MI)), BB->end()); + BBExit->transferSuccessorsAndUpdatePHIs(BB); + + BB->addSuccessor(BBLoop); + + MachineOperand &Res = MI.getOperand(0); + MachineOperand &AtomValAddr = MI.getOperand(1); + MachineOperand &CmpVal = MI.getOperand(2); + MachineOperand &SwpVal = MI.getOperand(3); + + MachineFunction *MF = BB->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), R1).addImm(3); + + unsigned ByteOffs = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::AND), ByteOffs) + .addReg(R1) + .addReg(AtomValAddr.getReg()); + + unsigned AddrAlign = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SUB), AddrAlign) + .addReg(AtomValAddr.getReg()) + .addReg(ByteOffs); + + unsigned BitOffs = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLLI), BitOffs) + .addReg(ByteOffs) + .addImm(3); + + unsigned Mask1 = MRI.createVirtualRegister(RC); + if (isByteOperand) { + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), Mask1).addImm(0xff); + } else { + unsigned R2 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), R2).addImm(1); + unsigned R3 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLLI), 
R3).addReg(R2).addImm(16); + BuildMI(*BB, MI, DL, TII.get(Xtensa::ADDI), Mask1).addReg(R3).addImm(-1); + } + + BuildMI(*BB, MI, DL, TII.get(Xtensa::SSL)).addReg(BitOffs); + + unsigned R2 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), R2).addImm(-1); + + unsigned Mask2 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLL), Mask2).addReg(Mask1); + + unsigned Mask3 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::XOR), Mask3).addReg(Mask2).addReg(R2); + + unsigned R3 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::L32I), R3).addReg(AddrAlign).addImm(0); + + unsigned R4 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::AND), R4).addReg(R3).addReg(Mask3); + + unsigned Cmp1 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLL), Cmp1).addReg(CmpVal.getReg()); + + unsigned Swp1 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLL), Swp1).addReg(SwpVal.getReg()); + + BB = BBLoop; + + unsigned MaskPhi = MRI.createVirtualRegister(RC); + unsigned MaskLoop = MRI.createVirtualRegister(RC); + + BuildMI(*BB, BB->begin(), DL, TII.get(Xtensa::PHI), MaskPhi) + .addReg(MaskLoop) + .addMBB(BBLoop) + .addReg(R4) + .addMBB(thisBB); + + unsigned Cmp2 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::OR), Cmp2).addReg(Cmp1).addReg(MaskPhi); + + unsigned Swp2 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::OR), Swp2).addReg(Swp1).addReg(MaskPhi); + + BuildMI(BB, DL, TII.get(Xtensa::WSR), Xtensa::SCOMPARE1).addReg(Cmp2); + + unsigned Swp3 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::S32C1I), Swp3) + .addReg(Swp2) + .addReg(AddrAlign) + .addImm(0); + + BuildMI(BB, DL, TII.get(Xtensa::AND), MaskLoop).addReg(Swp3).addReg(Mask3); + + BuildMI(BB, DL, TII.get(Xtensa::BNE)) + .addReg(MaskLoop) + .addReg(MaskPhi) + .addMBB(BBLoop); + + BB->addSuccessor(BBLoop); + 
BB->addSuccessor(BBExit); + + BB = BBExit; + auto St = BBExit->begin(); + + unsigned R5 = MRI.createVirtualRegister(RC); + BuildMI(*BB, St, DL, TII.get(Xtensa::SSR)).addReg(BitOffs); + + BuildMI(*BB, St, DL, TII.get(Xtensa::SRL), R5).addReg(Swp3); + + BuildMI(*BB, St, DL, TII.get(Xtensa::AND), Res.getReg()) + .addReg(R5) + .addReg(Mask1); + + MI.eraseFromParent(); // The pseudo instruction is gone now. + return BB; +} + MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( MachineInstr &MI, MachineBasicBlock *MBB) const { const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); @@ -1955,6 +2102,32 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( return MBB; } + case Xtensa::ATOMIC_CMP_SWAP_8_P: { + return emitAtomicCmpSwap(MI, MBB, 1); + } + + case Xtensa::ATOMIC_CMP_SWAP_16_P: { + return emitAtomicCmpSwap(MI, MBB, 0); + } + + case Xtensa::ATOMIC_CMP_SWAP_32_P: { + MachineOperand &R = MI.getOperand(0); + MachineOperand &Addr = MI.getOperand(1); + MachineOperand &Cmp = MI.getOperand(2); + MachineOperand &Swap = MI.getOperand(3); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::WSR), Xtensa::SCOMPARE1) + .addReg(Cmp.getReg()); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::S32C1I), R.getReg()) + .addReg(Swap.getReg()) + .addReg(Addr.getReg()) + .addImm(0); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::S8I: case Xtensa::S16I: case Xtensa::S32I: diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index 96ebf5494a9d6..b3cb79ff1e0f8 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -226,6 +226,8 @@ class XtensaTargetLowering : public TargetLowering { MachineBasicBlock *emitSelectCC(MachineInstr &MI, MachineBasicBlock *BB) const; + MachineBasicBlock *emitAtomicCmpSwap(MachineInstr &MI, MachineBasicBlock *BB, + int isByteOperand) const; InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const 
override { diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 6096ebf955f12..dfaea649b0ead 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1303,6 +1303,18 @@ def : Pat<(atomic_store_8 addr_ish1:$addr, AR:$t), (S8I AR:$t, addr_ish1:$addr) def : Pat<(atomic_store_16 addr_ish2:$addr, AR:$t), (S16I AR:$t, addr_ish2:$addr)>; def : Pat<(atomic_store_32 addr_ish4:$addr, AR:$t), (S32I AR:$t, addr_ish4:$addr)>; +let usesCustomInserter = 1, Predicates = [HasS32C1I] in { + def ATOMIC_CMP_SWAP_8_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$cmp, AR:$swap), + "!atomic_cmp_swap_8_p, $dst, $ptr, $cmp, $swap", + [(set AR:$dst, (atomic_cmp_swap_i8 AR:$ptr, AR:$cmp, AR:$swap))]>; + def ATOMIC_CMP_SWAP_16_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$cmp, AR:$swap), + "!atomic_cmp_swap_16_p, $dst, $ptr, $cmp, $swap", + [(set AR:$dst, (atomic_cmp_swap_i16 AR:$ptr, AR:$cmp, AR:$swap))]>; + def ATOMIC_CMP_SWAP_32_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$cmp, AR:$swap), + "!atomic_cmp_swap_32_p, $dst, $ptr, $cmp, $swap", + [(set AR:$dst, (atomic_cmp_swap_i32 AR:$ptr, AR:$cmp, AR:$swap))]>; +} + //===----------------------------------------------------------------------===// // DSP Instructions //===----------------------------------------------------------------------===// From a65aa3ba6d1218af77b0269dfa8f020320ba3f3e Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 20:00:44 +0300 Subject: [PATCH 041/289] [Xtensa] Lower atomic_swap_(8/16/32) operations. 
--- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 275 ++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaISelLowering.h | 4 + llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 10 + 3 files changed, 289 insertions(+) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 1567ff04fccf0..582b009b52255 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -305,6 +305,7 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, MVT VT = MVT::SimpleValueType(I); if (isTypeLegal(VT)) { setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Expand); + setOperationAction(ISD::ATOMIC_SWAP, VT, Expand); } } } @@ -2058,6 +2059,268 @@ XtensaTargetLowering::emitAtomicCmpSwap(MachineInstr &MI, MachineBasicBlock *BB, return BB; } +// Emit instructions for atomic_swap node for 8/16 bit operands +MachineBasicBlock * +XtensaTargetLowering::emitAtomicSwap(MachineInstr &MI, MachineBasicBlock *BB, + int isByteOperand) const { + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + DebugLoc DL = MI.getDebugLoc(); + + const BasicBlock *LLVM_BB = BB->getBasicBlock(); + MachineFunction::iterator It = ++BB->getIterator(); + + MachineFunction *F = BB->getParent(); + MachineBasicBlock *BBLoop1 = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *BBLoop2 = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *BBLoop3 = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *BBLoop4 = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *BBExit = F->CreateMachineBasicBlock(LLVM_BB); + + F->insert(It, BBLoop1); + F->insert(It, BBLoop2); + F->insert(It, BBLoop3); + F->insert(It, BBLoop4); + F->insert(It, BBExit); + + // Transfer the remainder of BB and its successor edges to BBExit. 
+ BBExit->splice(BBExit->begin(), BB, + std::next(MachineBasicBlock::iterator(MI)), BB->end()); + BBExit->transferSuccessorsAndUpdatePHIs(BB); + + BB->addSuccessor(BBLoop1); + BBLoop1->addSuccessor(BBLoop2); + BBLoop2->addSuccessor(BBLoop3); + BBLoop2->addSuccessor(BBLoop4); + BBLoop3->addSuccessor(BBLoop2); + BBLoop3->addSuccessor(BBLoop4); + BBLoop4->addSuccessor(BBLoop1); + BBLoop4->addSuccessor(BBExit); + + MachineOperand &Res = MI.getOperand(0); + MachineOperand &AtomValAddr = MI.getOperand(1); + MachineOperand &SwpVal = MI.getOperand(2); + + MachineFunction *MF = BB->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), R1).addImm(3); + + unsigned ByteOffs = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::AND), ByteOffs) + .addReg(R1) + .addReg(AtomValAddr.getReg()); + + unsigned AddrAlign = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SUB), AddrAlign) + .addReg(AtomValAddr.getReg()) + .addReg(ByteOffs); + + unsigned BitOffs = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLLI), BitOffs) + .addReg(ByteOffs) + .addImm(3); + + unsigned Mask1 = MRI.createVirtualRegister(RC); + if (isByteOperand) { + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), Mask1).addImm(0xff); + } else { + unsigned R2 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), R2).addImm(1); + unsigned R3 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLLI), R3).addReg(R2).addImm(16); + BuildMI(*BB, MI, DL, TII.get(Xtensa::ADDI), Mask1).addReg(R3).addImm(-1); + } + + BuildMI(*BB, MI, DL, TII.get(Xtensa::SSL)).addReg(BitOffs); + + unsigned R2 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), R2).addImm(-1); + + unsigned Mask2 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, 
TII.get(Xtensa::SLL), Mask2).addReg(Mask1); + + unsigned Mask3 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::XOR), Mask3).addReg(Mask2).addReg(R2); + + unsigned R3 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::L32I), R3).addReg(AddrAlign).addImm(0); + + unsigned R4 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::AND), R4).addReg(R3).addReg(Mask3); + + unsigned SwpValShifted = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLL), SwpValShifted) + .addReg(SwpVal.getReg()); + + unsigned R5 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::L32I), R5).addReg(AddrAlign).addImm(0); + + unsigned AtomVal = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::AND), AtomVal).addReg(R5).addReg(Mask2); + + unsigned AtomValPhi = MRI.createVirtualRegister(RC); + unsigned AtomValLoop = MRI.createVirtualRegister(RC); + + BuildMI(*BBLoop1, BBLoop1->begin(), DL, TII.get(Xtensa::PHI), AtomValPhi) + .addReg(AtomValLoop) + .addMBB(BBLoop4) + .addReg(AtomVal) + .addMBB(BB); + + BB = BBLoop1; + + BuildMI(BB, DL, TII.get(Xtensa::MEMW)); + + unsigned R6 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::L32I), R6).addReg(AddrAlign).addImm(0); + + unsigned R7 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::AND), R7).addReg(R6).addReg(Mask3); + + unsigned MaskPhi = MRI.createVirtualRegister(RC); + unsigned MaskLoop = MRI.createVirtualRegister(RC); + + BuildMI(*BBLoop2, BBLoop2->begin(), DL, TII.get(Xtensa::PHI), MaskPhi) + .addReg(MaskLoop) + .addMBB(BBLoop3) + .addReg(R7) + .addMBB(BBLoop1); + + BB = BBLoop2; + + unsigned Swp1 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::OR), Swp1) + .addReg(SwpValShifted) + .addReg(MaskPhi); + + unsigned AtomVal1 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::OR), AtomVal1) + .addReg(AtomValPhi) + .addReg(MaskPhi); + + BuildMI(BB, DL, 
TII.get(Xtensa::WSR), Xtensa::SCOMPARE1).addReg(AtomVal1); + + unsigned Swp2 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::S32C1I), Swp2) + .addReg(Swp1) + .addReg(AddrAlign) + .addImm(0); + + BuildMI(BB, DL, TII.get(Xtensa::BEQ)) + .addReg(AtomVal1) + .addReg(Swp2) + .addMBB(BBLoop4); + + BB = BBLoop3; + + BuildMI(BB, DL, TII.get(Xtensa::AND), MaskLoop).addReg(Swp2).addReg(Mask3); + + BuildMI(BB, DL, TII.get(Xtensa::BNE)) + .addReg(MaskLoop) + .addReg(MaskPhi) + .addMBB(BBLoop2); + + BB = BBLoop4; + + BuildMI(BB, DL, TII.get(Xtensa::AND), AtomValLoop).addReg(Swp2).addReg(Mask2); + + BuildMI(BB, DL, TII.get(Xtensa::BNE)) + .addReg(AtomValLoop) + .addReg(AtomValPhi) + .addMBB(BBLoop1); + + BB = BBExit; + + auto St = BB->begin(); + + unsigned R8 = MRI.createVirtualRegister(RC); + + BuildMI(*BB, St, DL, TII.get(Xtensa::SSR)).addReg(BitOffs); + BuildMI(*BB, St, DL, TII.get(Xtensa::SLL), R8).addReg(AtomValLoop); + + if (isByteOperand) { + BuildMI(*BB, St, DL, TII.get(Xtensa::SEXT), Res.getReg()) + .addReg(R8) + .addImm(7); + } else { + BuildMI(*BB, St, DL, TII.get(Xtensa::SEXT), Res.getReg()) + .addReg(R8) + .addImm(15); + } + + MI.eraseFromParent(); // The pseudo instruction is gone now. + return BB; +} + +// Emit instructions for atomic_swap node for 32 bit operands +MachineBasicBlock * +XtensaTargetLowering::emitAtomicSwap(MachineInstr &MI, + MachineBasicBlock *BB) const { + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + DebugLoc DL = MI.getDebugLoc(); + + const BasicBlock *LLVM_BB = BB->getBasicBlock(); + MachineFunction::iterator It = ++BB->getIterator(); + + MachineFunction *F = BB->getParent(); + MachineBasicBlock *BBLoop = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *BBExit = F->CreateMachineBasicBlock(LLVM_BB); + + F->insert(It, BBLoop); + F->insert(It, BBExit); + + // Transfer the remainder of BB and its successor edges to BBExit. 
+ BBExit->splice(BBExit->begin(), BB, + std::next(MachineBasicBlock::iterator(MI)), BB->end()); + BBExit->transferSuccessorsAndUpdatePHIs(BB); + + BB->addSuccessor(BBLoop); + BBLoop->addSuccessor(BBLoop); + BBLoop->addSuccessor(BBExit); + + MachineOperand &Res = MI.getOperand(0); + MachineOperand &AtomValAddr = MI.getOperand(1); + MachineOperand &SwpVal = MI.getOperand(2); + + MachineFunction *MF = BB->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + + BuildMI(*BB, MI, DL, TII.get(Xtensa::MEMW)); + + unsigned AtomVal = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::L32I), AtomVal) + .addReg(AtomValAddr.getReg()) + .addImm(0); + + unsigned AtomValLoop = MRI.createVirtualRegister(RC); + + BuildMI(*BBLoop, BBLoop->begin(), DL, TII.get(Xtensa::PHI), Res.getReg()) + .addReg(AtomValLoop) + .addMBB(BBLoop) + .addReg(AtomVal) + .addMBB(BB); + + BB = BBLoop; + + BuildMI(BB, DL, TII.get(Xtensa::WSR), Xtensa::SCOMPARE1).addReg(Res.getReg()); + + BuildMI(BB, DL, TII.get(Xtensa::S32C1I), AtomValLoop) + .addReg(SwpVal.getReg()) + .addReg(AtomValAddr.getReg()) + .addImm(0); + + BuildMI(BB, DL, TII.get(Xtensa::BNE)) + .addReg(AtomValLoop) + .addReg(Res.getReg()) + .addMBB(BBLoop); + + MI.eraseFromParent(); // The pseudo instruction is gone now. 
+ return BB; +} + MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( MachineInstr &MI, MachineBasicBlock *MBB) const { const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); @@ -2128,6 +2391,18 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( return MBB; } + case Xtensa::ATOMIC_SWAP_8_P: { + return emitAtomicSwap(MI, MBB, 1); + } + + case Xtensa::ATOMIC_SWAP_16_P: { + return emitAtomicSwap(MI, MBB, 0); + } + + case Xtensa::ATOMIC_SWAP_32_P: { + return emitAtomicSwap(MI, MBB); + } + case Xtensa::S8I: case Xtensa::S16I: case Xtensa::S32I: diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index b3cb79ff1e0f8..95baefcc487f6 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -226,8 +226,12 @@ class XtensaTargetLowering : public TargetLowering { MachineBasicBlock *emitSelectCC(MachineInstr &MI, MachineBasicBlock *BB) const; + MachineBasicBlock *emitAtomicSwap(MachineInstr &MI, MachineBasicBlock *BB, + int isByteOperand) const; MachineBasicBlock *emitAtomicCmpSwap(MachineInstr &MI, MachineBasicBlock *BB, int isByteOperand) const; + MachineBasicBlock *emitAtomicSwap(MachineInstr &MI, + MachineBasicBlock *BB) const; InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const override { diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index dfaea649b0ead..42c81c0e8539b 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1313,6 +1313,16 @@ let usesCustomInserter = 1, Predicates = [HasS32C1I] in { def ATOMIC_CMP_SWAP_32_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$cmp, AR:$swap), "!atomic_cmp_swap_32_p, $dst, $ptr, $cmp, $swap", [(set AR:$dst, (atomic_cmp_swap_i32 AR:$ptr, AR:$cmp, AR:$swap))]>; + + def ATOMIC_SWAP_8_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$swap), + "!atomic_swap_8_p, $dst, 
$ptr, $swap", + [(set AR:$dst, (atomic_swap_i8 AR:$ptr, AR:$swap))]>; + def ATOMIC_SWAP_16_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$swap), + "!atomic_swap_16_p, $dst, $ptr, $swap", + [(set AR:$dst, (atomic_swap_i16 AR:$ptr, AR:$swap))]>; + def ATOMIC_SWAP_32_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$swap), + "!atomic_swap_32_p, $dst, $ptr, $swap", + [(set AR:$dst, (atomic_swap_i32 AR:$ptr, AR:$swap))]>; } //===----------------------------------------------------------------------===// From c35f9b85daffb2ae431cb1bfdb45cd0a901e2ae6 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 20:09:38 +0300 Subject: [PATCH 042/289] [Xtensa] Lower atomic operations. --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 419 ++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaISelLowering.h | 6 + llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 100 +++++ 3 files changed, 525 insertions(+) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 582b009b52255..270e28cf4a5c9 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -306,10 +306,27 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, if (isTypeLegal(VT)) { setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Expand); setOperationAction(ISD::ATOMIC_SWAP, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_NAND, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_MIN, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_MAX, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_UMIN, VT, Expand); + setOperationAction(ISD::ATOMIC_LOAD_UMAX, VT, Expand); } } } + if (Subtarget.hasS32C1I()) { + 
setMaxAtomicSizeInBitsSupported(32); + setMinCmpXchgSizeInBits(32); + } else { + setMaxAtomicSizeInBitsSupported(0); + } + // Compute derived properties from the register classes computeRegisterProperties(STI.getRegisterInfo()); @@ -2321,6 +2338,345 @@ XtensaTargetLowering::emitAtomicSwap(MachineInstr &MI, return BB; } +MachineBasicBlock *XtensaTargetLowering::emitAtomicRMW(MachineInstr &MI, + MachineBasicBlock *BB, + unsigned Opcode, + bool inv, + bool minmax) const { + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + DebugLoc DL = MI.getDebugLoc(); + + const BasicBlock *LLVM_BB = BB->getBasicBlock(); + MachineFunction::iterator It = ++BB->getIterator(); + + MachineBasicBlock *ThisBB = BB; + MachineFunction *F = BB->getParent(); + MachineBasicBlock *BBLoop = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *BBExit = F->CreateMachineBasicBlock(LLVM_BB); + + F->insert(It, BBLoop); + F->insert(It, BBExit); + + // Transfer the remainder of BB and its successor edges to BB2. 
+ BBExit->splice(BBExit->begin(), BB, + std::next(MachineBasicBlock::iterator(MI)), BB->end()); + BBExit->transferSuccessorsAndUpdatePHIs(BB); + + BB->addSuccessor(BBLoop); + + MachineOperand &Res = MI.getOperand(0); + MachineOperand &AtomicValAddr = MI.getOperand(1); + MachineOperand &Val = MI.getOperand(2); + MachineFunction *MF = BB->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::L32I), R1).add(AtomicValAddr).addImm(0); + + BB = BBLoop; + + unsigned AtomicValPhi = MRI.createVirtualRegister(RC); + unsigned AtomicValLoop = MRI.createVirtualRegister(RC); + + BuildMI(*BB, BB->begin(), DL, TII.get(Xtensa::PHI), AtomicValPhi) + .addReg(AtomicValLoop) + .addMBB(BBLoop) + .addReg(R1) + .addMBB(ThisBB); + + unsigned R2 = MRI.createVirtualRegister(RC); + + if (minmax) { + MachineBasicBlock *BBLoop1 = F->CreateMachineBasicBlock(LLVM_BB); + F->insert(It, BBLoop1); + BB->addSuccessor(BBLoop1); + MachineBasicBlock *BBLoop2 = F->CreateMachineBasicBlock(LLVM_BB); + F->insert(It, BBLoop2); + BB->addSuccessor(BBLoop2); + + BuildMI(BB, DL, TII.get(Opcode)) + .addReg(AtomicValPhi) + .addReg(Val.getReg()) + .addMBB(BBLoop1); + + unsigned R7 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::MOV_N), R7).addReg(Val.getReg()); + + BB = BBLoop1; + unsigned R8 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::MOV_N), R8).addReg(AtomicValPhi); + BB->addSuccessor(BBLoop2); + + BB = BBLoop2; + unsigned R9 = MRI.createVirtualRegister(RC); + + BuildMI(*BB, BB->begin(), DL, TII.get(Xtensa::PHI), R9) + .addReg(R7) + .addMBB(BBLoop) + .addReg(R8) + .addMBB(BBLoop1); + BuildMI(BB, DL, TII.get(Xtensa::MOV_N), R2).addReg(R9); + } else { + BuildMI(BB, DL, TII.get(Opcode), R2) + .addReg(AtomicValPhi) + .addReg(Val.getReg()); + if (inv) { + unsigned Rtmp1 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, 
TII.get(Xtensa::MOVI), Rtmp1).addImm(-1); + unsigned Rtmp2 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::XOR), Rtmp2) + .addReg(R2) + .addReg(Rtmp1); + R2 = Rtmp2; + } + } + + unsigned R4 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::WSR), Xtensa::SCOMPARE1).addReg(AtomicValPhi); + BuildMI(BB, DL, TII.get(Xtensa::S32C1I), R4) + .addReg(R2) + .addReg(AtomicValAddr.getReg()) + .addImm(0); + + BuildMI(BB, DL, TII.get(Xtensa::MOV_N), AtomicValLoop).addReg(R4); + + BuildMI(BB, DL, TII.get(Xtensa::BNE)) + .addReg(AtomicValPhi) + .addReg(R4) + .addMBB(BBLoop); + + BB->addSuccessor(BBLoop); + BB->addSuccessor(BBExit); + + BB = BBExit; + auto st = BBExit->begin(); + + BuildMI(*BB, st, DL, TII.get(Xtensa::MOV_N), Res.getReg()).addReg(R4); + + MI.eraseFromParent(); // The pseudo instruction is gone now. + + return BB; +} + +MachineBasicBlock * +XtensaTargetLowering::emitAtomicRMW(MachineInstr &MI, MachineBasicBlock *BB, + bool isByteOperand, unsigned Opcode, + bool inv, bool minmax) const { + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + DebugLoc DL = MI.getDebugLoc(); + + const BasicBlock *LLVM_BB = BB->getBasicBlock(); + MachineFunction::iterator It = ++BB->getIterator(); + + MachineBasicBlock *ThisBB = BB; + MachineFunction *F = BB->getParent(); + MachineBasicBlock *BBLoop = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *BBExit = F->CreateMachineBasicBlock(LLVM_BB); + + F->insert(It, BBLoop); + F->insert(It, BBExit); + + // Transfer the remainder of BB and its successor edges to BB2. 
+ BBExit->splice(BBExit->begin(), BB, + std::next(MachineBasicBlock::iterator(MI)), BB->end()); + BBExit->transferSuccessorsAndUpdatePHIs(BB); + + BB->addSuccessor(BBLoop); + + MachineOperand &Res = MI.getOperand(0); + MachineOperand &AtomValAddr = MI.getOperand(1); + MachineOperand &Val = MI.getOperand(2); + + MachineFunction *MF = BB->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), R1).addImm(3); + + unsigned ByteOffs = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::AND), ByteOffs) + .addReg(R1) + .addReg(AtomValAddr.getReg()); + + unsigned AddrAlign = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SUB), AddrAlign) + .addReg(AtomValAddr.getReg()) + .addReg(ByteOffs); + + unsigned BitOffs = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLLI), BitOffs) + .addReg(ByteOffs) + .addImm(3); + + unsigned Mask1 = MRI.createVirtualRegister(RC); + if (isByteOperand) { + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), Mask1).addImm(0xff); + } else { + unsigned R2 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), R2).addImm(1); + unsigned R3 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLLI), R3).addReg(R2).addImm(16); + BuildMI(*BB, MI, DL, TII.get(Xtensa::ADDI), Mask1).addReg(R3).addImm(-1); + } + + BuildMI(*BB, MI, DL, TII.get(Xtensa::SSL)).addReg(BitOffs); + + unsigned R2 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::MOVI), R2).addImm(-1); + + unsigned Mask2 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLL), Mask2).addReg(Mask1); + + unsigned Mask3 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::XOR), Mask3).addReg(Mask2).addReg(R2); + + unsigned R3 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, 
TII.get(Xtensa::L32I), R3).addReg(AddrAlign).addImm(0); + + unsigned Val1 = MRI.createVirtualRegister(RC); + BuildMI(*BB, MI, DL, TII.get(Xtensa::SLL), Val1).addReg(Val.getReg()); + + BB = BBLoop; + + unsigned AtomicValPhi = MRI.createVirtualRegister(RC); + unsigned AtomicValLoop = MRI.createVirtualRegister(RC); + + BuildMI(*BB, BB->begin(), DL, TII.get(Xtensa::PHI), AtomicValPhi) + .addReg(AtomicValLoop) + .addMBB(BBLoop) + .addReg(R3) + .addMBB(ThisBB); + + unsigned Swp2; + + if (minmax) { + MachineBasicBlock *BBLoop1 = F->CreateMachineBasicBlock(LLVM_BB); + F->insert(It, BBLoop1); + BB->addSuccessor(BBLoop1); + MachineBasicBlock *BBLoop2 = F->CreateMachineBasicBlock(LLVM_BB); + F->insert(It, BBLoop2); + BB->addSuccessor(BBLoop2); + + unsigned R1 = MRI.createVirtualRegister(RC); + unsigned R2 = MRI.createVirtualRegister(RC); + unsigned R3 = MRI.createVirtualRegister(RC); + unsigned R4 = MRI.createVirtualRegister(RC); + + unsigned R5 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::AND), R5) + .addReg(AtomicValPhi) + .addReg(Mask2); + + BuildMI(BB, DL, TII.get(Xtensa::SRL), R1).addReg(R5); + BuildMI(BB, DL, TII.get(Xtensa::SRL), R2).addReg(Val1); + + if ((Opcode == Xtensa::BLT) || (Opcode == Xtensa::BGE)) { + if (isByteOperand) { + BuildMI(BB, DL, TII.get(Xtensa::SEXT), R3).addReg(R1).addImm(7); + BuildMI(BB, DL, TII.get(Xtensa::SEXT), R4).addReg(R2).addImm(7); + } else { + BuildMI(BB, DL, TII.get(Xtensa::SEXT), R3).addReg(R1).addImm(15); + BuildMI(BB, DL, TII.get(Xtensa::SEXT), R4).addReg(R2).addImm(15); + } + } else { + R3 = R1; + R4 = R2; + } + + BuildMI(BB, DL, TII.get(Opcode)).addReg(R3).addReg(R4).addMBB(BBLoop1); + + unsigned R7 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::MOV_N), R7).addReg(Val1); + + BB = BBLoop1; + unsigned R8 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::MOV_N), R8).addReg(AtomicValPhi); + BB->addSuccessor(BBLoop2); + + BB = BBLoop2; + unsigned R9 = 
MRI.createVirtualRegister(RC); + + BuildMI(*BB, BB->begin(), DL, TII.get(Xtensa::PHI), R9) + .addReg(R7) + .addMBB(BBLoop) + .addReg(R8) + .addMBB(BBLoop1); + + unsigned R10 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::AND), R10) + .addReg(AtomicValPhi) + .addReg(Mask3); + + unsigned R11 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::AND), R11).addReg(R9).addReg(Mask2); + + Swp2 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::OR), Swp2).addReg(R10).addReg(R11); + } else { + unsigned R4 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::AND), R4) + .addReg(AtomicValPhi) + .addReg(Mask2); + + unsigned Res1 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Opcode), Res1).addReg(R4).addReg(Val1); + + unsigned Swp1 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::AND), Swp1).addReg(Res1).addReg(Mask2); + + unsigned R5 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::AND), R5) + .addReg(AtomicValPhi) + .addReg(Mask3); + + if (inv) { + unsigned Rtmp1 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::XOR), Rtmp1) + .addReg(AtomicValPhi) + .addReg(Mask2); + R5 = Rtmp1; + } + + Swp2 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::OR), Swp2).addReg(Swp1).addReg(R5); + } + + unsigned Swp3 = MRI.createVirtualRegister(RC); + BuildMI(BB, DL, TII.get(Xtensa::WSR), Xtensa::SCOMPARE1).addReg(AtomicValPhi); + BuildMI(BB, DL, TII.get(Xtensa::S32C1I), Swp3) + .addReg(Swp2) + .addReg(AddrAlign) + .addImm(0); + + BuildMI(BB, DL, TII.get(Xtensa::MOV_N), AtomicValLoop).addReg(Swp3); + + BuildMI(BB, DL, TII.get(Xtensa::BNE)) + .addReg(Swp3) + .addReg(AtomicValPhi) + .addMBB(BBLoop); + + BB->addSuccessor(BBLoop); + BB->addSuccessor(BBExit); + BB = BBExit; + auto St = BBExit->begin(); + + unsigned R6 = MRI.createVirtualRegister(RC); + + BuildMI(*BB, St, DL, TII.get(Xtensa::SSR)).addReg(BitOffs); + + BuildMI(*BB, St, DL, 
TII.get(Xtensa::SRL), R6).addReg(AtomicValLoop); + + BuildMI(*BB, St, DL, TII.get(Xtensa::AND), Res.getReg()) + .addReg(R6) + .addReg(Mask1); + + MI.eraseFromParent(); // The pseudo instruction is gone now. + + return BB; +} + MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( MachineInstr &MI, MachineBasicBlock *MBB) const { const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); @@ -2403,6 +2759,69 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( return emitAtomicSwap(MI, MBB); } + case Xtensa::ATOMIC_LOAD_ADD_8_P: + return emitAtomicRMW(MI, MBB, true, Xtensa::ADD, false, false); + case Xtensa::ATOMIC_LOAD_SUB_8_P: + return emitAtomicRMW(MI, MBB, true, Xtensa::SUB, false, false); + case Xtensa::ATOMIC_LOAD_OR_8_P: + return emitAtomicRMW(MI, MBB, true, Xtensa::OR, false, false); + case Xtensa::ATOMIC_LOAD_XOR_8_P: + return emitAtomicRMW(MI, MBB, true, Xtensa::XOR, false, false); + case Xtensa::ATOMIC_LOAD_AND_8_P: + return emitAtomicRMW(MI, MBB, true, Xtensa::AND, false, false); + case Xtensa::ATOMIC_LOAD_NAND_8_P: + return emitAtomicRMW(MI, MBB, true, Xtensa::AND, true, false); + case Xtensa::ATOMIC_LOAD_MIN_8_P: + return emitAtomicRMW(MI, MBB, true, Xtensa::BGE, false, true); + case Xtensa::ATOMIC_LOAD_MAX_8_P: + return emitAtomicRMW(MI, MBB, true, Xtensa::BLT, false, true); + case Xtensa::ATOMIC_LOAD_UMIN_8_P: + return emitAtomicRMW(MI, MBB, true, Xtensa::BGEU, false, true); + case Xtensa::ATOMIC_LOAD_UMAX_8_P: + return emitAtomicRMW(MI, MBB, true, Xtensa::BLTU, false, true); + + case Xtensa::ATOMIC_LOAD_ADD_16_P: + return emitAtomicRMW(MI, MBB, false, Xtensa::ADD, false, false); + case Xtensa::ATOMIC_LOAD_SUB_16_P: + return emitAtomicRMW(MI, MBB, false, Xtensa::SUB, false, false); + case Xtensa::ATOMIC_LOAD_OR_16_P: + return emitAtomicRMW(MI, MBB, false, Xtensa::OR, false, false); + case Xtensa::ATOMIC_LOAD_XOR_16_P: + return emitAtomicRMW(MI, MBB, false, Xtensa::XOR, false, false); + case 
Xtensa::ATOMIC_LOAD_AND_16_P: + return emitAtomicRMW(MI, MBB, false, Xtensa::AND, false, false); + case Xtensa::ATOMIC_LOAD_NAND_16_P: + return emitAtomicRMW(MI, MBB, false, Xtensa::AND, true, false); + case Xtensa::ATOMIC_LOAD_MIN_16_P: + return emitAtomicRMW(MI, MBB, false, Xtensa::BGE, false, true); + case Xtensa::ATOMIC_LOAD_MAX_16_P: + return emitAtomicRMW(MI, MBB, false, Xtensa::BLT, false, true); + case Xtensa::ATOMIC_LOAD_UMIN_16_P: + return emitAtomicRMW(MI, MBB, false, Xtensa::BGEU, false, true); + case Xtensa::ATOMIC_LOAD_UMAX_16_P: + return emitAtomicRMW(MI, MBB, false, Xtensa::BLTU, false, true); + + case Xtensa::ATOMIC_LOAD_ADD_32_P: + return emitAtomicRMW(MI, MBB, Xtensa::ADD, false, false); + case Xtensa::ATOMIC_LOAD_SUB_32_P: + return emitAtomicRMW(MI, MBB, Xtensa::SUB, false, false); + case Xtensa::ATOMIC_LOAD_OR_32_P: + return emitAtomicRMW(MI, MBB, Xtensa::OR, false, false); + case Xtensa::ATOMIC_LOAD_XOR_32_P: + return emitAtomicRMW(MI, MBB, Xtensa::XOR, false, false); + case Xtensa::ATOMIC_LOAD_AND_32_P: + return emitAtomicRMW(MI, MBB, Xtensa::AND, false, false); + case Xtensa::ATOMIC_LOAD_NAND_32_P: + return emitAtomicRMW(MI, MBB, Xtensa::AND, true, false); + case Xtensa::ATOMIC_LOAD_MIN_32_P: + return emitAtomicRMW(MI, MBB, Xtensa::BGE, false, true); + case Xtensa::ATOMIC_LOAD_MAX_32_P: + return emitAtomicRMW(MI, MBB, Xtensa::BLT, false, true); + case Xtensa::ATOMIC_LOAD_UMIN_32_P: + return emitAtomicRMW(MI, MBB, Xtensa::BGEU, false, true); + case Xtensa::ATOMIC_LOAD_UMAX_32_P: + return emitAtomicRMW(MI, MBB, Xtensa::BLTU, false, true); + case Xtensa::S8I: case Xtensa::S16I: case Xtensa::S32I: diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index 95baefcc487f6..7c0edc91960b3 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -232,6 +232,12 @@ class XtensaTargetLowering : public TargetLowering { int isByteOperand) const; 
MachineBasicBlock *emitAtomicSwap(MachineInstr &MI, MachineBasicBlock *BB) const; + MachineBasicBlock *emitAtomicRMW(MachineInstr &MI, MachineBasicBlock *BB, + bool isByteOperand, unsigned Opcode, + bool inv, bool minmax) const; + MachineBasicBlock *emitAtomicRMW(MachineInstr &MI, MachineBasicBlock *BB, + unsigned Opcode, bool inv, + bool minmax) const; InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const override { diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 42c81c0e8539b..c0546bb41b423 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1323,6 +1323,106 @@ let usesCustomInserter = 1, Predicates = [HasS32C1I] in { def ATOMIC_SWAP_32_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$swap), "!atomic_swap_32_p, $dst, $ptr, $swap", [(set AR:$dst, (atomic_swap_i32 AR:$ptr, AR:$swap))]>; + + def ATOMIC_LOAD_ADD_8_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_add_8_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_add_i8 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_ADD_16_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_add_16_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_add_i16 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_ADD_32_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_add_32_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_add_i32 AR:$ptr, AR:$arg))]>; + + def ATOMIC_LOAD_SUB_8_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_sub_8_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_sub_i8 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_SUB_16_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_sub_16_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_sub_i16 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_SUB_32_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_sub_32_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_sub_i32 
AR:$ptr, AR:$arg))]>; + + def ATOMIC_LOAD_AND_8_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_and_8_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_and_i8 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_AND_16_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_and_16_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_and_i16 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_AND_32_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_and_32_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_and_i32 AR:$ptr, AR:$arg))]>; + + def ATOMIC_LOAD_OR_8_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_or_8_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_or_i8 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_OR_16_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_or_16_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_or_i16 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_OR_32_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_or_32_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_or_i32 AR:$ptr, AR:$arg))]>; + + def ATOMIC_LOAD_XOR_8_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_xor_8_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_xor_i8 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_XOR_16_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_xor_16_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_xor_i16 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_XOR_32_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_xor_32_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_xor_i32 AR:$ptr, AR:$arg))]>; + + def ATOMIC_LOAD_NAND_8_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_nand_8_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_nand_i8 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_NAND_16_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_nand_16_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_nand_i16 AR:$ptr, 
AR:$arg))]>; + def ATOMIC_LOAD_NAND_32_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_nand_32_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_nand_i32 AR:$ptr, AR:$arg))]>; + + def ATOMIC_LOAD_MIN_8_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_min_8_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_min_i8 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_MIN_16_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_min_16_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_min_i16 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_MIN_32_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_min_32_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_min_i32 AR:$ptr, AR:$arg))]>; + + def ATOMIC_LOAD_MAX_8_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_max_8_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_max_i8 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_MAX_16_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_max_16_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_max_i16 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_MAX_32_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_max_32_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_max_i32 AR:$ptr, AR:$arg))]>; + + def ATOMIC_LOAD_UMIN_8_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_umin_8_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_umin_i8 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_UMIN_16_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_umin_16_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_umin_i16 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_UMIN_32_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_umin_32_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_umin_i32 AR:$ptr, AR:$arg))]>; + + def ATOMIC_LOAD_UMAX_8_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_umax_8_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_umax_i8 
AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_UMAX_16_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_umax_16_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_umax_i16 AR:$ptr, AR:$arg))]>; + def ATOMIC_LOAD_UMAX_32_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$arg), + "!atomic_load_umax_32_p, $dst, $ptr, $arg", + [(set AR:$dst, (atomic_load_umax_i32 AR:$ptr, AR:$arg))]>; } //===----------------------------------------------------------------------===// From 6b3b35d1a969860df9a13a5424568c51b11d6d65 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 20:22:29 +0300 Subject: [PATCH 043/289] [Xtensa] Implement Xtensa toolchain. --- clang/lib/Driver/CMakeLists.txt | 1 + clang/lib/Driver/Driver.cpp | 4 + clang/lib/Driver/ToolChains/Xtensa.cpp | 266 +++++++++++++++++++++++++ clang/lib/Driver/ToolChains/Xtensa.h | 94 +++++++++ 4 files changed, 365 insertions(+) create mode 100644 clang/lib/Driver/ToolChains/Xtensa.cpp create mode 100644 clang/lib/Driver/ToolChains/Xtensa.h diff --git a/clang/lib/Driver/CMakeLists.txt b/clang/lib/Driver/CMakeLists.txt index 32a4378ab499f..421e07798a920 100644 --- a/clang/lib/Driver/CMakeLists.txt +++ b/clang/lib/Driver/CMakeLists.txt @@ -81,6 +81,7 @@ add_clang_library(clangDriver ToolChains/VEToolchain.cpp ToolChains/WebAssembly.cpp ToolChains/XCore.cpp + ToolChains/Xtensa.cpp ToolChains/PPCLinux.cpp ToolChains/PPCFreeBSD.cpp ToolChains/InterfaceStubs.cpp diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp index 8e44d5afa40e0..3e2f02b27bf84 100644 --- a/clang/lib/Driver/Driver.cpp +++ b/clang/lib/Driver/Driver.cpp @@ -48,6 +48,7 @@ #include "ToolChains/VEToolchain.h" #include "ToolChains/WebAssembly.h" #include "ToolChains/XCore.h" +#include "ToolChains/Xtensa.h" #include "ToolChains/ZOS.h" #include "clang/Basic/DiagnosticDriver.h" #include "clang/Basic/TargetID.h" @@ -6507,6 +6508,9 @@ const ToolChain &Driver::getToolChain(const ArgList &Args, case llvm::Triple::csky: TC = 
std::make_unique(*this, Target, Args); break; + case llvm::Triple::xtensa: + TC = std::make_unique(*this, Target, Args); + break; default: if (toolchains::BareMetal::handlesTarget(Target)) TC = std::make_unique(*this, Target, Args); diff --git a/clang/lib/Driver/ToolChains/Xtensa.cpp b/clang/lib/Driver/ToolChains/Xtensa.cpp new file mode 100644 index 0000000000000..4c77a7e3a3979 --- /dev/null +++ b/clang/lib/Driver/ToolChains/Xtensa.cpp @@ -0,0 +1,266 @@ +//===--- Xtensa.cpp - Xtensa ToolChain Implementations ----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "Xtensa.h" +#include "CommonArgs.h" +#include "clang/Driver/InputInfo.h" +#include "clang/Basic/Cuda.h" +#include "clang/Config/config.h" +#include "clang/Driver/Compilation.h" +#include "clang/Driver/Distro.h" +#include "clang/Driver/Driver.h" +#include "clang/Driver/DriverDiagnostic.h" +#include "clang/Driver/Options.h" +#include "llvm/Option/ArgList.h" +#include "llvm/Support/Path.h" +#include "llvm/Support/VirtualFileSystem.h" +#include + +using namespace clang::driver; +using namespace clang::driver::tools; +using namespace clang::driver::toolchains; +using namespace clang; +using namespace llvm::opt; + +XtensaGCCToolchainDetector::XtensaGCCToolchainDetector( + const Driver &D, const llvm::Triple &HostTriple, + const llvm::opt::ArgList &Args) { + std::string InstalledDir; + InstalledDir = D.Dir; + StringRef CPUName = XtensaToolChain::GetTargetCPUVersion(Args); + std::string Dir; + std::string ToolchainName; + std::string ToolchainDir; + + if (CPUName == "esp32") + ToolchainName = "xtensa-esp32-elf"; + else if (CPUName == "esp32-s2") + ToolchainName = "xtensa-esp32s2-elf"; + else if 
(CPUName == "esp8266") + ToolchainName = "xtensa-lx106-elf"; + + Slash = llvm::sys::path::get_separator().str(); + + ToolchainDir = InstalledDir + Slash + ".."; + Dir = ToolchainDir + Slash + "lib" + Slash + "gcc" + Slash + ToolchainName + + Slash; + GCCLibAndIncVersion = ""; + + if (D.getVFS().exists(Dir)) { + std::error_code EC; + for (llvm::vfs::directory_iterator LI = D.getVFS().dir_begin(Dir, EC), LE; + !EC && LI != LE; LI = LI.increment(EC)) { + StringRef VersionText = llvm::sys::path::filename(LI->path()); + auto GCCVersion = Generic_GCC::GCCVersion::Parse(VersionText); + if (GCCVersion.Major == -1) + continue; + GCCLibAndIncVersion = GCCVersion.Text; + } + if (GCCLibAndIncVersion == "") + llvm_unreachable("Unexpected Xtensa GCC toolchain version"); + + } else { + // Unable to find Xtensa GCC toolchain; + GCCToolchainName = ""; + return; + } + GCCToolchainDir = ToolchainDir; + GCCToolchainName = ToolchainName; +} + +/// Xtensa Toolchain +XtensaToolChain::XtensaToolChain(const Driver &D, const llvm::Triple &Triple, + const ArgList &Args) + : Generic_ELF(D, Triple, Args), XtensaGCCToolchain(D, getTriple(), Args) { + for (auto *A : Args) { + std::string Str = A->getAsString(Args); + if (!Str.compare("-mlongcalls")) + A->claim(); + if (!Str.compare("-fno-tree-switch-conversion")) + A->claim(); + + // Currently don't use integrated assembler for assembler input files + if ((IsIntegratedAsm) && (Str.length() > 2)) { + std::string ExtSubStr = Str.substr(Str.length() - 2); + if (!ExtSubStr.compare(".s")) + IsIntegratedAsm = false; + if (!ExtSubStr.compare(".S")) + IsIntegratedAsm = false; + } + } + + // Currently don't use integrated assembler for assembler input files + if (IsIntegratedAsm) { + if (Args.getLastArgValue(options::OPT_x) == "assembler") + IsIntegratedAsm = false; + + if (Args.getLastArgValue(options::OPT_x) == "assembler-with-cpp") + IsIntegratedAsm = false; + } + + const std::string Slash = XtensaGCCToolchain.Slash; + std::string Libs = + 
XtensaGCCToolchain.GCCToolchainDir + Slash + "lib" + Slash + "gcc" + + Slash + XtensaGCCToolchain.GCCToolchainName + Slash + + XtensaGCCToolchain.GCCLibAndIncVersion; + getFilePaths().push_back(Libs); + + Libs = XtensaGCCToolchain.GCCToolchainDir + Slash + + XtensaGCCToolchain.GCCToolchainName + Slash + "lib"; + getFilePaths().push_back(Libs); +} + +Tool *XtensaToolChain::buildLinker() const { + return new tools::Xtensa::Linker(*this); +} + +Tool *XtensaToolChain::buildAssembler() const { + return new tools::Xtensa::Assembler(*this); +} + +void XtensaToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs, + ArgStringList &CC1Args) const { + if (DriverArgs.hasArg(clang::driver::options::OPT_nostdinc) || + DriverArgs.hasArg(options::OPT_nostdlibinc)) + return; + + if (!XtensaGCCToolchain.IsValid()) + return; + + std::string Slash = XtensaGCCToolchain.Slash; + + std::string Path1 = getDriver().ResourceDir.c_str() + Slash + "include"; + std::string Path2 = XtensaGCCToolchain.GCCToolchainDir + Slash + + XtensaGCCToolchain.GCCToolchainName + Slash + + "sys-include"; + std::string Path3 = XtensaGCCToolchain.GCCToolchainDir + Slash + + XtensaGCCToolchain.GCCToolchainName + Slash + "include"; + + const StringRef Paths[] = {Path1, Path2, Path3}; + addSystemIncludes(DriverArgs, CC1Args, Paths); +} + +void XtensaToolChain::addLibStdCxxIncludePaths( + const llvm::opt::ArgList &DriverArgs, + llvm::opt::ArgStringList &CC1Args) const { + if (!XtensaGCCToolchain.IsValid()) + return; + + std::string Slash = XtensaGCCToolchain.Slash; + + std::string BaseDir = XtensaGCCToolchain.GCCToolchainDir + Slash + + XtensaGCCToolchain.GCCToolchainName + Slash + + "include" + Slash + "c++" + Slash + + XtensaGCCToolchain.GCCLibAndIncVersion; + std::string TargetDir = BaseDir + Slash + XtensaGCCToolchain.GCCToolchainName; + addLibStdCXXIncludePaths(BaseDir, "", "", DriverArgs, CC1Args); + addLibStdCXXIncludePaths(TargetDir, "", "", DriverArgs, CC1Args); + TargetDir = BaseDir + Slash + 
"backward"; + addLibStdCXXIncludePaths(TargetDir, "", "", DriverArgs, CC1Args); +} + +ToolChain::CXXStdlibType +XtensaToolChain::GetCXXStdlibType(const ArgList &Args) const { + Arg *A = Args.getLastArg(options::OPT_stdlib_EQ); + if (!A) + return ToolChain::CST_Libstdcxx; + + StringRef Value = A->getValue(); + if (Value != "libstdc++") + getDriver().Diag(diag::err_drv_invalid_stdlib_name) << A->getAsString(Args); + + return ToolChain::CST_Libstdcxx; +} + +const StringRef XtensaToolChain::GetTargetCPUVersion(const ArgList &Args) { + if (Arg *A = Args.getLastArg(clang::driver::options::OPT_mcpu_EQ)) { + StringRef CPUName = A->getValue(); + return CPUName; + } + return "esp32"; +} + +void tools::Xtensa::Assembler::ConstructJob(Compilation &C, const JobAction &JA, + const InputInfo &Output, + const InputInfoList &Inputs, + const ArgList &Args, + const char *LinkingOutput) const { + const auto &TC = + static_cast(getToolChain()); + + if (!TC.XtensaGCCToolchain.IsValid()) + llvm_unreachable("Unable to find Xtensa GCC assembler"); + + claimNoWarnArgs(Args); + ArgStringList CmdArgs; + + CmdArgs.push_back("-o"); + CmdArgs.push_back(Output.getFilename()); + + CmdArgs.push_back("-c"); + + if (Args.hasArg(options::OPT_v)) + CmdArgs.push_back("-v"); + + if (Arg *A = Args.getLastArg(options::OPT_g_Group)) + if (!A->getOption().matches(options::OPT_g0)) + CmdArgs.push_back("-g"); + + if (Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm, + false)) + CmdArgs.push_back("-fverbose-asm"); + + Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler); + + for (const auto &II : Inputs) + CmdArgs.push_back(II.getFilename()); + + std::string Slash = TC.XtensaGCCToolchain.Slash; + + const char *Asm = + Args.MakeArgString(getToolChain().getDriver().Dir + Slash + + TC.XtensaGCCToolchain.GCCToolchainName + "-as"); + C.addCommand(std::make_unique( + JA, *this, ResponseFileSupport::AtFileCurCP(), Asm, CmdArgs, Inputs)); +} + +void 
Xtensa::Linker::ConstructJob(Compilation &C, const JobAction &JA, + const InputInfo &Output, + const InputInfoList &Inputs, + const ArgList &Args, + const char *LinkingOutput) const { + const auto &TC = + static_cast(getToolChain()); + std::string Slash = TC.XtensaGCCToolchain.Slash; + + if (!TC.XtensaGCCToolchain.IsValid()) + llvm_unreachable("Unable to find Xtensa GCC linker"); + + std::string Linker = getToolChain().getDriver().Dir + Slash + + TC.XtensaGCCToolchain.GCCToolchainName + "-ld"; + ArgStringList CmdArgs; + + Args.AddAllArgs(CmdArgs, options::OPT_L); + TC.AddFilePathLibArgs(Args, CmdArgs); + + Args.addAllArgs(CmdArgs, + {options::OPT_T_Group, options::OPT_e, options::OPT_s, + options::OPT_t, options::OPT_u_Group}); + + AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA); + + CmdArgs.push_back("-lgcc"); + + CmdArgs.push_back("-o"); + CmdArgs.push_back(Output.getFilename()); + C.addCommand( + std::make_unique(JA, *this, ResponseFileSupport::AtFileCurCP(), + Args.MakeArgString(Linker), CmdArgs, Inputs)); +} diff --git a/clang/lib/Driver/ToolChains/Xtensa.h b/clang/lib/Driver/ToolChains/Xtensa.h new file mode 100644 index 0000000000000..663dc63f6d279 --- /dev/null +++ b/clang/lib/Driver/ToolChains/Xtensa.h @@ -0,0 +1,94 @@ +//===--- Xtensa.h - Xtensa Tool and ToolChain Implementations ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_Xtensa_H +#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_Xtensa_H + +#include "Gnu.h" +#include "clang/Driver/InputInfo.h" +#include "clang/Driver/Tool.h" +#include "clang/Driver/ToolChain.h" + +namespace clang { +namespace driver { +namespace toolchains { + +class XtensaGCCToolchainDetector { +public: + std::string GCCLibAndIncVersion; + std::string GCCToolchainName; + std::string GCCToolchainDir; + std::string Slash; + + XtensaGCCToolchainDetector(const Driver &D, const llvm::Triple &HostTriple, + const llvm::opt::ArgList &Args); + + bool IsValid() const { return GCCToolchainName != ""; } +}; + +class LLVM_LIBRARY_VISIBILITY XtensaToolChain : public Generic_ELF { +protected: + Tool *buildLinker() const override; + Tool *buildAssembler() const override; + +public: + XtensaToolChain(const Driver &D, const llvm::Triple &Triple, + const llvm::opt::ArgList &Args); + void + AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs, + llvm::opt::ArgStringList &CC1Args) const override; + void + addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs, + llvm::opt::ArgStringList &CC1Args) const override; + CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override; + bool IsIntegratedAssemblerDefault() const override { + return (IsIntegratedAsm || (XtensaGCCToolchain.GCCToolchainName == "")); + } + + static const StringRef GetTargetCPUVersion(const llvm::opt::ArgList &Args); + + XtensaGCCToolchainDetector XtensaGCCToolchain; + bool IsIntegratedAsm = true; +}; + +} // end namespace toolchains + +namespace tools { +namespace Xtensa { +class LLVM_LIBRARY_VISIBILITY Linker : public Tool { +public: + Linker(const ToolChain &TC) + : Tool("Xtensa::Linker", "xtensa-esp32-elf-ld", TC) {} + bool hasIntegratedCPP() const override { return false; } + bool isLinkJob() const 
override { return true; } + void ConstructJob(Compilation &C, const JobAction &JA, + const InputInfo &Output, const InputInfoList &Inputs, + const llvm::opt::ArgList &TCArgs, + const char *LinkingOutput) const override; +}; + +class LLVM_LIBRARY_VISIBILITY Assembler : public Tool { +public: + Assembler(const ToolChain &TC) + : Tool("Xtensa::Assembler", "xtensa-esp32-elf-as", TC) {} + + bool hasIntegratedCPP() const override { return false; } + void ConstructJob(Compilation &C, const JobAction &JA, + const InputInfo &Output, const InputInfoList &Inputs, + const llvm::opt::ArgList &TCArgs, + const char *LinkingOutput) const override; +}; + +} // end namespace Xtensa +} // end namespace tools +} // end namespace driver +} // end namespace clang + +#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_Xtensa_H From c66d6ab8440882fd61603977d33d1e98183e210d Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 20:23:53 +0300 Subject: [PATCH 044/289] [Xtensa] Implement multilib support Use Multilib class functionality to choose between library variants, based on the command line args. 
--- clang/lib/Driver/ToolChains/Xtensa.cpp | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/clang/lib/Driver/ToolChains/Xtensa.cpp b/clang/lib/Driver/ToolChains/Xtensa.cpp index 4c77a7e3a3979..930449424f1aa 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.cpp +++ b/clang/lib/Driver/ToolChains/Xtensa.cpp @@ -17,6 +17,7 @@ #include "clang/Driver/Distro.h" #include "clang/Driver/Driver.h" #include "clang/Driver/DriverDiagnostic.h" +#include "clang/Driver/MultilibBuilder.h" #include "clang/Driver/Options.h" #include "llvm/Option/ArgList.h" #include "llvm/Support/Path.h" @@ -29,6 +30,8 @@ using namespace clang::driver::toolchains; using namespace clang; using namespace llvm::opt; +using tools::addMultilibFlag; + XtensaGCCToolchainDetector::XtensaGCCToolchainDetector( const Driver &D, const llvm::Triple &HostTriple, const llvm::opt::ArgList &Args) { @@ -105,15 +108,30 @@ XtensaToolChain::XtensaToolChain(const Driver &D, const llvm::Triple &Triple, IsIntegratedAsm = false; } + Multilibs.push_back(Multilib()); + + Multilibs.push_back(MultilibBuilder("no-rtti", {}, {}) + .flag("-frtti", /*Disallow=*/true) + .flag("-fno-rtti") + .makeMultilib()); + + Multilib::flags_list Flags; + addMultilibFlag( + Args.hasFlag(options::OPT_frtti, options::OPT_fno_rtti, false), "frtti", + Flags); + + Multilibs.select(Flags, SelectedMultilibs); + const std::string Slash = XtensaGCCToolchain.Slash; std::string Libs = XtensaGCCToolchain.GCCToolchainDir + Slash + "lib" + Slash + "gcc" + Slash + XtensaGCCToolchain.GCCToolchainName + Slash + - XtensaGCCToolchain.GCCLibAndIncVersion; + XtensaGCCToolchain.GCCLibAndIncVersion + SelectedMultilibs.back().gccSuffix(); getFilePaths().push_back(Libs); Libs = XtensaGCCToolchain.GCCToolchainDir + Slash + - XtensaGCCToolchain.GCCToolchainName + Slash + "lib"; + XtensaGCCToolchain.GCCToolchainName + Slash + "lib" + + SelectedMultilibs.back().gccSuffix(); getFilePaths().push_back(Libs); } From 
9878a964afdce227db976b46bab79a3f689992eb Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 22:12:22 +0300 Subject: [PATCH 045/289] [Xtensa] Implemented builtins base functionality for Xtensa. --- clang/include/clang/Sema/Sema.h | 7 +++++++ clang/include/clang/Sema/SemaXtensa.h | 30 +++++++++++++++++++++++++++ clang/lib/Sema/CMakeLists.txt | 1 + clang/lib/Sema/Sema.cpp | 2 ++ clang/lib/Sema/SemaChecking.cpp | 3 +++ clang/lib/Sema/SemaXtensa.cpp | 27 ++++++++++++++++++++++++ 6 files changed, 70 insertions(+) create mode 100644 clang/include/clang/Sema/SemaXtensa.h create mode 100644 clang/lib/Sema/SemaXtensa.cpp diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h index 7bfdaaae45a93..1270b47a1bbad 100644 --- a/clang/include/clang/Sema/Sema.h +++ b/clang/include/clang/Sema/Sema.h @@ -195,6 +195,7 @@ class SemaSwift; class SemaSystemZ; class SemaWasm; class SemaX86; +class SemaXtensa; class StandardConversionSequence; class Stmt; class StringLiteral; @@ -1261,6 +1262,11 @@ class Sema final : public SemaBase { return *X86Ptr; } + SemaXtensa &Xtensa() { + assert(XtensaPtr); + return *XtensaPtr; + } + /// Source of additional semantic information. IntrusiveRefCntPtr ExternalSource; @@ -1316,6 +1322,7 @@ class Sema final : public SemaBase { std::unique_ptr SystemZPtr; std::unique_ptr WasmPtr; std::unique_ptr X86Ptr; + std::unique_ptr XtensaPtr; ///@} diff --git a/clang/include/clang/Sema/SemaXtensa.h b/clang/include/clang/Sema/SemaXtensa.h new file mode 100644 index 0000000000000..2dccfd10fa4d2 --- /dev/null +++ b/clang/include/clang/Sema/SemaXtensa.h @@ -0,0 +1,30 @@ +//===----- SemaXtensa.h ----- Xtensa target-specific routines --*- C++ -*--===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// \file +/// This file declares semantic analysis functions specific to Xtensa. +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_SEMA_SEMAXTENSA_H +#define LLVM_CLANG_SEMA_SEMAXTENSA_H + +#include "clang/AST/Expr.h" +#include "clang/Basic/TargetInfo.h" +#include "clang/Sema/SemaBase.h" + +namespace clang { +class SemaXtensa : public SemaBase { +public: + SemaXtensa(Sema &S); + + bool CheckXtensaBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, + CallExpr *TheCall); +}; +} // namespace clang + +#endif // LLVM_CLANG_SEMA_SEMAXTENSA_H diff --git a/clang/lib/Sema/CMakeLists.txt b/clang/lib/Sema/CMakeLists.txt index 2cee4f5ef6e99..a25f14872e247 100644 --- a/clang/lib/Sema/CMakeLists.txt +++ b/clang/lib/Sema/CMakeLists.txt @@ -90,6 +90,7 @@ add_clang_library(clangSema SemaType.cpp SemaWasm.cpp SemaX86.cpp + SemaXtensa.cpp TypeLocBuilder.cpp DEPENDS diff --git a/clang/lib/Sema/Sema.cpp b/clang/lib/Sema/Sema.cpp index 2e989f0ba6fe4..d6c148faaa139 100644 --- a/clang/lib/Sema/Sema.cpp +++ b/clang/lib/Sema/Sema.cpp @@ -68,6 +68,7 @@ #include "clang/Sema/SemaSystemZ.h" #include "clang/Sema/SemaWasm.h" #include "clang/Sema/SemaX86.h" +#include "clang/Sema/SemaXtensa.h" #include "clang/Sema/TemplateDeduction.h" #include "clang/Sema/TemplateInstCallback.h" #include "clang/Sema/TypoCorrection.h" @@ -247,6 +248,7 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, SystemZPtr(std::make_unique(*this)), WasmPtr(std::make_unique(*this)), X86Ptr(std::make_unique(*this)), + XtensaPtr(std::make_unique(*this)), MSPointerToMemberRepresentationMethod( LangOpts.getMSPointerToMemberRepresentationMethod()), MSStructPragmaOn(false), VtorDispStack(LangOpts.getVtorDispMode()), diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp index 
9088b5e285bf8..b1423eede51b5 100644 --- a/clang/lib/Sema/SemaChecking.cpp +++ b/clang/lib/Sema/SemaChecking.cpp @@ -77,6 +77,7 @@ #include "clang/Sema/SemaSystemZ.h" #include "clang/Sema/SemaWasm.h" #include "clang/Sema/SemaX86.h" +#include "clang/Sema/SemaXtensa.h" #include "llvm/ADT/APFloat.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/APSInt.h" @@ -1915,6 +1916,8 @@ bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, case llvm::Triple::nvptx: case llvm::Triple::nvptx64: return NVPTX().CheckNVPTXBuiltinFunctionCall(TI, BuiltinID, TheCall); + case llvm::Triple::xtensa: + return Xtensa().CheckXtensaBuiltinFunctionCall(TI, BuiltinID, TheCall); } } diff --git a/clang/lib/Sema/SemaXtensa.cpp b/clang/lib/Sema/SemaXtensa.cpp new file mode 100644 index 0000000000000..46c6b511542b6 --- /dev/null +++ b/clang/lib/Sema/SemaXtensa.cpp @@ -0,0 +1,27 @@ +//===------ SemaXtensa.cpp ------- Xtensa target-specific routines --------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements semantic analysis functions specific to Xtensa. +// +//===----------------------------------------------------------------------===// + +#include "clang/Sema/SemaXtensa.h" +#include "clang/Basic/TargetBuiltins.h" +#include "clang/Sema/Sema.h" + +namespace clang { + +SemaXtensa::SemaXtensa(Sema &S) : SemaBase(S) {} + +bool SemaXtensa::CheckXtensaBuiltinFunctionCall(const TargetInfo &TI, + unsigned BuiltinID, + CallExpr *TheCall) { + return false; +} + +} // namespace clang From 7734c14ca1a59a491559a25d4e507a13a0534d4a Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 23:44:46 +0300 Subject: [PATCH 046/289] [Xtensa] Implemented builtins for Xtensa MAC16 instructions. 
--- clang/include/clang/Basic/BuiltinsXtensa.def | 127 +++++++ clang/include/clang/Basic/TargetBuiltins.h | 10 + clang/lib/Basic/Targets/Xtensa.cpp | 11 + clang/lib/Basic/Targets/Xtensa.h | 5 +- clang/lib/Sema/SemaXtensa.cpp | 75 +++- llvm/include/llvm/IR/CMakeLists.txt | 1 + llvm/include/llvm/IR/Intrinsics.td | 1 + llvm/include/llvm/IR/IntrinsicsXtensa.td | 251 ++++++++++++++ llvm/lib/IR/Function.cpp | 1 + llvm/lib/Target/Xtensa/XtensaDSPInstrInfo.td | 175 ++++++++-- llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp | 249 ++++++++++++++ llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 262 ++++++++++++++ llvm/test/CodeGen/Xtensa/mac16_intrinsics.ll | 319 ++++++++++++++++++ .../secondary/llvm/include/llvm/IR/BUILD.gn | 5 + 14 files changed, 1459 insertions(+), 33 deletions(-) create mode 100644 clang/include/clang/Basic/BuiltinsXtensa.def create mode 100644 llvm/include/llvm/IR/IntrinsicsXtensa.td create mode 100644 llvm/test/CodeGen/Xtensa/mac16_intrinsics.ll diff --git a/clang/include/clang/Basic/BuiltinsXtensa.def b/clang/include/clang/Basic/BuiltinsXtensa.def new file mode 100644 index 0000000000000..b00c7bd112611 --- /dev/null +++ b/clang/include/clang/Basic/BuiltinsXtensa.def @@ -0,0 +1,127 @@ +//===-- BuiltinsXtensa.def - Xtensa Builtin function database ----*- C++ -*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the Xtensa-specific builtin function database. Users of +// this file must define the BUILTIN macro to make use of this information. +// +//===----------------------------------------------------------------------===// + +// The format of this database matches clang/Basic/Builtins.def. 
+ +BUILTIN(__builtin_xtensa_umul_aa_ll, "vUiUi", "n") +BUILTIN(__builtin_xtensa_umul_aa_lh, "vUiUi", "n") +BUILTIN(__builtin_xtensa_umul_aa_hl, "vUiUi", "n") +BUILTIN(__builtin_xtensa_umul_aa_hh, "vUiUi", "n") + +BUILTIN(__builtin_xtensa_mul_aa_ll, "vUiUi", "n") +BUILTIN(__builtin_xtensa_mul_aa_lh, "vUiUi", "n") +BUILTIN(__builtin_xtensa_mul_aa_hl, "vUiUi", "n") +BUILTIN(__builtin_xtensa_mul_aa_hh, "vUiUi", "n") + +BUILTIN(__builtin_xtensa_mul_ad_ll, "vUiIUi", "n") +BUILTIN(__builtin_xtensa_mul_ad_lh, "vUiIUi", "n") +BUILTIN(__builtin_xtensa_mul_ad_hl, "vUiIUi", "n") +BUILTIN(__builtin_xtensa_mul_ad_hh, "vUiIUi", "n") + +BUILTIN(__builtin_xtensa_mul_da_ll, "vIUiUi", "n") +BUILTIN(__builtin_xtensa_mul_da_lh, "vIUiUi", "n") +BUILTIN(__builtin_xtensa_mul_da_hl, "vIUiUi", "n") +BUILTIN(__builtin_xtensa_mul_da_hh, "vIUiUi", "n") + +BUILTIN(__builtin_xtensa_mul_dd_ll, "vIUiIUi", "n") +BUILTIN(__builtin_xtensa_mul_dd_lh, "vIUiIUi", "n") +BUILTIN(__builtin_xtensa_mul_dd_hl, "vIUiIUi", "n") +BUILTIN(__builtin_xtensa_mul_dd_hh, "vIUiIUi", "n") + +BUILTIN(__builtin_xtensa_mula_aa_ll, "vUiUi", "n") +BUILTIN(__builtin_xtensa_mula_aa_lh, "vUiUi", "n") +BUILTIN(__builtin_xtensa_mula_aa_hl, "vUiUi", "n") +BUILTIN(__builtin_xtensa_mula_aa_hh, "vUiUi", "n") + +BUILTIN(__builtin_xtensa_mula_ad_ll, "vUiIUi", "n") +BUILTIN(__builtin_xtensa_mula_ad_lh, "vUiIUi", "n") +BUILTIN(__builtin_xtensa_mula_ad_hl, "vUiIUi", "n") +BUILTIN(__builtin_xtensa_mula_ad_hh, "vUiIUi", "n") + +BUILTIN(__builtin_xtensa_mula_da_ll, "vIUiUi", "n") +BUILTIN(__builtin_xtensa_mula_da_lh, "vIUiUi", "n") +BUILTIN(__builtin_xtensa_mula_da_hl, "vIUiUi", "n") +BUILTIN(__builtin_xtensa_mula_da_hh, "vIUiUi", "n") + +BUILTIN(__builtin_xtensa_mula_dd_ll, "vIUiIUi", "n") +BUILTIN(__builtin_xtensa_mula_dd_lh, "vIUiIUi", "n") +BUILTIN(__builtin_xtensa_mula_dd_hl, "vIUiIUi", "n") +BUILTIN(__builtin_xtensa_mula_dd_hh, "vIUiIUi", "n") + +BUILTIN(__builtin_xtensa_muls_aa_ll, "vUiUi", "n") +BUILTIN(__builtin_xtensa_muls_aa_lh, 
"vUiUi", "n") +BUILTIN(__builtin_xtensa_muls_aa_hl, "vUiUi", "n") +BUILTIN(__builtin_xtensa_muls_aa_hh, "vUiUi", "n") + +BUILTIN(__builtin_xtensa_muls_ad_ll, "vUiIUi", "n") +BUILTIN(__builtin_xtensa_muls_ad_lh, "vUiIUi", "n") +BUILTIN(__builtin_xtensa_muls_ad_hl, "vUiIUi", "n") +BUILTIN(__builtin_xtensa_muls_ad_hh, "vUiIUi", "n") + +BUILTIN(__builtin_xtensa_muls_da_ll, "vIUiUi", "n") +BUILTIN(__builtin_xtensa_muls_da_lh, "vIUiUi", "n") +BUILTIN(__builtin_xtensa_muls_da_hl, "vIUiUi", "n") +BUILTIN(__builtin_xtensa_muls_da_hh, "vIUiUi", "n") + +BUILTIN(__builtin_xtensa_muls_dd_ll, "vIUiIUi", "n") +BUILTIN(__builtin_xtensa_muls_dd_lh, "vIUiIUi", "n") +BUILTIN(__builtin_xtensa_muls_dd_hl, "vIUiIUi", "n") +BUILTIN(__builtin_xtensa_muls_dd_hh, "vIUiIUi", "n") + +BUILTIN(__builtin_xtensa_mula_da_ll_lddec, "vIUii**IUii", "n") +BUILTIN(__builtin_xtensa_mula_da_lh_lddec, "vIUii**IUii", "n") +BUILTIN(__builtin_xtensa_mula_da_hl_lddec, "vIUii**IUii", "n") +BUILTIN(__builtin_xtensa_mula_da_hh_lddec, "vIUii**IUii", "n") + +BUILTIN(__builtin_xtensa_mula_da_ll_ldinc, "vIUii**IUii", "n") +BUILTIN(__builtin_xtensa_mula_da_lh_ldinc, "vIUii**IUii", "n") +BUILTIN(__builtin_xtensa_mula_da_hl_ldinc, "vIUii**IUii", "n") +BUILTIN(__builtin_xtensa_mula_da_hh_ldinc, "vIUii**IUii", "n") + +BUILTIN(__builtin_xtensa_mula_dd_ll_lddec, "vIUii**IUiIUi", "n") +BUILTIN(__builtin_xtensa_mula_dd_lh_lddec, "vIUii**IUiIUi", "n") +BUILTIN(__builtin_xtensa_mula_dd_hl_lddec, "vIUii**IUiIUi", "n") +BUILTIN(__builtin_xtensa_mula_dd_hh_lddec, "vIUii**IUiIUi", "n") + +BUILTIN(__builtin_xtensa_mula_dd_ll_ldinc, "vIUii**IUiIUi", "n") +BUILTIN(__builtin_xtensa_mula_dd_lh_ldinc, "vIUii**IUiIUi", "n") +BUILTIN(__builtin_xtensa_mula_dd_hl_ldinc, "vIUii**IUiIUi", "n") +BUILTIN(__builtin_xtensa_mula_dd_hh_ldinc, "vIUii**IUiIUi", "n") + +// Load operations + +BUILTIN(__builtin_xtensa_ldinc, "vIUii**", "n") +BUILTIN(__builtin_xtensa_lddec, "vIUii**", "n") + +// WSR/RSR/XSR + +BUILTIN(__builtin_xtensa_wsr_acclo, "vUi", 
"n") +BUILTIN(__builtin_xtensa_rsr_acclo, "Ui", "n") +BUILTIN(__builtin_xtensa_xsr_acclo, "vUi*", "n") +BUILTIN(__builtin_xtensa_wsr_acchi, "vUi", "n") +BUILTIN(__builtin_xtensa_rsr_acchi, "Ui", "n") +BUILTIN(__builtin_xtensa_xsr_acchi, "vUi*", "n") +BUILTIN(__builtin_xtensa_wsr_m0, "vUi", "n") +BUILTIN(__builtin_xtensa_rsr_m0, "Ui", "n") +BUILTIN(__builtin_xtensa_xsr_m0, "vUi*", "n") +BUILTIN(__builtin_xtensa_wsr_m1, "vUi", "n") +BUILTIN(__builtin_xtensa_rsr_m1, "Ui", "n") +BUILTIN(__builtin_xtensa_xsr_m1, "vUi*", "n") +BUILTIN(__builtin_xtensa_wsr_m2, "vUi", "n") +BUILTIN(__builtin_xtensa_rsr_m2, "Ui", "n") +BUILTIN(__builtin_xtensa_xsr_m2, "vUi*", "n") +BUILTIN(__builtin_xtensa_wsr_m3, "vUi", "n") +BUILTIN(__builtin_xtensa_rsr_m3, "Ui", "n") +BUILTIN(__builtin_xtensa_xsr_m3, "vUi*", "n") + +#undef BUILTIN diff --git a/clang/include/clang/Basic/TargetBuiltins.h b/clang/include/clang/Basic/TargetBuiltins.h index 4333830bf34f2..e357667b0a9de 100644 --- a/clang/include/clang/Basic/TargetBuiltins.h +++ b/clang/include/clang/Basic/TargetBuiltins.h @@ -366,6 +366,16 @@ namespace clang { }; } + /// Xtensa builtins + namespace Xtensa { + enum { + LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1, +#define BUILTIN(ID, TYPE, ATTRS) BI##ID, +#include "clang/Basic/BuiltinsXtensa.def" + LastTSBuiltin + }; + } // namespace Xtensa + static constexpr uint64_t LargestBuiltinID = std::max( {ARM::LastTSBuiltin, AArch64::LastTSBuiltin, BPF::LastTSBuiltin, PPC::LastTSBuiltin, NVPTX::LastTSBuiltin, AMDGPU::LastTSBuiltin, diff --git a/clang/lib/Basic/Targets/Xtensa.cpp b/clang/lib/Basic/Targets/Xtensa.cpp index 270af0a05cfdc..6ca5cba2f6aec 100644 --- a/clang/lib/Basic/Targets/Xtensa.cpp +++ b/clang/lib/Basic/Targets/Xtensa.cpp @@ -20,6 +20,17 @@ using namespace clang; using namespace clang::targets; +static constexpr Builtin::Info BuiltinInfo[] = { +#define BUILTIN(ID, TYPE, ATTRS) \ + {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES}, +#include 
"clang/Basic/BuiltinsXtensa.def" +}; + +ArrayRef XtensaTargetInfo::getTargetBuiltins() const { + return llvm::ArrayRef(BuiltinInfo, + clang::Xtensa::LastTSBuiltin - Builtin::FirstTSBuiltin); +} + void XtensaTargetInfo::getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const { Builder.defineMacro("__Xtensa__"); diff --git a/clang/lib/Basic/Targets/Xtensa.h b/clang/lib/Basic/Targets/Xtensa.h index 76ec5dc6c0033..a9bf0f8cc8f58 100644 --- a/clang/lib/Basic/Targets/Xtensa.h +++ b/clang/lib/Basic/Targets/Xtensa.h @@ -29,7 +29,6 @@ namespace clang { namespace targets { class LLVM_LIBRARY_VISIBILITY XtensaTargetInfo : public TargetInfo { - static const Builtin::Info BuiltinInfo[]; std::string CPU; public: @@ -53,9 +52,7 @@ class LLVM_LIBRARY_VISIBILITY XtensaTargetInfo : public TargetInfo { void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override; - ArrayRef getTargetBuiltins() const override { - return std::nullopt; - } + ArrayRef getTargetBuiltins() const override; BuiltinVaListKind getBuiltinVaListKind() const override { diff --git a/clang/lib/Sema/SemaXtensa.cpp b/clang/lib/Sema/SemaXtensa.cpp index 46c6b511542b6..b81e4381fc44f 100644 --- a/clang/lib/Sema/SemaXtensa.cpp +++ b/clang/lib/Sema/SemaXtensa.cpp @@ -21,7 +21,80 @@ SemaXtensa::SemaXtensa(Sema &S) : SemaBase(S) {} bool SemaXtensa::CheckXtensaBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall) { - return false; + unsigned i = 0, l = 0, u = 0; + + switch (BuiltinID) { + default: + return false; + case Xtensa::BI__builtin_xtensa_mul_ad_ll: + case Xtensa::BI__builtin_xtensa_mul_ad_lh: + case Xtensa::BI__builtin_xtensa_mul_ad_hl: + case Xtensa::BI__builtin_xtensa_mul_ad_hh: + case Xtensa::BI__builtin_xtensa_mula_ad_ll: + case Xtensa::BI__builtin_xtensa_mula_ad_lh: + case Xtensa::BI__builtin_xtensa_mula_ad_hl: + case Xtensa::BI__builtin_xtensa_mula_ad_hh: + case Xtensa::BI__builtin_xtensa_muls_ad_ll: + case Xtensa::BI__builtin_xtensa_muls_ad_lh: 
+ case Xtensa::BI__builtin_xtensa_muls_ad_hl: + case Xtensa::BI__builtin_xtensa_muls_ad_hh: + i = 1; + l = 2; + u = 3; + break; + case Xtensa::BI__builtin_xtensa_mul_da_ll: + case Xtensa::BI__builtin_xtensa_mul_da_lh: + case Xtensa::BI__builtin_xtensa_mul_da_hl: + case Xtensa::BI__builtin_xtensa_mul_da_hh: + case Xtensa::BI__builtin_xtensa_mula_da_ll: + case Xtensa::BI__builtin_xtensa_mula_da_lh: + case Xtensa::BI__builtin_xtensa_mula_da_hl: + case Xtensa::BI__builtin_xtensa_mula_da_hh: + case Xtensa::BI__builtin_xtensa_muls_da_ll: + case Xtensa::BI__builtin_xtensa_muls_da_lh: + case Xtensa::BI__builtin_xtensa_muls_da_hl: + case Xtensa::BI__builtin_xtensa_muls_da_hh: + i = 0; + l = 0; + u = 1; + break; + case Xtensa::BI__builtin_xtensa_mul_dd_ll: + case Xtensa::BI__builtin_xtensa_mul_dd_lh: + case Xtensa::BI__builtin_xtensa_mul_dd_hl: + case Xtensa::BI__builtin_xtensa_mul_dd_hh: + case Xtensa::BI__builtin_xtensa_mula_dd_ll: + case Xtensa::BI__builtin_xtensa_mula_dd_lh: + case Xtensa::BI__builtin_xtensa_mula_dd_hl: + case Xtensa::BI__builtin_xtensa_mula_dd_hh: + case Xtensa::BI__builtin_xtensa_muls_dd_ll: + case Xtensa::BI__builtin_xtensa_muls_dd_lh: + case Xtensa::BI__builtin_xtensa_muls_dd_hl: + case Xtensa::BI__builtin_xtensa_muls_dd_hh: + return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 1) || + SemaRef.BuiltinConstantArgRange(TheCall, 1, 2, 3); + case Xtensa::BI__builtin_xtensa_mula_da_ll_lddec: + case Xtensa::BI__builtin_xtensa_mula_da_lh_lddec: + case Xtensa::BI__builtin_xtensa_mula_da_hl_lddec: + case Xtensa::BI__builtin_xtensa_mula_da_hh_lddec: + case Xtensa::BI__builtin_xtensa_mula_da_ll_ldinc: + case Xtensa::BI__builtin_xtensa_mula_da_lh_ldinc: + case Xtensa::BI__builtin_xtensa_mula_da_hl_ldinc: + case Xtensa::BI__builtin_xtensa_mula_da_hh_ldinc: + return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) || + SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 1); + case Xtensa::BI__builtin_xtensa_mula_dd_ll_lddec: + case 
Xtensa::BI__builtin_xtensa_mula_dd_lh_lddec: + case Xtensa::BI__builtin_xtensa_mula_dd_hl_lddec: + case Xtensa::BI__builtin_xtensa_mula_dd_hh_lddec: + case Xtensa::BI__builtin_xtensa_mula_dd_ll_ldinc: + case Xtensa::BI__builtin_xtensa_mula_dd_lh_ldinc: + case Xtensa::BI__builtin_xtensa_mula_dd_hl_ldinc: + case Xtensa::BI__builtin_xtensa_mula_dd_hh_ldinc: + return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) || + SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 1) || + SemaRef.BuiltinConstantArgRange(TheCall, 3, 2, 3); + } + return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u); } } // namespace clang diff --git a/llvm/include/llvm/IR/CMakeLists.txt b/llvm/include/llvm/IR/CMakeLists.txt index 468d663796ed4..83e98c8fd7c92 100644 --- a/llvm/include/llvm/IR/CMakeLists.txt +++ b/llvm/include/llvm/IR/CMakeLists.txt @@ -21,5 +21,6 @@ tablegen(LLVM IntrinsicsS390.h -gen-intrinsic-enums -intrinsic-prefix=s390) tablegen(LLVM IntrinsicsWebAssembly.h -gen-intrinsic-enums -intrinsic-prefix=wasm) tablegen(LLVM IntrinsicsX86.h -gen-intrinsic-enums -intrinsic-prefix=x86) tablegen(LLVM IntrinsicsXCore.h -gen-intrinsic-enums -intrinsic-prefix=xcore) +tablegen(LLVM IntrinsicsXtensa.h -gen-intrinsic-enums -intrinsic-prefix=xtensa) tablegen(LLVM IntrinsicsVE.h -gen-intrinsic-enums -intrinsic-prefix=ve) add_public_tablegen_target(intrinsics_gen) diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td index b4e758136b39f..82d539f776e70 100644 --- a/llvm/include/llvm/IR/Intrinsics.td +++ b/llvm/include/llvm/IR/Intrinsics.td @@ -2764,5 +2764,6 @@ include "llvm/IR/IntrinsicsSPIRV.td" include "llvm/IR/IntrinsicsVE.td" include "llvm/IR/IntrinsicsDirectX.td" include "llvm/IR/IntrinsicsLoongArch.td" +include "llvm/IR/IntrinsicsXtensa.td" #endif // TEST_INTRINSICS_SUPPRESS_DEFS diff --git a/llvm/include/llvm/IR/IntrinsicsXtensa.td b/llvm/include/llvm/IR/IntrinsicsXtensa.td new file mode 100644 index 0000000000000..d7d25609b5d56 --- /dev/null +++ 
b/llvm/include/llvm/IR/IntrinsicsXtensa.td @@ -0,0 +1,251 @@ +//===- IntrinsicsXtensa.td - Defines Xtensa intrinsics -----*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines all of the Xtensa-specific intrinsics. +// +//===----------------------------------------------------------------------===// + +let TargetPrefix = "xtensa" in { // All intrinsics start with "llvm.xtensa.". + +def int_xtensa_umul_aa_ll: ClangBuiltin<"__builtin_xtensa_umul_aa_ll">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; +def int_xtensa_umul_aa_hl: ClangBuiltin<"__builtin_xtensa_umul_aa_hl">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; +def int_xtensa_umul_aa_lh: ClangBuiltin<"__builtin_xtensa_umul_aa_lh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; +def int_xtensa_umul_aa_hh: ClangBuiltin<"__builtin_xtensa_umul_aa_hh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_xtensa_mul_aa_ll: ClangBuiltin<"__builtin_xtensa_mul_aa_ll">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; +def int_xtensa_mul_aa_hl: ClangBuiltin<"__builtin_xtensa_mul_aa_hl">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; +def int_xtensa_mul_aa_lh: ClangBuiltin<"__builtin_xtensa_mul_aa_lh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; +def int_xtensa_mul_aa_hh: ClangBuiltin<"__builtin_xtensa_mul_aa_hh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_xtensa_mul_ad_ll: ClangBuiltin<"__builtin_xtensa_mul_ad_ll">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; +def int_xtensa_mul_ad_hl: ClangBuiltin<"__builtin_xtensa_mul_ad_hl">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; +def int_xtensa_mul_ad_lh: ClangBuiltin<"__builtin_xtensa_mul_ad_lh">, + Intrinsic<[], [llvm_i32_ty, 
llvm_i32_ty], [ImmArg>]>; +def int_xtensa_mul_ad_hh: ClangBuiltin<"__builtin_xtensa_mul_ad_hh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_mul_da_ll: ClangBuiltin<"__builtin_xtensa_mul_da_ll">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; +def int_xtensa_mul_da_hl: ClangBuiltin<"__builtin_xtensa_mul_da_hl">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; +def int_xtensa_mul_da_lh: ClangBuiltin<"__builtin_xtensa_mul_da_lh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; +def int_xtensa_mul_da_hh: ClangBuiltin<"__builtin_xtensa_mul_da_hh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_mul_dd_ll: ClangBuiltin<"__builtin_xtensa_mul_dd_ll">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; +def int_xtensa_mul_dd_hl: ClangBuiltin<"__builtin_xtensa_mul_dd_hl">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; +def int_xtensa_mul_dd_lh: ClangBuiltin<"__builtin_xtensa_mul_dd_lh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; +def int_xtensa_mul_dd_hh: ClangBuiltin<"__builtin_xtensa_mul_dd_hh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_mula_aa_ll: ClangBuiltin<"__builtin_xtensa_mula_aa_ll">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; +def int_xtensa_mula_aa_hl: ClangBuiltin<"__builtin_xtensa_mula_aa_hl">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; +def int_xtensa_mula_aa_lh: ClangBuiltin<"__builtin_xtensa_mula_aa_lh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; +def int_xtensa_mula_aa_hh: ClangBuiltin<"__builtin_xtensa_mula_aa_hh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_xtensa_mula_ad_ll: ClangBuiltin<"__builtin_xtensa_mula_ad_ll">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; +def int_xtensa_mula_ad_hl: ClangBuiltin<"__builtin_xtensa_mula_ad_hl">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; +def int_xtensa_mula_ad_lh: 
ClangBuiltin<"__builtin_xtensa_mula_ad_lh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; +def int_xtensa_mula_ad_hh: ClangBuiltin<"__builtin_xtensa_mula_ad_hh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_mula_da_ll: ClangBuiltin<"__builtin_xtensa_mula_da_ll">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; +def int_xtensa_mula_da_hl: ClangBuiltin<"__builtin_xtensa_mula_da_hl">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; +def int_xtensa_mula_da_lh: ClangBuiltin<"__builtin_xtensa_mula_da_lh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; +def int_xtensa_mula_da_hh: ClangBuiltin<"__builtin_xtensa_mula_da_hh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_mula_dd_ll: ClangBuiltin<"__builtin_xtensa_mula_dd_ll">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; +def int_xtensa_mula_dd_hl: ClangBuiltin<"__builtin_xtensa_mula_dd_hl">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; +def int_xtensa_mula_dd_lh: ClangBuiltin<"__builtin_xtensa_mula_dd_lh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; +def int_xtensa_mula_dd_hh: ClangBuiltin<"__builtin_xtensa_mula_dd_hh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_muls_aa_ll: ClangBuiltin<"__builtin_xtensa_muls_aa_ll">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; +def int_xtensa_muls_aa_hl: ClangBuiltin<"__builtin_xtensa_muls_aa_hl">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; +def int_xtensa_muls_aa_lh: ClangBuiltin<"__builtin_xtensa_muls_aa_lh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; +def int_xtensa_muls_aa_hh: ClangBuiltin<"__builtin_xtensa_muls_aa_hh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_xtensa_muls_ad_ll: ClangBuiltin<"__builtin_xtensa_muls_ad_ll">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; +def int_xtensa_muls_ad_hl: ClangBuiltin<"__builtin_xtensa_muls_ad_hl">, + 
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; +def int_xtensa_muls_ad_lh: ClangBuiltin<"__builtin_xtensa_muls_ad_lh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; +def int_xtensa_muls_ad_hh: ClangBuiltin<"__builtin_xtensa_muls_ad_hh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_muls_da_ll: ClangBuiltin<"__builtin_xtensa_muls_da_ll">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; +def int_xtensa_muls_da_hl: ClangBuiltin<"__builtin_xtensa_muls_da_hl">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; +def int_xtensa_muls_da_lh: ClangBuiltin<"__builtin_xtensa_muls_da_lh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; +def int_xtensa_muls_da_hh: ClangBuiltin<"__builtin_xtensa_muls_da_hh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_muls_dd_ll: ClangBuiltin<"__builtin_xtensa_muls_dd_ll">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; +def int_xtensa_muls_dd_hl: ClangBuiltin<"__builtin_xtensa_muls_dd_hl">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; +def int_xtensa_muls_dd_lh: ClangBuiltin<"__builtin_xtensa_muls_dd_lh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; +def int_xtensa_muls_dd_hh: ClangBuiltin<"__builtin_xtensa_muls_dd_hh">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + + +def int_xtensa_mula_da_ll_lddec: ClangBuiltin<"__builtin_xtensa_mula_da_ll_lddec">, + Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [ImmArg>, ImmArg>]>; +def int_xtensa_mula_da_lh_lddec: ClangBuiltin<"__builtin_xtensa_mula_da_lh_lddec">, + Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [ImmArg>, ImmArg>]>; +def int_xtensa_mula_da_hl_lddec: ClangBuiltin<"__builtin_xtensa_mula_da_hl_lddec">, + Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [ImmArg>, ImmArg>]>; +def int_xtensa_mula_da_hh_lddec: 
ClangBuiltin<"__builtin_xtensa_mula_da_hh_lddec">, + Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [ImmArg>, ImmArg>]>; + +def int_xtensa_mula_da_ll_ldinc: ClangBuiltin<"__builtin_xtensa_mula_da_ll_ldinc">, + Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [ImmArg>, ImmArg>]>; +def int_xtensa_mula_da_lh_ldinc: ClangBuiltin<"__builtin_xtensa_mula_da_lh_ldinc">, + Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [ImmArg>, ImmArg>]>; +def int_xtensa_mula_da_hl_ldinc: ClangBuiltin<"__builtin_xtensa_mula_da_hl_ldinc">, + Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [ImmArg>, ImmArg>]>; +def int_xtensa_mula_da_hh_ldinc: ClangBuiltin<"__builtin_xtensa_mula_da_hh_ldinc">, + Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [ImmArg>, ImmArg>]>; + +def int_xtensa_mula_dd_ll_lddec: ClangBuiltin<"__builtin_xtensa_mula_dd_ll_lddec">, + Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [ImmArg>, ImmArg>, ImmArg>]>; +def int_xtensa_mula_dd_lh_lddec: ClangBuiltin<"__builtin_xtensa_mula_dd_lh_lddec">, + Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [ImmArg>, ImmArg>, ImmArg>]>; +def int_xtensa_mula_dd_hl_lddec: ClangBuiltin<"__builtin_xtensa_mula_dd_hl_lddec">, + Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [ImmArg>, ImmArg>, ImmArg>]>; +def int_xtensa_mula_dd_hh_lddec: ClangBuiltin<"__builtin_xtensa_mula_dd_hh_lddec">, + Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_mula_dd_ll_ldinc: ClangBuiltin<"__builtin_xtensa_mula_dd_ll_ldinc">, + Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [ImmArg>, ImmArg>, ImmArg>]>; +def int_xtensa_mula_dd_lh_ldinc: ClangBuiltin<"__builtin_xtensa_mula_dd_lh_ldinc">, + Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [ImmArg>, ImmArg>, ImmArg>]>; +def 
int_xtensa_mula_dd_hl_ldinc: ClangBuiltin<"__builtin_xtensa_mula_dd_hl_ldinc">, + Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [ImmArg>, ImmArg>, ImmArg>]>; +def int_xtensa_mula_dd_hh_ldinc: ClangBuiltin<"__builtin_xtensa_mula_dd_hh_ldinc">, + Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], + [ImmArg>, ImmArg>, ImmArg>]>; + +//===----------------------------------------------------------------------===// +// Load operations + +def int_xtensa_lddec: ClangBuiltin<"__builtin_xtensa_lddec">, + Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty], [ImmArg>]>; + +def int_xtensa_ldinc: ClangBuiltin<"__builtin_xtensa_ldinc">, + Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty], [ImmArg>]>; + +//===----------------------------------------------------------------------===// +// WSR/XSR/RSR + +def int_xtensa_wsr_acclo: ClangBuiltin<"__builtin_xtensa_wsr_acclo">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_rsr_acclo: ClangBuiltin<"__builtin_xtensa_rsr_acclo">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_xsr_acclo: ClangBuiltin<"__builtin_xtensa_xsr_acclo">, + Intrinsic<[], [llvm_ptr_ty], []>; + +def int_xtensa_wsr_acchi: ClangBuiltin<"__builtin_xtensa_wsr_acchi">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_rsr_acchi: ClangBuiltin<"__builtin_xtensa_rsr_acchi">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_xsr_acchi: ClangBuiltin<"__builtin_xtensa_xsr_acchi">, + Intrinsic<[], [llvm_ptr_ty], []>; + +def int_xtensa_wsr_m0: ClangBuiltin<"__builtin_xtensa_wsr_m0">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_rsr_m0: ClangBuiltin<"__builtin_xtensa_rsr_m0">, + Intrinsic<[llvm_i32_ty]>; + +def int_xtensa_xsr_m0: ClangBuiltin<"__builtin_xtensa_xsr_m0">, + Intrinsic<[], [llvm_ptr_ty], []>; + +def int_xtensa_wsr_m1: ClangBuiltin<"__builtin_xtensa_wsr_m1">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_rsr_m1: ClangBuiltin<"__builtin_xtensa_rsr_m1">, + Intrinsic<[llvm_i32_ty], [], []>; + +def 
int_xtensa_xsr_m1: ClangBuiltin<"__builtin_xtensa_xsr_m1">, + Intrinsic<[], [llvm_ptr_ty], []>; + +def int_xtensa_wsr_m2: ClangBuiltin<"__builtin_xtensa_wsr_m2">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_rsr_m2: ClangBuiltin<"__builtin_xtensa_rsr_m2">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_xsr_m2: ClangBuiltin<"__builtin_xtensa_xsr_m2">, + Intrinsic<[], [llvm_ptr_ty], []>; + +def int_xtensa_wsr_m3: ClangBuiltin<"__builtin_xtensa_wsr_m3">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_rsr_m3: ClangBuiltin<"__builtin_xtensa_rsr_m3">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_xsr_m3: ClangBuiltin<"__builtin_xtensa_xsr_m3">, + Intrinsic<[], [llvm_ptr_ty], []>; + +} diff --git a/llvm/lib/IR/Function.cpp b/llvm/lib/IR/Function.cpp index 20871982afb06..9d3f6fea04402 100644 --- a/llvm/lib/IR/Function.cpp +++ b/llvm/lib/IR/Function.cpp @@ -50,6 +50,7 @@ #include "llvm/IR/IntrinsicsWebAssembly.h" #include "llvm/IR/IntrinsicsX86.h" #include "llvm/IR/IntrinsicsXCore.h" +#include "llvm/IR/IntrinsicsXtensa.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/MDBuilder.h" #include "llvm/IR/Metadata.h" diff --git a/llvm/lib/Target/Xtensa/XtensaDSPInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaDSPInstrInfo.td index d80df46320643..aec66efd8d2f2 100644 --- a/llvm/lib/Target/Xtensa/XtensaDSPInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaDSPInstrInfo.td @@ -13,29 +13,31 @@ //===----------------------------------------------------------------------===// // Multiply -class UMUL_AA oper1, string instrAsm> +class UMUL_AA oper1, string instrAsm, SDPatternOperator opNode> : RRR_Inst<0x04, oper1, 0x07, (outs), (ins AR:$s, AR:$t), - instrAsm#"\t$s, $t", []>, Requires<[HasMAC16]> { + instrAsm#"\t$s, $t", + [(opNode AR:$s, AR:$t)]>, Requires<[HasMAC16]> { let r = 0; let Defs = [M1, M2, ACCLO, ACCHI]; } -def UMUL_AA_LL : UMUL_AA<0x00, "umul.aa.ll">; -def UMUL_AA_HL : UMUL_AA<0x01, "umul.aa.hl">; -def UMUL_AA_LH : UMUL_AA<0x02, "umul.aa.lh">; -def 
UMUL_AA_HH : UMUL_AA<0x03, "umul.aa.hh">; +def UMUL_AA_LL : UMUL_AA<0x00, "umul.aa.ll", int_xtensa_umul_aa_ll>; +def UMUL_AA_HL : UMUL_AA<0x01, "umul.aa.hl", int_xtensa_umul_aa_hl>; +def UMUL_AA_LH : UMUL_AA<0x02, "umul.aa.lh", int_xtensa_umul_aa_lh>; +def UMUL_AA_HH : UMUL_AA<0x03, "umul.aa.hh", int_xtensa_umul_aa_hh>; -class MUL_AA oper1, string instrAsm> +class MUL_AA oper1, string instrAsm, SDPatternOperator opNode> : RRR_Inst<0x04, oper1, 0x07, (outs), (ins AR:$s, AR:$t), - instrAsm#"\t$s, $t", []>, Requires<[HasMAC16]> { + instrAsm#"\t$s, $t", + [(opNode AR:$s, AR:$t)]>, Requires<[HasMAC16]> { let r = 0; let Defs = [M1, M2, ACCLO, ACCHI]; } -def MUL_AA_LL : MUL_AA<0x04, "mul.aa.ll">; -def MUL_AA_HL : MUL_AA<0x05, "mul.aa.hl">; -def MUL_AA_LH : MUL_AA<0x06, "mul.aa.lh">; -def MUL_AA_HH : MUL_AA<0x07, "mul.aa.hh">; +def MUL_AA_LL : MUL_AA<0x04, "mul.aa.ll", int_xtensa_mul_aa_ll>; +def MUL_AA_HL : MUL_AA<0x05, "mul.aa.hl", int_xtensa_mul_aa_hl>; +def MUL_AA_LH : MUL_AA<0x06, "mul.aa.lh", int_xtensa_mul_aa_lh>; +def MUL_AA_HH : MUL_AA<0x07, "mul.aa.hh", int_xtensa_mul_aa_hh>; class MUL_AD oper1, string instrAsm> : RRR_Inst<0x04, oper1, 0x03, (outs), (ins AR:$s, MR23:$y), @@ -92,17 +94,18 @@ def MUL_DD_HL : MUL_DD<0x05, "mul.dd.hl">; def MUL_DD_LH : MUL_DD<0x06, "mul.dd.lh">; def MUL_DD_HH : MUL_DD<0x07, "mul.dd.hh">; -class MULA_AA oper1, string instrAsm> +class MULA_AA oper1, string instrAsm, SDPatternOperator opNode> : RRR_Inst<0x04, oper1, 0x07, (outs), (ins AR:$s, AR:$t), - instrAsm#"\t$s, $t", []>, Requires<[HasMAC16]> { + instrAsm#"\t$s, $t", + [(opNode AR:$s, AR:$t)]>, Requires<[HasMAC16]> { let r = 0; let Defs = [M1, M2, ACCLO, ACCHI]; } -def MULA_AA_LL : MULA_AA<0x08, "mula.aa.ll">; -def MULA_AA_HL : MULA_AA<0x09, "mula.aa.hl">; -def MULA_AA_LH : MULA_AA<0x0A, "mula.aa.lh">; -def MULA_AA_HH : MULA_AA<0x0B, "mula.aa.hh">; +def MULA_AA_LL : MULA_AA<0x08, "mula.aa.ll", int_xtensa_mula_aa_ll>; +def MULA_AA_HL : MULA_AA<0x09, "mula.aa.hl", 
int_xtensa_mula_aa_hl>; +def MULA_AA_LH : MULA_AA<0x0A, "mula.aa.lh", int_xtensa_mula_aa_lh>; +def MULA_AA_HH : MULA_AA<0x0B, "mula.aa.hh", int_xtensa_mula_aa_hh>; class MULA_AD oper1, string instrAsm> : RRR_Inst<0x04, oper1, 0x03, (outs), (ins AR:$s, MR23:$y), @@ -165,18 +168,19 @@ def MULA_DD_HL : MULA_DD<0x09, "mula.dd.hl">; def MULA_DD_LH : MULA_DD<0x0A, "mula.dd.lh">; def MULA_DD_HH : MULA_DD<0x0B, "mula.dd.hh">; -class MULS_AA oper1, string instrAsm> +class MULS_AA oper1, string instrAsm, SDPatternOperator opNode> : RRR_Inst<0x04, oper1, 0x07, (outs), (ins AR:$s, AR:$t), - instrAsm#"\t$s, $t", []>, Requires<[HasMAC16]> { + instrAsm#"\t$s, $t", + [(opNode AR:$s, AR:$t)]>, Requires<[HasMAC16]> { let r = 0; let Uses = [ACCLO, ACCHI]; let Defs = [M1, M2, ACCLO, ACCHI]; } -def MULS_AA_LL : MULS_AA<0x0C, "muls.aa.ll">; -def MULS_AA_HL : MULS_AA<0x0D, "muls.aa.hl">; -def MULS_AA_LH : MULS_AA<0x0E, "muls.aa.lh">; -def MULS_AA_HH : MULS_AA<0x0F, "muls.aa.hh">; +def MULS_AA_LL : MULS_AA<0x0C, "muls.aa.ll", int_xtensa_muls_aa_ll>; +def MULS_AA_HL : MULS_AA<0x0D, "muls.aa.hl", int_xtensa_muls_aa_hl>; +def MULS_AA_LH : MULS_AA<0x0E, "muls.aa.lh", int_xtensa_muls_aa_lh>; +def MULS_AA_HH : MULS_AA<0x0F, "muls.aa.hh", int_xtensa_muls_aa_hh>; class MULS_AD oper1, string instrAsm> : RRR_Inst<0x04, oper1, 0x03, (outs), (ins AR:$s, MR23:$y), @@ -262,6 +266,21 @@ def MULA_DA_HL_LDDEC : MULA_DA_LDDEC<0x09, "mula.da.hl.lddec">; def MULA_DA_LH_LDDEC : MULA_DA_LDDEC<0x0A, "mula.da.lh.lddec">; def MULA_DA_HH_LDDEC : MULA_DA_LDDEC<0x0B, "mula.da.hh.lddec">; +let usesCustomInserter = 1, Predicates = [HasMAC16] in { + def MULA_DA_LL_LDDEC_P : Pseudo<(outs), (ins imm8:$mw, AR:$s, imm8:$mx, AR:$t), + "!xtensa_mula_da_ll_lddec_p, $mw, $s, $mx, $t", + [(int_xtensa_mula_da_ll_lddec timm:$mw, AR:$s, timm:$mx, AR:$t)]>; + def MULA_DA_HL_LDDEC_P : Pseudo<(outs), (ins imm8:$mw, AR:$s, imm8:$mx, AR:$t), + "!xtensa_mula_da_hl_lddec_p, $mw, $s, $mx, $t", + [(int_xtensa_mula_da_hl_lddec timm:$mw, 
AR:$s, timm:$mx, AR:$t)]>; + def MULA_DA_LH_LDDEC_P : Pseudo<(outs), (ins imm8:$mw, AR:$s, imm8:$mx, AR:$t), + "!xtensa_mula_da_lh_lddec_p, $mw, $s, $mx, $t", + [(int_xtensa_mula_da_lh_lddec timm:$mw, AR:$s, timm:$mx, AR:$t)]>; + def MULA_DA_HH_LDDEC_P : Pseudo<(outs), (ins imm8:$mw, AR:$s, imm8:$mx, AR:$t), + "!xtensa_mula_da_hh_lddec_p, $mw, $s, $mx, $t", + [(int_xtensa_mula_da_hh_lddec timm:$mw, AR:$s, timm:$mx, AR:$t)]>; +} + class MULA_DA_LDINC oper1, string instrAsm> : RRR_Inst<0x04, oper1, 0x04, (outs MR:$w, AR:$d), (ins AR:$s, MR:$x, AR:$t), instrAsm#"\t $w, $s, $x, $t", []>, Requires<[HasMAC16]> { @@ -277,10 +296,25 @@ class MULA_DA_LDINC oper1, string instrAsm> let Defs = [M1, M2, ACCLO, ACCHI]; } -def MULA_DA_LL_LDINC: MULA_DA_LDINC<0x08, "mula.da.ll.ldinc">; -def MULA_DA_HL_LDINC: MULA_DA_LDINC<0x09, "mula.da.hl.ldinc">; -def MULA_DA_LH_LDINC: MULA_DA_LDINC<0x0A, "mula.da.lh.ldinc">; -def MULA_DA_HH_LDINC: MULA_DA_LDINC<0x0B, "mula.da.hh.ldinc">; +def MULA_DA_LL_LDINC : MULA_DA_LDINC<0x08, "mula.da.ll.ldinc">; +def MULA_DA_HL_LDINC : MULA_DA_LDINC<0x09, "mula.da.hl.ldinc">; +def MULA_DA_LH_LDINC : MULA_DA_LDINC<0x0A, "mula.da.lh.ldinc">; +def MULA_DA_HH_LDINC : MULA_DA_LDINC<0x0B, "mula.da.hh.ldinc">; + +let usesCustomInserter = 1, Predicates = [HasMAC16] in { + def MULA_DA_LL_LDINC_P : Pseudo<(outs), (ins imm8:$mw, AR:$s, imm8:$mx, AR:$t), + "!xtensa_mula_da_ll_ldinc_p, $mw, $s, $mx, $t", + [(int_xtensa_mula_da_ll_ldinc timm:$mw, AR:$s, timm:$mx, AR:$t)]>; + def MULA_DA_HL_LDINC_P : Pseudo<(outs), (ins imm8:$mw, AR:$s, imm8:$mx, AR:$t), + "!xtensa_mula_da_hl_ldinc_p, $mw, $s, $mx, $t", + [(int_xtensa_mula_da_hl_ldinc timm:$mw, AR:$s, timm:$mx, AR:$t)]>; + def MULA_DA_LH_LDINC_P : Pseudo<(outs), (ins imm8:$mw, AR:$s, imm8:$mx, AR:$t), + "!xtensa_mula_da_lh_ldinc_p, $mw, $s, $mx, $t", + [(int_xtensa_mula_da_lh_ldinc timm:$mw, AR:$s, timm:$mx, AR:$t)]>; + def MULA_DA_HH_LDINC_P : Pseudo<(outs), (ins imm8:$mw, AR:$s, imm8:$mx, AR:$t), + 
"!xtensa_mula_da_hh_ldinc_p, $mw, $s, $mx, $t", + [(int_xtensa_mula_da_hh_ldinc timm:$mw, AR:$s, timm:$mx, AR:$t)]>; +} class MULA_DD_LDDEC oper1, string instrAsm> : RRR_Inst<0x04, oper1, 0x01, (outs MR:$w, AR:$d), (ins AR:$s, MR01:$x, MR23:$y), @@ -306,6 +340,21 @@ def MULA_DD_HL_LDDEC : MULA_DD_LDDEC<0x09, "mula.dd.hl.lddec">; def MULA_DD_LH_LDDEC : MULA_DD_LDDEC<0x0A, "mula.dd.lh.lddec">; def MULA_DD_HH_LDDEC : MULA_DD_LDDEC<0x0B, "mula.dd.hh.lddec">; +let usesCustomInserter = 1, Predicates = [HasMAC16] in { + def MULA_DD_LL_LDDEC_P : Pseudo<(outs), (ins imm8:$mw, AR:$s, imm8:$mx, imm8:$my), + "!xtensa_mula_dd_ll_lddec_p, $mw, $s, $mx, $my", + [(int_xtensa_mula_dd_ll_lddec timm:$mw, AR:$s, timm:$mx, timm:$my)]>; + def MULA_DD_HL_LDDEC_P : Pseudo<(outs), (ins imm8:$mw, AR:$s, imm8:$mx, imm8:$my), + "!xtensa_mula_dd_hl_lddec_p, $mw, $s, $mx, $my", + [(int_xtensa_mula_dd_hl_lddec timm:$mw, AR:$s, timm:$mx, timm:$my)]>; + def MULA_DD_LH_LDDEC_P : Pseudo<(outs), (ins imm8:$mw, AR:$s, imm8:$mx, imm8:$my), + "!xtensa_mula_dd_lh_lddec_p, $mw, $s, $mx, $my", + [(int_xtensa_mula_dd_lh_lddec timm:$mw, AR:$s, timm:$mx, timm:$my)]>; + def MULA_DD_HH_LDDEC_P : Pseudo<(outs), (ins imm8:$mw, AR:$s, imm8:$mx, imm8:$my), + "!xtensa_mula_dd_hh_lddec_p, $mw, $s, $mx, $my", + [(int_xtensa_mula_dd_hh_lddec timm:$mw, AR:$s, timm:$mx, timm:$my)]>; +} + class MULA_DD_LDINC oper1, string instrAsm> : RRR_Inst<0x04, oper1, 0x00, (outs MR:$w, AR:$d), (ins AR:$s, MR01:$x, MR23:$y), instrAsm#"\t $w, $s, $x, $y", []>, Requires<[HasMAC16]> { @@ -330,6 +379,21 @@ def MULA_DD_HL_LDINC : MULA_DD_LDINC<0x09, "mula.dd.hl.ldinc">; def MULA_DD_LH_LDINC : MULA_DD_LDINC<0x0A, "mula.dd.lh.ldinc">; def MULA_DD_HH_LDINC : MULA_DD_LDINC<0x0B, "mula.dd.hh.ldinc">; +let usesCustomInserter = 1, Predicates = [HasMAC16] in { + def MULA_DD_LL_LDINC_P : Pseudo<(outs), (ins imm8:$mw, AR:$s, imm8:$mx, imm8:$my), + "!xtensa_mula_dd_ll_ldinc_p, $mw, $s, $mx, $my", + [(int_xtensa_mula_dd_ll_ldinc timm:$mw, AR:$s, 
timm:$mx, timm:$my)]>; + def MULA_DD_HL_LDINC_P : Pseudo<(outs), (ins imm8:$mw, AR:$s, imm8:$mx, imm8:$my), + "!xtensa_mula_dd_hl_ldinc_p, $mw, $s, $mx, $my", + [(int_xtensa_mula_dd_hl_ldinc timm:$mw, AR:$s, timm:$mx, timm:$my)]>; + def MULA_DD_LH_LDINC_P : Pseudo<(outs), (ins imm8:$mw, AR:$s, imm8:$mx, imm8:$my), + "!xtensa_mula_dd_lh_ldinc_p, $mw, $s, $mx, $my", + [(int_xtensa_mula_dd_lh_ldinc timm:$mw, AR:$s, timm:$mx, timm:$my)]>; + def MULA_DD_HH_LDINC_P : Pseudo<(outs), (ins imm8:$mw, AR:$s, imm8:$mx, imm8:$my), + "!xtensa_mula_dd_hh_ldinc_p, $mw, $s, $mx, $my", + [(int_xtensa_mula_dd_hh_ldinc timm:$mw, AR:$s, timm:$mx, timm:$my)]>; +} + def LDDEC : RRR_Inst<0x04, 0x00, 0x09, (outs MR:$w, AR:$d), (ins AR:$s), "lddec\t $w, $s", []>, Requires<[HasMAC16]> { bits<2> w; @@ -351,3 +415,58 @@ def LDINC : RRR_Inst<0x04, 0x00, 0x08, (outs MR:$w, AR:$d), (ins AR:$s), let r{1-0} = w{1-0}; let t = 0; } + +let usesCustomInserter = 1, Predicates = [HasMAC16] in { + def LDDEC_P : Pseudo<(outs), (ins imm8:$mw, AR:$s), + "!xtensa_lddec_p, $mw, $s", + [(int_xtensa_lddec timm:$mw, AR:$s)]>; + def LDINC_P : Pseudo<(outs), (ins imm8:$mw, AR:$s), + "!xtensa_ldinc_p, $mw, $s", + [(int_xtensa_ldinc timm:$mw, AR:$s)]>; +} + +def : Pat<(i32 (int_xtensa_rsr_acclo)), (RSR ACCLO)>; +def : Pat<(i32 (int_xtensa_rsr_acchi)), (RSR ACCHI)>; +def : Pat<(i32 (int_xtensa_rsr_m0)), (RSR M0)>; +def : Pat<(i32 (int_xtensa_rsr_m1)), (RSR M1)>; +def : Pat<(i32 (int_xtensa_rsr_m2)), (RSR M2)>; +def : Pat<(i32 (int_xtensa_rsr_m3)), (RSR M3)>; + +let usesCustomInserter = 1, Predicates = [HasMAC16] in { + def XSR_ACCLO_P : Pseudo<(outs), (ins AR:$s), + "!xtensa_xsr_acclo_p, $s", + [(int_xtensa_xsr_acclo AR:$s)]>; + def XSR_ACCHI_P : Pseudo<(outs), (ins AR:$s), + "!xtensa_xsr_acchi_p, $s", + [(int_xtensa_xsr_acchi AR:$s)]>; + def XSR_M0_P : Pseudo<(outs), (ins AR:$s), + "!xtensa_xsr_m0_p, $s", + [(int_xtensa_xsr_m0 AR:$s)]>; + def XSR_M1_P : Pseudo<(outs), (ins AR:$s), + "!xtensa_xsr_m1_p, $s", + 
[(int_xtensa_xsr_m1 AR:$s)]>; + def XSR_M2_P : Pseudo<(outs), (ins AR:$s), + "!xtensa_xsr_m2_p, $s", + [(int_xtensa_xsr_m2 AR:$s)]>; + def XSR_M3_P : Pseudo<(outs), (ins AR:$s), + "!xtensa_xsr_m3_p, $s", + [(int_xtensa_xsr_m3 AR:$s)]>; + def WSR_ACCLO_P : Pseudo<(outs), (ins AR:$s), + "!xtensa_wsr_acclo_p, $s", + [(int_xtensa_wsr_acclo AR:$s)]>; + def WSR_ACCHI_P : Pseudo<(outs), (ins AR:$s), + "!xtensa_wsr_acchi_p, $s", + [(int_xtensa_wsr_acchi AR:$s)]>; + def WSR_M0_P : Pseudo<(outs), (ins AR:$s), + "!xtensa_wsr_m0_p, $s", + [(int_xtensa_wsr_m0 AR:$s)]>; + def WSR_M1_P : Pseudo<(outs), (ins AR:$s), + "!xtensa_wsr_m1_p, $s", + [(int_xtensa_wsr_m1 AR:$s)]>; + def WSR_M2_P : Pseudo<(outs), (ins AR:$s), + "!xtensa_wsr_m2_p, $s", + [(int_xtensa_wsr_m2 AR:$s)]>; + def WSR_M3_P : Pseudo<(outs), (ins AR:$s), + "!xtensa_wsr_m3_p, $s", + [(int_xtensa_wsr_m3 AR:$s)]>; +} diff --git a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp index 3d93a2cd1c516..e4bc8eddf3945 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp @@ -13,6 +13,7 @@ #include "Xtensa.h" #include "XtensaTargetMachine.h" #include "XtensaUtils.h" +#include "llvm/IR/IntrinsicsXtensa.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/SelectionDAGISel.h" @@ -136,6 +137,7 @@ FunctionPass *llvm::createXtensaISelDag(XtensaTargetMachine &TM, void XtensaDAGToDAGISel::Select(SDNode *Node) { SDLoc DL(Node); EVT VT = Node->getValueType(0); + const unsigned MRTable[] = {Xtensa::M0, Xtensa::M1, Xtensa::M2, Xtensa::M3}; // If we have a custom node, we already have selected! 
if (Node->isMachineOpcode()) { @@ -218,6 +220,253 @@ void XtensaDAGToDAGISel::Select(SDNode *Node) { ReplaceNode(Node, SRC); return; } + case ISD::INTRINSIC_VOID: { + unsigned IntNo = cast(Node->getOperand(1))->getZExtValue(); + switch (IntNo) { + default: + break; + case Intrinsic::xtensa_mul_da_ll: + case Intrinsic::xtensa_mul_da_lh: + case Intrinsic::xtensa_mul_da_hl: + case Intrinsic::xtensa_mul_da_hh: + case Intrinsic::xtensa_mula_da_ll: + case Intrinsic::xtensa_mula_da_lh: + case Intrinsic::xtensa_mula_da_hl: + case Intrinsic::xtensa_mula_da_hh: + case Intrinsic::xtensa_muls_da_ll: + case Intrinsic::xtensa_muls_da_lh: + case Intrinsic::xtensa_muls_da_hl: + case Intrinsic::xtensa_muls_da_hh: { + SDValue ChainIn = Node->getOperand(0); + SDValue ValueMX = Node->getOperand(2); + SDValue ValueT = Node->getOperand(3); + unsigned OpCode; + + switch (IntNo) { + case Intrinsic::xtensa_mul_da_ll: + OpCode = Xtensa::MUL_DA_LL; + break; + case Intrinsic::xtensa_mul_da_lh: + OpCode = Xtensa::MUL_DA_LH; + break; + case Intrinsic::xtensa_mul_da_hl: + OpCode = Xtensa::MUL_DA_HL; + break; + case Intrinsic::xtensa_mul_da_hh: + OpCode = Xtensa::MUL_DA_HH; + break; + case Intrinsic::xtensa_mula_da_ll: + OpCode = Xtensa::MULA_DA_LL; + break; + case Intrinsic::xtensa_mula_da_lh: + OpCode = Xtensa::MULA_DA_LH; + break; + case Intrinsic::xtensa_mula_da_hl: + OpCode = Xtensa::MULA_DA_HL; + break; + case Intrinsic::xtensa_mula_da_hh: + OpCode = Xtensa::MULA_DA_HH; + break; + case Intrinsic::xtensa_muls_da_ll: + OpCode = Xtensa::MULS_DA_LL; + break; + case Intrinsic::xtensa_muls_da_lh: + OpCode = Xtensa::MULS_DA_LH; + break; + case Intrinsic::xtensa_muls_da_hl: + OpCode = Xtensa::MULS_DA_HL; + break; + case Intrinsic::xtensa_muls_da_hh: + OpCode = Xtensa::MULS_DA_HH; + break; + } + + uint64_t MXVal = 4; + if (ValueMX.getOpcode() == ISD::TargetConstant) { + MXVal = cast(ValueMX)->getZExtValue(); + } + + assert( + (MXVal < 2) && + "Unexpected value of mul*_da* first argument, it must be 
m0 or m1"); + unsigned MXReg = MRTable[MXVal]; + + const EVT MULAResTys[] = {MVT::Other}; + SmallVector MULAOps; + MULAOps.push_back(CurDAG->getRegister(MXReg, MVT::i32)); + MULAOps.push_back(ValueT); + MULAOps.push_back(ChainIn); + + SDNode *MULA = CurDAG->getMachineNode(OpCode, DL, MULAResTys, MULAOps); + ReplaceNode(Node, MULA); + return; + } + case Intrinsic::xtensa_mul_ad_ll: + case Intrinsic::xtensa_mul_ad_lh: + case Intrinsic::xtensa_mul_ad_hl: + case Intrinsic::xtensa_mul_ad_hh: + case Intrinsic::xtensa_mula_ad_ll: + case Intrinsic::xtensa_mula_ad_lh: + case Intrinsic::xtensa_mula_ad_hl: + case Intrinsic::xtensa_mula_ad_hh: + case Intrinsic::xtensa_muls_ad_ll: + case Intrinsic::xtensa_muls_ad_lh: + case Intrinsic::xtensa_muls_ad_hl: + case Intrinsic::xtensa_muls_ad_hh: { + SDValue ChainIn = Node->getOperand(0); + SDValue ValueS = Node->getOperand(2); + SDValue ValueMY = Node->getOperand(3); + unsigned OpCode; + + switch (IntNo) { + case Intrinsic::xtensa_mul_ad_ll: + OpCode = Xtensa::MUL_AD_LL; + break; + case Intrinsic::xtensa_mul_ad_lh: + OpCode = Xtensa::MUL_AD_LH; + break; + case Intrinsic::xtensa_mul_ad_hl: + OpCode = Xtensa::MUL_AD_HL; + break; + case Intrinsic::xtensa_mul_ad_hh: + OpCode = Xtensa::MUL_AD_HH; + break; + case Intrinsic::xtensa_mula_ad_ll: + OpCode = Xtensa::MULA_AD_LL; + break; + case Intrinsic::xtensa_mula_ad_lh: + OpCode = Xtensa::MULA_AD_LH; + break; + case Intrinsic::xtensa_mula_ad_hl: + OpCode = Xtensa::MULA_AD_HL; + break; + case Intrinsic::xtensa_mula_ad_hh: + OpCode = Xtensa::MULA_AD_HH; + break; + case Intrinsic::xtensa_muls_ad_ll: + OpCode = Xtensa::MULS_AD_LL; + break; + case Intrinsic::xtensa_muls_ad_lh: + OpCode = Xtensa::MULS_AD_LH; + break; + case Intrinsic::xtensa_muls_ad_hl: + OpCode = Xtensa::MULS_AD_HL; + break; + case Intrinsic::xtensa_muls_ad_hh: + OpCode = Xtensa::MULS_AD_HH; + break; + } + + uint64_t MYVal = 4; + if (ValueMY.getOpcode() == ISD::TargetConstant) { + MYVal = cast(ValueMY)->getZExtValue(); + } + + 
assert( + ((MYVal > 1) && (MYVal < 4)) && + "Unexpected value of mul*_ad* second argument, it must be m2 or m3"); + unsigned MYReg = MRTable[MYVal]; + + const EVT MULAResTys[] = {MVT::Other}; + SmallVector MULAOps; + MULAOps.push_back(ValueS); + MULAOps.push_back(CurDAG->getRegister(MYReg, MVT::i32)); + MULAOps.push_back(ChainIn); + + SDNode *MULA = CurDAG->getMachineNode(OpCode, DL, MULAResTys, MULAOps); + ReplaceNode(Node, MULA); + return; + } + case Intrinsic::xtensa_mul_dd_ll: + case Intrinsic::xtensa_mul_dd_lh: + case Intrinsic::xtensa_mul_dd_hl: + case Intrinsic::xtensa_mul_dd_hh: + case Intrinsic::xtensa_mula_dd_ll: + case Intrinsic::xtensa_mula_dd_lh: + case Intrinsic::xtensa_mula_dd_hl: + case Intrinsic::xtensa_mula_dd_hh: + case Intrinsic::xtensa_muls_dd_ll: + case Intrinsic::xtensa_muls_dd_lh: + case Intrinsic::xtensa_muls_dd_hl: + case Intrinsic::xtensa_muls_dd_hh: { + SDValue ChainIn = Node->getOperand(0); + SDValue ValueMX = Node->getOperand(2); + SDValue ValueMY = Node->getOperand(3); + unsigned OpCode; + + switch (IntNo) { + case Intrinsic::xtensa_mul_dd_ll: + OpCode = Xtensa::MUL_DD_LL; + break; + case Intrinsic::xtensa_mul_dd_lh: + OpCode = Xtensa::MUL_DD_LH; + break; + case Intrinsic::xtensa_mul_dd_hl: + OpCode = Xtensa::MUL_DD_HL; + break; + case Intrinsic::xtensa_mul_dd_hh: + OpCode = Xtensa::MUL_DD_HH; + break; + case Intrinsic::xtensa_mula_dd_ll: + OpCode = Xtensa::MULA_DD_LL; + break; + case Intrinsic::xtensa_mula_dd_lh: + OpCode = Xtensa::MULA_DD_LH; + break; + case Intrinsic::xtensa_mula_dd_hl: + OpCode = Xtensa::MULA_DD_HL; + break; + case Intrinsic::xtensa_mula_dd_hh: + OpCode = Xtensa::MULA_DD_HH; + break; + case Intrinsic::xtensa_muls_dd_ll: + OpCode = Xtensa::MULS_DD_LL; + break; + case Intrinsic::xtensa_muls_dd_lh: + OpCode = Xtensa::MULS_DD_LH; + break; + case Intrinsic::xtensa_muls_dd_hl: + OpCode = Xtensa::MULS_DD_HL; + break; + case Intrinsic::xtensa_muls_dd_hh: + OpCode = Xtensa::MULS_DD_HH; + break; + } + uint64_t MXVal = 4; + 
if (ValueMX.getOpcode() == ISD::TargetConstant) { + MXVal = cast(ValueMX)->getZExtValue(); + } + + assert( + (MXVal < 2) && + "Unexpected value of mul*_dd* first argument, it must be m0 or m1"); + unsigned MXReg = MRTable[MXVal]; + + uint64_t MYVal = 4; + if (ValueMY.getOpcode() == ISD::TargetConstant) { + MYVal = cast(ValueMY)->getZExtValue(); + } + + assert( + ((MYVal > 1) && (MYVal < 4)) && + "Unexpected value of mul*_dd* second argument, it must be m2 or m3"); + unsigned MYReg = MRTable[MYVal]; + + const EVT MULAResTys[] = {MVT::Other}; + SmallVector MULAOps; + MULAOps.push_back(CurDAG->getRegister(MXReg, MVT::i32)); + MULAOps.push_back(CurDAG->getRegister(MYReg, MVT::i32)); + MULAOps.push_back(ChainIn); + + SDNode *MULA = CurDAG->getMachineNode(OpCode, DL, MULAResTys, MULAOps); + ReplaceNode(Node, MULA); + return; + } + } + break; + } + default: + break; } SelectCode(Node); diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 270e28cf4a5c9..9d784682adef5 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -2685,6 +2685,268 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( DebugLoc DL = MI.getDebugLoc(); switch (MI.getOpcode()) { + case Xtensa::MULA_DA_LL_LDDEC_P: + case Xtensa::MULA_DA_LH_LDDEC_P: + case Xtensa::MULA_DA_HL_LDDEC_P: + case Xtensa::MULA_DA_HH_LDDEC_P: + case Xtensa::MULA_DA_LL_LDINC_P: + case Xtensa::MULA_DA_LH_LDINC_P: + case Xtensa::MULA_DA_HL_LDINC_P: + case Xtensa::MULA_DA_HH_LDINC_P: { + MachineOperand &MW = MI.getOperand(0); + MachineOperand &S = MI.getOperand(1); + MachineOperand &MX = MI.getOperand(2); + MachineOperand &T = MI.getOperand(3); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned Reg1 = MRI.createVirtualRegister(RC); + unsigned Reg2 = MRI.createVirtualRegister(RC); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::L32I), Reg1) + .addReg(S.getReg()) + .addImm(0); + + 
unsigned Opc; + switch (MI.getOpcode()) { + case Xtensa::MULA_DA_LL_LDDEC_P: + Opc = Xtensa::MULA_DA_LL_LDDEC; + break; + case Xtensa::MULA_DA_LH_LDDEC_P: + Opc = Xtensa::MULA_DA_LH_LDDEC; + break; + case Xtensa::MULA_DA_HL_LDDEC_P: + Opc = Xtensa::MULA_DA_HL_LDDEC; + break; + case Xtensa::MULA_DA_HH_LDDEC_P: + Opc = Xtensa::MULA_DA_HH_LDDEC; + break; + case Xtensa::MULA_DA_LL_LDINC_P: + Opc = Xtensa::MULA_DA_LL_LDINC; + break; + case Xtensa::MULA_DA_LH_LDINC_P: + Opc = Xtensa::MULA_DA_LH_LDINC; + break; + case Xtensa::MULA_DA_HL_LDINC_P: + Opc = Xtensa::MULA_DA_HL_LDINC; + break; + case Xtensa::MULA_DA_HH_LDINC_P: + Opc = Xtensa::MULA_DA_HH_LDINC; + break; + } + + unsigned MWVal = MW.getImm(); + assert((MWVal < 4) && "Unexpected value of mula_da*ld* first argument, it " + "must be from m0..m3"); + unsigned MXVal = MX.getImm(); + assert((MXVal < 2) && "Unexpected value of mula_da*ld* third " + "argument, it must be m0 or m1"); + + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::M0 + MWVal, RegState::Define) + .addReg(Reg2, RegState::Define) + .addReg(Reg1) + .addReg(Xtensa::M0 + MXVal) + .addReg(T.getReg()); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::S32I)) + .addReg(Reg2) + .addReg(S.getReg()) + .addImm(0); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::MULA_DD_LL_LDDEC_P: + case Xtensa::MULA_DD_LH_LDDEC_P: + case Xtensa::MULA_DD_HL_LDDEC_P: + case Xtensa::MULA_DD_HH_LDDEC_P: + case Xtensa::MULA_DD_LL_LDINC_P: + case Xtensa::MULA_DD_LH_LDINC_P: + case Xtensa::MULA_DD_HL_LDINC_P: + case Xtensa::MULA_DD_HH_LDINC_P: { + MachineOperand &MW = MI.getOperand(0); + MachineOperand &S = MI.getOperand(1); + MachineOperand &MX = MI.getOperand(2); + MachineOperand &MY = MI.getOperand(3); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned Reg1 = MRI.createVirtualRegister(RC); + unsigned Reg2 = MRI.createVirtualRegister(RC); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::L32I), Reg1) + .addReg(S.getReg()) + .addImm(0); + + unsigned Opc; + switch 
(MI.getOpcode()) { + case Xtensa::MULA_DD_LL_LDDEC_P: + Opc = Xtensa::MULA_DD_LL_LDDEC; + break; + case Xtensa::MULA_DD_LH_LDDEC_P: + Opc = Xtensa::MULA_DD_LH_LDDEC; + break; + case Xtensa::MULA_DD_HL_LDDEC_P: + Opc = Xtensa::MULA_DD_HL_LDDEC; + break; + case Xtensa::MULA_DD_HH_LDDEC_P: + Opc = Xtensa::MULA_DD_HH_LDDEC; + break; + case Xtensa::MULA_DD_LL_LDINC_P: + Opc = Xtensa::MULA_DD_LL_LDINC; + break; + case Xtensa::MULA_DD_LH_LDINC_P: + Opc = Xtensa::MULA_DD_LH_LDINC; + break; + case Xtensa::MULA_DD_HL_LDINC_P: + Opc = Xtensa::MULA_DD_HL_LDINC; + break; + case Xtensa::MULA_DD_HH_LDINC_P: + Opc = Xtensa::MULA_DD_HH_LDINC; + break; + } + + unsigned MWVal = MW.getImm(); + assert((MWVal < 4) && "Unexpected value of mula_dd*ld* first argument, " + "it must be from m0..m3"); + unsigned MXVal = MX.getImm(); + assert((MXVal < 2) && "Unexpected value of mula_dd*ld* third " + "argument, it must be m0 or m1"); + unsigned MYVal = MY.getImm(); + assert(((MYVal > 1) && (MYVal < 4)) && + "Unexpected value of mula_dd*ld* fourth " + "argument, it must be m2 or m3"); + + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::M0 + MWVal, RegState::Define) + .addReg(Reg2, RegState::Define) + .addReg(Reg1) + .addReg(Xtensa::M0 + MXVal) + .addReg(Xtensa::M0 + MYVal); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::S32I)) + .addReg(Reg2) + .addReg(S.getReg()) + .addImm(0); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::XSR_ACCLO_P: + case Xtensa::XSR_ACCHI_P: + case Xtensa::XSR_M0_P: + case Xtensa::XSR_M1_P: + case Xtensa::XSR_M2_P: + case Xtensa::XSR_M3_P: { + MachineOperand &T = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned Reg1 = MRI.createVirtualRegister(RC); + unsigned Reg2 = MRI.createVirtualRegister(RC); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::L32I), Reg1) + .addReg(T.getReg()) + .addImm(0); + + unsigned SReg; + switch (MI.getOpcode()) { + case Xtensa::XSR_ACCLO_P: + SReg = Xtensa::ACCLO; + break; + case Xtensa::XSR_ACCHI_P: + 
SReg = Xtensa::ACCHI; + break; + case Xtensa::XSR_M0_P: + SReg = Xtensa::M0; + break; + case Xtensa::XSR_M1_P: + SReg = Xtensa::M1; + break; + case Xtensa::XSR_M2_P: + SReg = Xtensa::M2; + break; + case Xtensa::XSR_M3_P: + SReg = Xtensa::M3; + break; + } + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::XSR)) + .addReg(Reg2, RegState::Define) + .addReg(SReg, RegState::Define) + .addReg(Reg1) + .addReg(SReg); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::S32I)) + .addReg(Reg2) + .addReg(T.getReg()) + .addImm(0); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::WSR_ACCLO_P: + case Xtensa::WSR_ACCHI_P: + case Xtensa::WSR_M0_P: + case Xtensa::WSR_M1_P: + case Xtensa::WSR_M2_P: + case Xtensa::WSR_M3_P: { + MachineOperand &T = MI.getOperand(0); + + unsigned SReg; + switch (MI.getOpcode()) { + case Xtensa::WSR_ACCLO_P: + SReg = Xtensa::ACCLO; + break; + case Xtensa::WSR_ACCHI_P: + SReg = Xtensa::ACCHI; + break; + case Xtensa::WSR_M0_P: + SReg = Xtensa::M0; + break; + case Xtensa::WSR_M1_P: + SReg = Xtensa::M1; + break; + case Xtensa::WSR_M2_P: + SReg = Xtensa::M2; + break; + case Xtensa::WSR_M3_P: + SReg = Xtensa::M3; + break; + } + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::WSR)) + .addReg(SReg, RegState::Define) + .addReg(T.getReg()); + MI.eraseFromParent(); + return MBB; + } + case Xtensa::LDDEC_P: + case Xtensa::LDINC_P: { + MachineOperand &MW = MI.getOperand(0); + MachineOperand &S = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned Reg1 = MRI.createVirtualRegister(RC); + unsigned Reg2 = MRI.createVirtualRegister(RC); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::L32I), Reg1) + .addReg(S.getReg()) + .addImm(0); + + unsigned Opc = Xtensa::LDDEC; + + if (MI.getOpcode() == Xtensa::LDINC_P) + Opc = Xtensa::LDINC; + + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::M0 + MW.getImm(), RegState::Define) + .addReg(Reg2, RegState::Define) + .addReg(Reg1); + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::S32I)) + .addReg(Reg2) + .addReg(S.getReg()) 
+ .addImm(0); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::SELECT_CC_FP_FP: case Xtensa::SELECT_CC_FP_INT: case Xtensa::SELECT_CC_INT_FP: diff --git a/llvm/test/CodeGen/Xtensa/mac16_intrinsics.ll b/llvm/test/CodeGen/Xtensa/mac16_intrinsics.ll new file mode 100644 index 0000000000000..fd58c76872aff --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/mac16_intrinsics.ll @@ -0,0 +1,319 @@ +; RUN: llc -O1 -mtriple=xtensa -mcpu=esp32 %s -o - | FileCheck %s + +define void @test_xtensa_umul(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: test_xtensa_umul +; CHECK: umul.aa.ll a2, a3 + call void @llvm.xtensa.umul.aa.ll(i32 %a, i32 %b) +; CHECK: umul.aa.lh a2, a3 + call void @llvm.xtensa.umul.aa.lh(i32 %a, i32 %b) +; CHECK: umul.aa.hl a2, a3 + call void @llvm.xtensa.umul.aa.hl(i32 %a, i32 %b) +; CHECK: umul.aa.hh a2, a3 + call void @llvm.xtensa.umul.aa.hh(i32 %a, i32 %b) + ret void +} + +define void @test_xtensa_mul(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: test_xtensa_mul +; CHECK: mul.aa.ll a2, a3 + call void @llvm.xtensa.mul.aa.ll(i32 %a, i32 %b) +; CHECK: mul.aa.lh a2, a3 + call void @llvm.xtensa.mul.aa.lh(i32 %a, i32 %b) +; CHECK: mul.aa.hl a2, a3 + call void @llvm.xtensa.mul.aa.hl(i32 %a, i32 %b) +; CHECK: mul.aa.hh a2, a3 + call void @llvm.xtensa.mul.aa.hh(i32 %a, i32 %b) +; CHECK: mul.ad.ll a2, m2 + call void @llvm.xtensa.mul.ad.ll(i32 %a, i32 2) +; CHECK: mul.ad.lh a2, m2 + call void @llvm.xtensa.mul.ad.lh(i32 %a, i32 2) +; CHECK: mul.ad.hl a2, m2 + call void @llvm.xtensa.mul.ad.hl(i32 %a, i32 2) +; CHECK: mul.ad.hh a2, m2 + call void @llvm.xtensa.mul.ad.hh(i32 %a, i32 2) +; CHECK: mul.da.ll m1, a3 + call void @llvm.xtensa.mul.da.ll(i32 1, i32 %b) +; CHECK: mul.da.lh m1, a3 + call void @llvm.xtensa.mul.da.lh(i32 1, i32 %b) +; CHECK: mul.da.hl m1, a3 + call void @llvm.xtensa.mul.da.hl(i32 1, i32 %b) +; CHECK: mul.da.hh m1, a3 + call void @llvm.xtensa.mul.da.hh(i32 1, i32 %b) +; CHECK: mul.dd.ll m1, m2 + call void @llvm.xtensa.mul.dd.ll(i32 1, i32 2) +; CHECK: 
mul.dd.lh m1, m2 + call void @llvm.xtensa.mul.dd.lh(i32 1, i32 2) +; CHECK: mul.dd.hl m1, m2 + call void @llvm.xtensa.mul.dd.hl(i32 1, i32 2) +; CHECK: mul.dd.hh m1, m2 + call void @llvm.xtensa.mul.dd.hh(i32 1, i32 2) + ret void +} + +define void @test_xtensa_mula(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: test_xtensa_mula +; CHECK: mula.aa.ll a2, a3 + call void @llvm.xtensa.mula.aa.ll(i32 %a, i32 %b) +; CHECK: mula.aa.lh a2, a3 + call void @llvm.xtensa.mula.aa.lh(i32 %a, i32 %b) +; CHECK: mula.aa.hl a2, a3 + call void @llvm.xtensa.mula.aa.hl(i32 %a, i32 %b) +; CHECK: mula.aa.hh a2, a3 + call void @llvm.xtensa.mula.aa.hh(i32 %a, i32 %b) +; CHECK: mula.ad.ll a2, m2 + call void @llvm.xtensa.mula.ad.ll(i32 %a, i32 2) +; CHECK: mula.ad.lh a2, m2 + call void @llvm.xtensa.mula.ad.lh(i32 %a, i32 2) +; CHECK: mula.ad.hl a2, m2 + call void @llvm.xtensa.mula.ad.hl(i32 %a, i32 2) +; CHECK: mula.ad.hh a2, m2 + call void @llvm.xtensa.mula.ad.hh(i32 %a, i32 2) +; CHECK: mula.da.ll m1, a3 + call void @llvm.xtensa.mula.da.ll(i32 1, i32 %b) +; CHECK: mula.da.lh m1, a3 + call void @llvm.xtensa.mula.da.lh(i32 1, i32 %b) +; CHECK: mula.da.hl m1, a3 + call void @llvm.xtensa.mula.da.hl(i32 1, i32 %b) +; CHECK: mula.da.hh m1, a3 + call void @llvm.xtensa.mula.da.hh(i32 1, i32 %b) +; CHECK: mula.dd.ll m1, m2 + call void @llvm.xtensa.mula.dd.ll(i32 1, i32 2) +; CHECK: mula.dd.lh m1, m2 + call void @llvm.xtensa.mula.dd.lh(i32 1, i32 2) +; CHECK: mula.dd.hl m1, m2 + call void @llvm.xtensa.mula.dd.hl(i32 1, i32 2) +; CHECK: mula.dd.hh m1, m2 + call void @llvm.xtensa.mula.dd.hh(i32 1, i32 2) + ret void +} + +define void @test_xtensa_muls(i32 %a, i32 %b) nounwind { +; CHECK-LABEL: test_xtensa_muls +; CHECK: muls.aa.ll a2, a3 + call void @llvm.xtensa.muls.aa.ll(i32 %a, i32 %b) +; CHECK: muls.aa.lh a2, a3 + call void @llvm.xtensa.muls.aa.lh(i32 %a, i32 %b) +; CHECK: muls.aa.hl a2, a3 + call void @llvm.xtensa.muls.aa.hl(i32 %a, i32 %b) +; CHECK: muls.aa.hh a2, a3 + call void 
@llvm.xtensa.muls.aa.hh(i32 %a, i32 %b) +; CHECK: muls.ad.ll a2, m2 + call void @llvm.xtensa.muls.ad.ll(i32 %a, i32 2) +; CHECK: muls.ad.lh a2, m2 + call void @llvm.xtensa.muls.ad.lh(i32 %a, i32 2) +; CHECK: muls.ad.hl a2, m2 + call void @llvm.xtensa.muls.ad.hl(i32 %a, i32 2) +; CHECK: muls.ad.hh a2, m2 + call void @llvm.xtensa.muls.ad.hh(i32 %a, i32 2) +; CHECK: muls.da.ll m1, a3 + call void @llvm.xtensa.muls.da.ll(i32 1, i32 %b) +; CHECK: muls.da.lh m1, a3 + call void @llvm.xtensa.muls.da.lh(i32 1, i32 %b) +; CHECK: muls.da.hl m1, a3 + call void @llvm.xtensa.muls.da.hl(i32 1, i32 %b) +; CHECK: muls.da.hh m1, a3 + call void @llvm.xtensa.muls.da.hh(i32 1, i32 %b) +; CHECK: muls.dd.ll m1, m2 + call void @llvm.xtensa.muls.dd.ll(i32 1, i32 2) +; CHECK: muls.dd.lh m1, m2 + call void @llvm.xtensa.muls.dd.lh(i32 1, i32 2) +; CHECK: muls.dd.hl m1, m2 + call void @llvm.xtensa.muls.dd.hl(i32 1, i32 2) +; CHECK: muls.dd.hh m1, m2 + call void @llvm.xtensa.muls.dd.hh(i32 1, i32 2) + ret void +} + +define void @test_xtensa_mula_ld(i32 %pa.coerce, i32 %b) nounwind { +; CHECK-LABEL: test_xtensa_mula_ld +entry: + %0 = inttoptr i32 %pa.coerce to i8* +; CHECK: mula.da.ll.lddec m1, a{{[0-9]+}}, m0, a3 + call void @llvm.xtensa.mula.da.ll.lddec(i32 1, i8* %0, i32 0, i32 %b) +; CHECK: mula.da.lh.lddec m1, a{{[0-9]+}}, m0, a3 + call void @llvm.xtensa.mula.da.lh.lddec(i32 1, i8* %0, i32 0, i32 %b) +; CHECK: mula.da.hl.lddec m1, a{{[0-9]+}}, m0, a3 + call void @llvm.xtensa.mula.da.hl.lddec(i32 1, i8* %0, i32 0, i32 %b) +; CHECK: mula.da.hh.lddec m1, a{{[0-9]+}}, m0, a3 + call void @llvm.xtensa.mula.da.hh.lddec(i32 1, i8* %0, i32 0, i32 %b) +; CHECK: mula.dd.ll.lddec m1, a{{[0-9]+}}, m0, m2 + call void @llvm.xtensa.mula.dd.ll.lddec(i32 1, i8* %0, i32 0, i32 2) +; CHECK: mula.dd.lh.lddec m1, a{{[0-9]+}}, m0, m2 + call void @llvm.xtensa.mula.dd.lh.lddec(i32 1, i8* %0, i32 0, i32 2) +; CHECK: mula.dd.hl.lddec m1, a{{[0-9]+}}, m0, m2 + call void @llvm.xtensa.mula.dd.hl.lddec(i32 1, i8* %0, i32 
0, i32 2) +; CHECK: mula.dd.hh.lddec m1, a{{[0-9]+}}, m0, m2 + call void @llvm.xtensa.mula.dd.hh.lddec(i32 1, i8* %0, i32 0, i32 2) +; CHECK: mula.da.ll.ldinc m1, a{{[0-9]+}}, m0, a3 + call void @llvm.xtensa.mula.da.ll.ldinc(i32 1, i8* %0, i32 0, i32 %b) +; CHECK: mula.da.lh.ldinc m1, a{{[0-9]+}}, m0, a3 + call void @llvm.xtensa.mula.da.lh.ldinc(i32 1, i8* %0, i32 0, i32 %b) +; CHECK: mula.da.hl.ldinc m1, a{{[0-9]+}}, m0, a3 + call void @llvm.xtensa.mula.da.hl.ldinc(i32 1, i8* %0, i32 0, i32 %b) +; CHECK: mula.da.hh.ldinc m1, a{{[0-9]+}}, m0, a3 + call void @llvm.xtensa.mula.da.hh.ldinc(i32 1, i8* %0, i32 0, i32 %b) +; CHECK: mula.dd.ll.ldinc m1, a{{[0-9]+}}, m0, m2 + call void @llvm.xtensa.mula.dd.ll.ldinc(i32 1, i8* %0, i32 0, i32 2) +; CHECK: mula.dd.lh.ldinc m1, a{{[0-9]+}}, m0, m2 + call void @llvm.xtensa.mula.dd.lh.ldinc(i32 1, i8* %0, i32 0, i32 2) +; CHECK: mula.dd.hl.ldinc m1, a{{[0-9]+}}, m0, m2 + call void @llvm.xtensa.mula.dd.hl.ldinc(i32 1, i8* %0, i32 0, i32 2) +; CHECK: mula.dd.hh.ldinc m1, a{{[0-9]+}}, m0, m2 + call void @llvm.xtensa.mula.dd.hh.ldinc(i32 1, i8* %0, i32 0, i32 2) + ret void +} + +define void @test_xtensa_ld(i32 %pa.coerce) nounwind { +; CHECK-LABEL: test_xtensa_ld +entry: + %0 = inttoptr i32 %pa.coerce to i8* +; CHECK: lddec m0, a{{[0-9]+}} + call void @llvm.xtensa.lddec(i32 0, i8* %0) +; CHECK: ldinc m0, a{{[0-9]+}} + call void @llvm.xtensa.ldinc(i32 0, i8* %0) + ret void +} + +define void @test_xtensa_wsr(i32 %a) { +; CHECK-LABEL: test_xtensa_wsr +; CHECK: wsr a2, acclo + call void @llvm.xtensa.wsr.acclo(i32 %a) +; CHECK: wsr a2, acchi + call void @llvm.xtensa.wsr.acchi(i32 %a) +; CHECK: wsr a2, m0 + call void @llvm.xtensa.wsr.m0(i32 %a) +; CHECK: wsr a2, m1 + call void @llvm.xtensa.wsr.m1(i32 %a) +; CHECK: wsr a2, m2 + call void @llvm.xtensa.wsr.m2(i32 %a) +; CHECK: wsr a2, m3 + call void @llvm.xtensa.wsr.m3(i32 %a) + ret void +} + +define void @test_xtensa_xsr(i32 %a.coerce) { +; CHECK-LABEL: test_xtensa_xsr +entry: + %0 = 
inttoptr i32 %a.coerce to i8* +; CHECK: xsr a{{[0-9]+}}, acclo + call void @llvm.xtensa.xsr.acclo(i8* %0) +; CHECK: xsr a{{[0-9]+}}, acchi + call void @llvm.xtensa.xsr.acchi(i8* %0) +; CHECK: xsr a{{[0-9]+}}, m0 + call void @llvm.xtensa.xsr.m0(i8* %0) +; CHECK: xsr a{{[0-9]+}}, m1 + call void @llvm.xtensa.xsr.m1(i8* %0) +; CHECK: xsr a{{[0-9]+}}, m2 + call void @llvm.xtensa.xsr.m2(i8* %0) +; CHECK: xsr a{{[0-9]+}}, m3 + call void @llvm.xtensa.xsr.m3(i8* %0) + ret void +} + +define void @test_xtensa_rsr() { +; CHECK-LABEL: test_xtensa_rsr +entry: +; CHECK: rsr a{{[0-9]+}}, acclo + %0 = call i32 @llvm.xtensa.rsr.acclo() +; CHECK: rsr a{{[0-9]+}}, acchi + %1 = call i32 @llvm.xtensa.rsr.acchi() +; CHECK: rsr a{{[0-9]+}}, m0 + %2 = call i32 @llvm.xtensa.rsr.m0() +; CHECK: rsr a{{[0-9]+}}, m1 + %3 = call i32 @llvm.xtensa.rsr.m1() +; CHECK: rsr a{{[0-9]+}}, m2 + %4 = call i32 @llvm.xtensa.rsr.m2() +; CHECK: rsr a{{[0-9]+}}, m3 + %5 = call i32 @llvm.xtensa.rsr.m3() + ret void +} + +declare void @llvm.xtensa.umul.aa.ll(i32, i32) nounwind +declare void @llvm.xtensa.umul.aa.lh(i32, i32) nounwind +declare void @llvm.xtensa.umul.aa.hl(i32, i32) nounwind +declare void @llvm.xtensa.umul.aa.hh(i32, i32) nounwind +declare void @llvm.xtensa.mul.aa.ll(i32, i32) nounwind +declare void @llvm.xtensa.mul.aa.lh(i32, i32) nounwind +declare void @llvm.xtensa.mul.aa.hl(i32, i32) nounwind +declare void @llvm.xtensa.mul.aa.hh(i32, i32) nounwind +declare void @llvm.xtensa.mul.ad.ll(i32, i32 immarg) nounwind +declare void @llvm.xtensa.mul.ad.lh(i32, i32 immarg) nounwind +declare void @llvm.xtensa.mul.ad.hl(i32, i32 immarg) nounwind +declare void @llvm.xtensa.mul.ad.hh(i32, i32 immarg) nounwind +declare void @llvm.xtensa.mul.da.ll(i32 immarg, i32) nounwind +declare void @llvm.xtensa.mul.da.lh(i32 immarg, i32) nounwind +declare void @llvm.xtensa.mul.da.hl(i32 immarg, i32) nounwind +declare void @llvm.xtensa.mul.da.hh(i32 immarg, i32) nounwind +declare void @llvm.xtensa.mul.dd.ll(i32 immarg, i32 
immarg) nounwind +declare void @llvm.xtensa.mul.dd.lh(i32 immarg, i32 immarg) nounwind +declare void @llvm.xtensa.mul.dd.hl(i32 immarg, i32 immarg) nounwind +declare void @llvm.xtensa.mul.dd.hh(i32 immarg, i32 immarg) nounwind +declare void @llvm.xtensa.mula.aa.ll(i32, i32) nounwind +declare void @llvm.xtensa.mula.aa.lh(i32, i32) nounwind +declare void @llvm.xtensa.mula.aa.hl(i32, i32) nounwind +declare void @llvm.xtensa.mula.aa.hh(i32, i32) nounwind +declare void @llvm.xtensa.mula.ad.ll(i32, i32 immarg) nounwind +declare void @llvm.xtensa.mula.ad.lh(i32, i32 immarg) nounwind +declare void @llvm.xtensa.mula.ad.hl(i32, i32 immarg) nounwind +declare void @llvm.xtensa.mula.ad.hh(i32, i32 immarg) nounwind +declare void @llvm.xtensa.mula.da.ll(i32 immarg, i32) nounwind +declare void @llvm.xtensa.mula.da.lh(i32 immarg, i32) nounwind +declare void @llvm.xtensa.mula.da.hl(i32 immarg, i32) nounwind +declare void @llvm.xtensa.mula.da.hh(i32 immarg, i32) nounwind +declare void @llvm.xtensa.mula.dd.ll(i32 immarg, i32 immarg) nounwind +declare void @llvm.xtensa.mula.dd.lh(i32 immarg, i32 immarg) nounwind +declare void @llvm.xtensa.mula.dd.hl(i32 immarg, i32 immarg) nounwind +declare void @llvm.xtensa.mula.dd.hh(i32 immarg, i32 immarg) nounwind +declare void @llvm.xtensa.muls.aa.ll(i32, i32) nounwind +declare void @llvm.xtensa.muls.aa.lh(i32, i32) nounwind +declare void @llvm.xtensa.muls.aa.hl(i32, i32) nounwind +declare void @llvm.xtensa.muls.aa.hh(i32, i32) nounwind +declare void @llvm.xtensa.muls.ad.ll(i32, i32 immarg) nounwind +declare void @llvm.xtensa.muls.ad.lh(i32, i32 immarg) nounwind +declare void @llvm.xtensa.muls.ad.hl(i32, i32 immarg) nounwind +declare void @llvm.xtensa.muls.ad.hh(i32, i32 immarg) nounwind +declare void @llvm.xtensa.muls.da.ll(i32 immarg, i32) nounwind +declare void @llvm.xtensa.muls.da.lh(i32 immarg, i32) nounwind +declare void @llvm.xtensa.muls.da.hl(i32 immarg, i32) nounwind +declare void @llvm.xtensa.muls.da.hh(i32 immarg, i32) nounwind +declare 
void @llvm.xtensa.muls.dd.ll(i32 immarg, i32 immarg) nounwind +declare void @llvm.xtensa.muls.dd.lh(i32 immarg, i32 immarg) nounwind +declare void @llvm.xtensa.muls.dd.hl(i32 immarg, i32 immarg) nounwind +declare void @llvm.xtensa.muls.dd.hh(i32 immarg, i32 immarg) nounwind +declare void @llvm.xtensa.mula.da.ll.lddec(i32 immarg, i8*, i32 immarg, i32) nounwind +declare void @llvm.xtensa.mula.da.lh.lddec(i32 immarg, i8*, i32 immarg, i32) nounwind +declare void @llvm.xtensa.mula.da.hl.lddec(i32 immarg, i8*, i32 immarg, i32) nounwind +declare void @llvm.xtensa.mula.da.hh.lddec(i32 immarg, i8*, i32 immarg, i32) nounwind +declare void @llvm.xtensa.mula.dd.ll.lddec(i32 immarg, i8*, i32 immarg, i32 immarg) nounwind +declare void @llvm.xtensa.mula.dd.lh.lddec(i32 immarg, i8*, i32 immarg, i32 immarg) nounwind +declare void @llvm.xtensa.mula.dd.hl.lddec(i32 immarg, i8*, i32 immarg, i32 immarg) nounwind +declare void @llvm.xtensa.mula.dd.hh.lddec(i32 immarg, i8*, i32 immarg, i32 immarg) nounwind +declare void @llvm.xtensa.mula.da.ll.ldinc(i32 immarg, i8*, i32 immarg, i32) nounwind +declare void @llvm.xtensa.mula.da.lh.ldinc(i32 immarg, i8*, i32 immarg, i32) nounwind +declare void @llvm.xtensa.mula.da.hl.ldinc(i32 immarg, i8*, i32 immarg, i32) nounwind +declare void @llvm.xtensa.mula.da.hh.ldinc(i32 immarg, i8*, i32 immarg, i32) nounwind +declare void @llvm.xtensa.mula.dd.ll.ldinc(i32 immarg, i8*, i32 immarg, i32 immarg) nounwind +declare void @llvm.xtensa.mula.dd.lh.ldinc(i32 immarg, i8*, i32 immarg, i32 immarg) nounwind +declare void @llvm.xtensa.mula.dd.hl.ldinc(i32 immarg, i8*, i32 immarg, i32 immarg) nounwind +declare void @llvm.xtensa.mula.dd.hh.ldinc(i32 immarg, i8*, i32 immarg, i32 immarg) nounwind +declare void @llvm.xtensa.lddec(i32 immarg, i8*) nounwind +declare void @llvm.xtensa.ldinc(i32 immarg, i8*) nounwind +declare i32 @llvm.xtensa.rsr.acclo() nounwind +declare i32 @llvm.xtensa.rsr.acchi() nounwind +declare i32 @llvm.xtensa.rsr.m0() nounwind +declare i32 
@llvm.xtensa.rsr.m1() nounwind +declare i32 @llvm.xtensa.rsr.m2() nounwind +declare i32 @llvm.xtensa.rsr.m3() nounwind +declare void @llvm.xtensa.xsr.acclo(i8*) nounwind +declare void @llvm.xtensa.xsr.acchi(i8*) nounwind +declare void @llvm.xtensa.xsr.m0(i8*) nounwind +declare void @llvm.xtensa.xsr.m1(i8*) nounwind +declare void @llvm.xtensa.xsr.m2(i8*) nounwind +declare void @llvm.xtensa.xsr.m3(i8*) nounwind +declare void @llvm.xtensa.wsr.acclo(i32) nounwind +declare void @llvm.xtensa.wsr.acchi(i32) nounwind +declare void @llvm.xtensa.wsr.m0(i32) nounwind +declare void @llvm.xtensa.wsr.m1(i32) nounwind +declare void @llvm.xtensa.wsr.m2(i32) nounwind +declare void @llvm.xtensa.wsr.m3(i32) nounwind + diff --git a/llvm/utils/gn/secondary/llvm/include/llvm/IR/BUILD.gn b/llvm/utils/gn/secondary/llvm/include/llvm/IR/BUILD.gn index 87e58608617ae..329a9e5990b5a 100644 --- a/llvm/utils/gn/secondary/llvm/include/llvm/IR/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/include/llvm/IR/BUILD.gn @@ -105,6 +105,10 @@ gen_arch_intrinsics("IntrinsicsXCore") { intrinsic_prefix = "xcore" } +gen_arch_intrinsics("IntrinsicsXtensa") { + intrinsic_prefix = "xtensa" +} + # Groups all tablegen() calls that create .inc files that are included in # IR's public headers. //llvm/lib/IR has this as a public_dep, so targets # depending on //llvm/lib/IR don't need to depend on this. This exists @@ -137,5 +141,6 @@ group("public_tablegen") { ":IntrinsicsWebAssembly", ":IntrinsicsX86", ":IntrinsicsXCore", + ":IntrinsicsXtensa", ] } From 243ba423e6fe773a6a0f6b598a7c1272a476e6af Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 19 Aug 2024 23:58:40 +0300 Subject: [PATCH 047/289] [Xtensa] Implement lowering llvm intrinsics fshr/fshl. 
--- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 23 ++++++++ llvm/lib/Target/Xtensa/XtensaISelLowering.h | 1 + llvm/test/CodeGen/Xtensa/funnel-shift.ll | 27 ++++++++++ llvm/test/CodeGen/Xtensa/rotl-rotr.ll | 54 +++---------------- 4 files changed, 59 insertions(+), 46 deletions(-) create mode 100644 llvm/test/CodeGen/Xtensa/funnel-shift.ll diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 9d784682adef5..e80c784c963a1 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -165,6 +165,11 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); + // Funnel shifts + setOperationAction(ISD::FSHR, MVT::i32, Custom); + setOperationAction(ISD::FSHL, MVT::i32, Custom); + + // Bit Manipulation setOperationAction(ISD::BSWAP, MVT::i32, Expand); setOperationAction(ISD::ROTL, MVT::i32, Expand); setOperationAction(ISD::ROTR, MVT::i32, Expand); @@ -1682,6 +1687,21 @@ bool XtensaTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT, return false; } +SDValue XtensaTargetLowering::LowerFunnelShift(SDValue Op, + SelectionDAG &DAG) const { + SDLoc DL(Op); + SDValue Op0 = Op.getOperand(0); + SDValue Op1 = Op.getOperand(1); + SDValue Shamt = Op.getOperand(2); + MVT VT = Op.getSimpleValueType(); + + bool IsFSHR = Op.getOpcode() == ISD::FSHR; + assert((VT == MVT::i32) && "Unexpected funnel shift type!"); + + return DAG.getNode(IsFSHR ? 
XtensaISD::SRCR : XtensaISD::SRCL, DL, VT, Op0, + Op1, Shamt); +} + SDValue XtensaTargetLowering::LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); @@ -1738,6 +1758,9 @@ SDValue XtensaTargetLowering::LowerOperation(SDValue Op, return LowerShiftRightParts(Op, DAG, true); case ISD::SRL_PARTS: return LowerShiftRightParts(Op, DAG, false); + case ISD::FSHL: + case ISD::FSHR: + return LowerFunnelShift(Op, DAG); default: report_fatal_error("Unexpected node to lower"); } diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index 7c0edc91960b3..5720e54822f6e 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -217,6 +217,7 @@ class XtensaTargetLowering : public TargetLowering { SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const; SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const; + SDValue LowerFunnelShift(SDValue Op, SelectionDAG &DAG) const; SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const; diff --git a/llvm/test/CodeGen/Xtensa/funnel-shift.ll b/llvm/test/CodeGen/Xtensa/funnel-shift.ll new file mode 100644 index 0000000000000..70ed35c1aa251 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/funnel-shift.ll @@ -0,0 +1,27 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -O1 -mtriple=xtensa -mcpu=esp32 %s -o - | FileCheck %s + +define dso_local i32 @test_fshr(i32 %value1, i32 %value2, i32 %shift) nounwind { +; CHECK-LABEL: test_fshr: +; CHECK: entry a1, 32 +; CHECK-NEXT: ssr a4 +; CHECK-NEXT: src a2, a2, a3 +; CHECK-NEXT: retw.n +entry: + %0 = tail call i32 @llvm.fshr.i32(i32 %value1, i32 %value2, i32 %shift) + ret i32 %0 +} + +define dso_local i32 @test_fshl(i32 %value1, i32 %value2, i32 %shift) nounwind { +; CHECK-LABEL: test_fshl: +; CHECK: entry a1, 32 +; CHECK-NEXT: ssl a4 +; CHECK-NEXT: src a2, a2, a3 +; CHECK-NEXT: retw.n 
+entry: + %0 = tail call i32 @llvm.fshl.i32(i32 %value1, i32 %value2, i32 %shift) + ret i32 %0 +} + +declare i32 @llvm.fshr.i32(i32, i32, i32) nounwind +declare i32 @llvm.fshl.i32(i32, i32, i32) nounwind diff --git a/llvm/test/CodeGen/Xtensa/rotl-rotr.ll b/llvm/test/CodeGen/Xtensa/rotl-rotr.ll index 9b704e0ac2177..5034debfe9a02 100644 --- a/llvm/test/CodeGen/Xtensa/rotl-rotr.ll +++ b/llvm/test/CodeGen/Xtensa/rotl-rotr.ll @@ -5,12 +5,7 @@ define i32 @rotl_32(i32 %x, i32 %y) nounwind { ; XTENSA-LABEL: rotl_32: ; XTENSA: ssl a3 -; XTENSA-NEXT: sll a8, a2 -; XTENSA-NEXT: movi a9, 32 -; XTENSA-NEXT: sub a9, a9, a3 -; XTENSA-NEXT: ssr a9 -; XTENSA-NEXT: srl a9, a2 -; XTENSA-NEXT: or a2, a8, a9 +; XTENSA-NEXT: src a2, a2, a2 ; XTENSA-NEXT: ret %z = sub i32 32, %y %b = shl i32 %x, %y @@ -22,12 +17,7 @@ define i32 @rotl_32(i32 %x, i32 %y) nounwind { define i32 @rotr_32(i32 %x, i32 %y) nounwind { ; XTENSA-LABEL: rotr_32: ; XTENSA: ssr a3 -; XTENSA-NEXT: srl a8, a2 -; XTENSA-NEXT: movi a9, 32 -; XTENSA-NEXT: sub a9, a9, a3 -; XTENSA-NEXT: ssl a9 -; XTENSA-NEXT: sll a9, a2 -; XTENSA-NEXT: or a2, a8, a9 +; XTENSA-NEXT: src a2, a2, a2 ; XTENSA-NEXT: ret %z = sub i32 32, %y %b = lshr i32 %x, %y @@ -131,13 +121,7 @@ define i64 @rotr_64(i64 %x, i64 %y) nounwind { define i32 @rotl_32_mask(i32 %x, i32 %y) nounwind { ; XTENSA-LABEL: rotl_32_mask: ; XTENSA: ssl a3 -; XTENSA-NEXT: sll a8, a2 -; XTENSA-NEXT: neg a9, a3 -; XTENSA-NEXT: movi a10, 31 -; XTENSA-NEXT: and a9, a9, a10 -; XTENSA-NEXT: ssr a9 -; XTENSA-NEXT: srl a9, a2 -; XTENSA-NEXT: or a2, a8, a9 +; XTENSA-NEXT: src a2, a2, a2 ; XTENSA-NEXT: ret %z = sub i32 0, %y %and = and i32 %z, 31 @@ -149,16 +133,8 @@ define i32 @rotl_32_mask(i32 %x, i32 %y) nounwind { define i32 @rotl_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind { ; XTENSA-LABEL: rotl_32_mask_and_63_and_31: -; XTENSA: movi a8, 63 -; XTENSA-NEXT: and a8, a3, a8 -; XTENSA-NEXT: ssl a8 -; XTENSA-NEXT: sll a8, a2 -; XTENSA-NEXT: neg a9, a3 -; XTENSA-NEXT: movi a10, 31 -; 
XTENSA-NEXT: and a9, a9, a10 -; XTENSA-NEXT: ssr a9 -; XTENSA-NEXT: srl a9, a2 -; XTENSA-NEXT: or a2, a8, a9 +; XTENSA: ssl a3 +; XTENSA-NEXT: src a2, a2, a2 ; XTENSA-NEXT: ret %a = and i32 %y, 63 %b = shl i32 %x, %a @@ -172,13 +148,7 @@ define i32 @rotl_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind { define i32 @rotr_32_mask(i32 %x, i32 %y) nounwind { ; XTENSA-LABEL: rotr_32_mask: ; XTENSA: ssr a3 -; XTENSA-NEXT: srl a8, a2 -; XTENSA-NEXT: neg a9, a3 -; XTENSA-NEXT: movi a10, 31 -; XTENSA-NEXT: and a9, a9, a10 -; XTENSA-NEXT: ssl a9 -; XTENSA-NEXT: sll a9, a2 -; XTENSA-NEXT: or a2, a8, a9 +; XTENSA-NEXT: src a2, a2, a2 ; XTENSA-NEXT: ret %z = sub i32 0, %y %and = and i32 %z, 31 @@ -190,16 +160,8 @@ define i32 @rotr_32_mask(i32 %x, i32 %y) nounwind { define i32 @rotr_32_mask_and_63_and_31(i32 %x, i32 %y) nounwind { ; XTENSA-LABEL: rotr_32_mask_and_63_and_31: -; XTENSA: movi a8, 63 -; XTENSA-NEXT: and a8, a3, a8 -; XTENSA-NEXT: ssr a8 -; XTENSA-NEXT: srl a8, a2 -; XTENSA-NEXT: neg a9, a3 -; XTENSA-NEXT: movi a10, 31 -; XTENSA-NEXT: and a9, a9, a10 -; XTENSA-NEXT: ssl a9 -; XTENSA-NEXT: sll a9, a2 -; XTENSA-NEXT: or a2, a8, a9 +; XTENSA: ssr a3 +; XTENSA-NEXT: src a2, a2, a2 ; XTENSA-NEXT: ret %a = and i32 %y, 63 %b = lshr i32 %x, %a From 0759bc1340091edf4ce6d5d41dfbe8cfe5e7d129 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 00:08:04 +0300 Subject: [PATCH 048/289] [Xtensa] Add functions needed to use as rust submodule. --- llvm/include/llvm/MC/MCSubtargetInfo.h | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/llvm/include/llvm/MC/MCSubtargetInfo.h b/llvm/include/llvm/MC/MCSubtargetInfo.h index 9891f1d127f1c..1ed4f30c74f8e 100644 --- a/llvm/include/llvm/MC/MCSubtargetInfo.h +++ b/llvm/include/llvm/MC/MCSubtargetInfo.h @@ -243,6 +243,16 @@ class MCSubtargetInfo { /// Return the list of processor features currently enabled. 
std::vector getEnabledProcessorFeatures() const; + ArrayRef getCPUTable() const { + return ProcDesc; + } + + ArrayRef getFeatureTable() const { + return ProcFeatures; + } + + virtual unsigned getHwMode() const { return 0; } + /// HwMode IDs are stored and accessed in a bit set format, enabling /// users to efficiently retrieve specific IDs, such as the RegInfo /// HwMode ID, from the set as required. Using this approach, various @@ -263,7 +273,7 @@ class MCSubtargetInfo { virtual unsigned getHwModeSet() const { return 0; } /// HwMode ID corresponding to the 'type' parameter is retrieved from the - /// HwMode bit set of the current subtarget. It’s important to note that if + /// HwMode bit set of the current subtarget. It’s important to note that if /// the current subtarget possesses two HwMode IDs and both control a single /// attribute (such as RegInfo), this interface will result in an error. virtual unsigned getHwMode(enum HwModeType type = HwMode_Default) const { From 80c96e21da8c81da78623ecab49029ecfb63cc0b Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 00:10:56 +0300 Subject: [PATCH 049/289] [Xtensa] Correct Call ABI for function return arguments. --- clang/lib/CodeGen/Targets/Xtensa.cpp | 12 +++++++++--- clang/test/CodeGen/xtensa-abi.c | 14 ++++++++++++++ 2 files changed, 23 insertions(+), 3 deletions(-) create mode 100644 clang/test/CodeGen/xtensa-abi.c diff --git a/clang/lib/CodeGen/Targets/Xtensa.cpp b/clang/lib/CodeGen/Targets/Xtensa.cpp index 37be0962e97ca..3d22b18f146b2 100644 --- a/clang/lib/CodeGen/Targets/Xtensa.cpp +++ b/clang/lib/CodeGen/Targets/Xtensa.cpp @@ -119,10 +119,16 @@ ABIArgInfo XtensaABIInfo::classifyArgumentType(QualType Ty, ABIArgInfo XtensaABIInfo::classifyReturnType(QualType RetTy) const { if (RetTy->isVoidType()) return ABIArgInfo::getIgnore(); + int ArgGPRsLeft = MaxNumRetGPRs; - // The rules for return and argument types are the same, so defer to - // classifyArgumentType. 
- return classifyArgumentType(RetTy, ArgGPRsLeft); + auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32; + + // The rules for return and argument with type size more than 4 bytes + // are the same, so defer to classifyArgumentType. + if (RetSize > 1) + return classifyArgumentType(RetTy, ArgGPRsLeft); + + return DefaultABIInfo::classifyReturnType(RetTy); } RValue XtensaABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, diff --git a/clang/test/CodeGen/xtensa-abi.c b/clang/test/CodeGen/xtensa-abi.c new file mode 100644 index 0000000000000..df7a99d77bc0f --- /dev/null +++ b/clang/test/CodeGen/xtensa-abi.c @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 -triple xtensa -O0 -emit-llvm %s -o - | FileCheck %s + +#define __malloc_like __attribute__((__malloc__)) +char *bufalloc () __malloc_like ;//__result_use_check; +extern void* malloc (unsigned size); + +char *bufalloc () +{ + char* buf = malloc(1024); + + return buf; +} + +// CHECK: define dso_local noalias ptr @bufalloc() #0 { From a9e083f84ffb92821977ddb43f2c66e84b37f4de Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 00:28:59 +0300 Subject: [PATCH 050/289] [Xtensa] Implement rest part of FP instructions. Add FP instructions test, format FP instruction descriptions. 
--- .../MCTargetDesc/XtensaMCCodeEmitter.cpp | 6 +- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 10 +- llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp | 4 +- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 263 ++++++++++++------ llvm/test/MC/Xtensa/xtensa-valid-float.s | 178 ++++++++++++ 5 files changed, 366 insertions(+), 95 deletions(-) create mode 100644 llvm/test/MC/Xtensa/xtensa-valid-float.s diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp index 01c7bd4bd3185..88daa562baedc 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp @@ -281,8 +281,10 @@ XtensaMCCodeEmitter::getMemRegEncoding(const MCInst &MI, unsigned OpNo, case Xtensa::L32I: case Xtensa::S32I_N: case Xtensa::L32I_N: - case Xtensa::S32F: - case Xtensa::L32F: + case Xtensa::SSI: + case Xtensa::SSIP: + case Xtensa::LSI: + case Xtensa::LSIP: case Xtensa::S32C1I: if (Res & 0x3) { report_fatal_error("Unexpected operand value!"); diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index e80c784c963a1..a5f479c1f730f 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -3111,13 +3111,19 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( case Xtensa::S16I: case Xtensa::S32I: case Xtensa::S32I_N: - case Xtensa::S32F: + case Xtensa::SSI: + case Xtensa::SSIP: + case Xtensa::SSX: + case Xtensa::SSXP: case Xtensa::L8UI: case Xtensa::L16SI: case Xtensa::L16UI: case Xtensa::L32I: case Xtensa::L32I_N: - case Xtensa::L32F: { + case Xtensa::LSI: + case Xtensa::LSIP: + case Xtensa::LSX: + case Xtensa::LSXP: { const MachineMemOperand &MMO = **MI.memoperands_begin(); if (MMO.isVolatile()) { BuildMI(*MBB, MI, DL, TII.get(Xtensa::MEMW)); diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp 
b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp index 1245edfa2bb39..b4645c59a50aa 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp @@ -168,8 +168,8 @@ void XtensaInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC, LoadOpcode = Xtensa::L32I; StoreOpcode = Xtensa::S32I; } else if (RC == &Xtensa::FPRRegClass) { - LoadOpcode = Xtensa::L32F; - StoreOpcode = Xtensa::S32F; + LoadOpcode = Xtensa::LSI; + StoreOpcode = Xtensa::SSI; } else llvm_unreachable("Unsupported regclass to load or store"); } diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index c0546bb41b423..0868c27486966 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -909,85 +909,63 @@ def ADD_S : FPArith_RRR<0x00, 0x0A, "add.s", fadd, 1>; def SUB_S : FPArith_RRR<0x01, 0x0A, "sub.s", fsub>; def MUL_S : FPArith_RRR<0x02, 0x0A, "mul.s", fmul, 1>; -def ABS_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), - "abs.s\t$r, $s", - [(set FPR:$r, (fabs FPR:$s))]> { - let t = 0x01; -} - -def NEG_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), - "neg.s\t$r, $s", - [(set FPR:$r, (fneg FPR:$s))]> { - let t = 0x06; -} +// FP load instructions +let mayLoad = 1, usesCustomInserter = 1, Predicates = [HasSingleFloat] in { + def LSI : RRI8_Inst<0x03, (outs FPR:$t), (ins mem32:$addr), + "lsi\t$t, $addr", []> { + bits<12> addr; -def TRUNC_S : RRR_Inst<0x00, 0x0A, 0x09, (outs AR:$r), (ins FPR:$s), - "trunc.s\t$r, $s, 0", - [(set AR:$r, (fp_to_sint FPR:$s))]> { - let t = 0x00; -} + let r = 0x00; + let imm8{7-0} = addr{11-4}; + let s{3-0} = addr{3-0}; + } -def UTRUNC_S : RRR_Inst<0x00, 0x0A, 0x0e, (outs AR:$r), (ins FPR:$s), - "utrunc.s\t$r, $s, 0", - [(set AR:$r, (fp_to_uint FPR:$s))]> { - let t = 0x00; -} + def LSIP : RRI8_Inst<0x03, (outs FPR:$t), (ins mem32:$addr), + "lsip\t$t, $addr", []> { + bits<12> addr; -def FLOAT_S : RRR_Inst<0x00, 0x0A, 
0x0c, (outs FPR:$r), (ins AR:$s), - "float.s\t$r, $s, 0", - [(set FPR:$r, (sint_to_fp AR:$s))]> { - let t = 0x00; -} + let r = 0x08; + let imm8{7-0} = addr{11-4}; + let s{3-0} = addr{3-0}; + } -def UFLOAT_S : RRR_Inst<0x00, 0x0A, 0x0D, (outs FPR:$r), (ins AR:$s), - "ufloat.s\t$r, $s, 0", - [(set FPR:$r, (uint_to_fp AR:$s))]> { - let t = 0x00; -} + def LSX : RRR_Inst<0x00, 0x08, 0x00, (outs), (ins FPR:$r, AR:$s, AR:$t), + "lsx\t$r, $s, $t", []>; -def RFR : RRR_Inst<0x00, 0x0A, 0x0f, (outs AR:$r), (ins FPR:$s), - "rfr\t$r, $s", - [(set AR:$r, (bitconvert FPR:$s))]> { - let t = 0x04; + def LSXP : RRR_Inst<0x00, 0x08, 0x01, (outs), (ins FPR:$r, AR:$s, AR:$t), + "lsxp\t$r, $s, $t", []>; } -def WFR : RRR_Inst<0x00, 0x0A, 0x0f, (outs FPR:$r), (ins AR:$s), - "wfr\t$r, $s", - [(set FPR:$r, (bitconvert AR:$s))]> { - let t = 0x05; -} +def : Pat<(f32 (load addr_ish4:$addr)), (f32 (LSI mem32:$addr))>; -// FP load instructions -let mayLoad = 1, usesCustomInserter = 1, Predicates = [HasSingleFloat] in { - class LoadF_RRI8 oper, string instrAsm, SDPatternOperator opNode, - ComplexPattern addrOp,Operand memOp>: RRI8_Inst<0x03, (outs FPR:$t), (ins memOp:$addr), - instrAsm#"\t$t, $addr", - [(set FPR:$t, (opNode addrOp:$addr))]> { +// FP store instructions +let mayStore = 1, usesCustomInserter = 1, Predicates = [HasSingleFloat] in { + def SSI : RRI8_Inst<0x03, (outs), (ins FPR:$t, mem32:$addr), + "ssi\t$t, $addr", []> { bits<12> addr; - let r = oper; + let r = 0x04; let imm8{7-0} = addr{11-4}; let s{3-0} = addr{3-0}; } -} - -def L32F : LoadF_RRI8<0x00, "lsi", load, addr_ish4, mem32>, Requires<[]>; -// FP store instructions -let mayStore = 1, usesCustomInserter = 1, Predicates = [HasSingleFloat] in { - class StoreF_RRI8 oper, string instrAsm, SDPatternOperator opNode, - ComplexPattern addrOp, Operand memOp>: RRI8_Inst<0x03, (outs), (ins FPR:$t, memOp:$addr), - instrAsm#"\t$t, $addr", - [(opNode FPR:$t, addrOp:$addr)]> { + def SSIP : RRI8_Inst<0x03, (outs), (ins FPR:$t, mem32:$addr), + 
"ssip\t$t, $addr", []> { bits<12> addr; - let r = oper; + let r = 0x0C; let imm8{7-0} = addr{11-4}; let s{3-0} = addr{3-0}; } + + def SSX: RRR_Inst<0x00, 0x08, 0x04, (outs), (ins FPR:$r, AR:$s, AR:$t), + "ssx\t$r, $s, $t", []>; + + def SSXP: RRR_Inst<0x00, 0x08, 0x05, (outs), (ins FPR:$r, AR:$s, AR:$t), + "ssxp\t$r, $s, $t", []>; } -def S32F : StoreF_RRI8<0x04, "ssi", store, addr_ish4, mem32>; +def : Pat<(store FPR:$t, addr_ish4:$addr), (SSI FPR:$t, mem32:$addr)>; // FP compare instructions let isCompare = 1, Predicates = [HasSingleFloat] in { @@ -1011,31 +989,27 @@ def ULT_S : FCompare<0x05, 0x0b, "ult.s", Xtensa_cmpult, 0>; def ULE_S : FCompare<0x07, 0x0b, "ule.s", Xtensa_cmpule, 0>; def UN_S : FCompare<0x01, 0x0b, "un.s", Xtensa_cmpuo, 1>; -//FP complex operations -def MADD_S : RRR_Inst<0x00, 0x0A, 0x04, (outs FPR:$r), (ins FPR:$a, FPR:$s, FPR:$t), - "madd.s\t$r, $s, $t", - [(set FPR:$r, (Xtensa_madd FPR:$a, FPR:$s, FPR:$t))]>, - Requires<[HasSingleFloat]> { - let isCommutable = 0; - let isReMaterializable = 0; - let Constraints = "$r = $a"; +def ABS_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "abs.s\t$r, $s", + [(set FPR:$r, (fabs FPR:$s))]> { + let t = 0x01; } -def MSUB_S : RRR_Inst<0x00, 0x0A, 0x05, (outs FPR:$r), (ins FPR:$a, FPR:$s, FPR:$t), - "msub.s\t$r, $s, $t", - [(set FPR:$r, (Xtensa_msub FPR:$a, FPR:$s, FPR:$t))]>, - Requires<[HasSingleFloat]> { - let isCommutable = 0; - let isReMaterializable = 0; - let Constraints = "$r = $a"; +def ADDEXP_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "addexp.s\t$r, $s", []>, Requires<[HasSingleFloat]> { + let t = 0x0E; } -//FP move operations -def MOV_S : RRR_Inst<0x00, 0x0A, 0x0f, (outs FPR:$r), (ins FPR:$s), - "mov.s\t$r, $s", - [(set FPR:$r, (Xtensa_movs FPR:$s))]>, Requires<[HasSingleFloat]> -{ - let t = 0x00; +def ADDEXPM_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "addexpm.s\t$r, $s", []>, Requires<[HasSingleFloat]> { + let t = 0x0F; +} + +def CEIL_S : 
RRR_Inst<0x00, 0x0A, 0x0B, (outs AR:$r), (ins FPR:$s, uimm4:$imm), + "ceil.s\t$r, $s, $imm", []>, Requires<[HasSingleFloat]> { + bits<4> imm; + + let t = imm; } def CONST_S : RRR_Inst<0x00, 0x0a, 0x0f, (outs FPR:$r), (ins uimm4:$imm), @@ -1051,11 +1025,40 @@ def DIV0_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), let t = 0x7; } +def DIVN_S : RRR_Inst<0x00, 0x0A, 0x07, (outs FPR:$r), (ins FPR:$s, FPR:$t), + "divn.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; + +def FLOAT_S : RRR_Inst<0x00, 0x0A, 0x0c, (outs FPR:$r), (ins AR:$s, uimm4:$imm), + "float.s\t$r, $s, $imm", []> { + bits<4> imm; + + let t = imm; +} + +def : Pat<(f32 (sint_to_fp AR:$s)), (FLOAT_S AR:$s, 0)>; + +def FLOOR_S : RRR_Inst<0x00, 0x0A, 0x0A, (outs AR:$r), (ins FPR:$s, uimm4:$imm), + "floor.s\t$r, $s, $imm", []>, Requires<[HasSingleFloat]> { + bits<4> imm; + + let t = imm; +} + def MADDN_S : RRR_Inst<0x00, 0x0A, 0x06, (outs FPR:$r), (ins FPR:$s, FPR:$t), "maddn.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]> { let isCommutable = 0; } +// FP multipy-add +def MADD_S : RRR_Inst<0x00, 0x0A, 0x04, (outs FPR:$r), (ins FPR:$a, FPR:$s, FPR:$t), + "madd.s\t$r, $s, $t", + [(set FPR:$r, (Xtensa_madd FPR:$a, FPR:$s, FPR:$t))]>, + Requires<[HasSingleFloat]> { + let isCommutable = 0; + let isReMaterializable = 0; + let Constraints = "$r = $a"; +} + def MKDADJ_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), "mkdadj.s\t$r, $s", []>, Requires<[HasSingleFloat]> { let t = 0x0D; @@ -1066,29 +1069,112 @@ def MKSADJ_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), let t = 0x0C; } -def ADDEXP_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), - "addexp.s\t$r, $s", []>, Requires<[HasSingleFloat]> { - let t = 0x0E; +// FP move instructions +def MOV_S : RRR_Inst<0x00, 0x0A, 0x0f, (outs FPR:$r), (ins FPR:$s), + "mov.s\t$r, $s", + [(set FPR:$r, (Xtensa_movs FPR:$s))]>, Requires<[HasSingleFloat]> { + let t = 0x00; } -def ADDEXPM_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins 
FPR:$s), - "addexpm.s\t$r, $s", []>, Requires<[HasSingleFloat]> { - let t = 0x0F; -} +def MOVEQZ_S : RRR_Inst<0x00, 0x0B, 0x08, (outs FPR:$r), (ins FPR:$s, AR:$t), + "moveqz.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; -def DIVN_S : RRR_Inst<0x00, 0x0A, 0x07, (outs FPR:$r), (ins FPR:$s, FPR:$t), - "divn.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; +def MOVF_S : RRR_Inst<0x00, 0x0B, 0x0C, (outs FPR:$r), (ins FPR:$s, BR:$t), + "movf.s\t$r, $s, $t", []>, Requires<[HasBoolean, HasSingleFloat]>; + +def MOVGEZ_S : RRR_Inst<0x00, 0x0B, 0x0B, (outs FPR:$r), (ins FPR:$s, AR:$t), + "movgez.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; + +def MOVLTZ_S : RRR_Inst<0x00, 0x0B, 0x0A, (outs FPR:$r), (ins FPR:$s, AR:$t), + "movltz.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; + +def MOVNEZ_S : RRR_Inst<0x00, 0x0B, 0x09, (outs FPR:$r), (ins FPR:$s, AR:$t), + "movnez.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; + +def MOVT_S : RRR_Inst<0x00, 0x0B, 0x0D, (outs FPR:$r), (ins FPR:$s, BR:$t), + "movt.s\t$r, $s, $t", []>, Requires<[HasBoolean, HasSingleFloat]>; + +// FP multipy-sub +def MSUB_S : RRR_Inst<0x00, 0x0A, 0x05, (outs FPR:$r), (ins FPR:$a, FPR:$s, FPR:$t), + "msub.s\t$r, $s, $t", + [(set FPR:$r, (Xtensa_msub FPR:$a, FPR:$s, FPR:$t))]>, Requires<[HasSingleFloat]> { + let isCommutable = 0; + let isReMaterializable = 0; + let Constraints = "$r = $a"; +} def NEXP01_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), "nexp01.s\t$r, $s", []>, Requires<[HasSingleFloat]> { let t = 0x0B; } +def NEG_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "neg.s\t$r, $s", + [(set FPR:$r, (fneg FPR:$s))]> { + let t = 0x06; +} + +def RECIP0_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "recip0.s\t$r, $s", []>, Requires<[HasSingleFloat]> { + let t = 0x08; +} + +def RFR : RRR_Inst<0x00, 0x0A, 0x0f, (outs AR:$r), (ins FPR:$s), + "rfr\t$r, $s", + [(set AR:$r, (bitconvert FPR:$s))]> { + let t = 0x04; +} + +def ROUND_S : RRR_Inst<0x00, 0x0A, 0x08, 
(outs AR:$r), (ins FPR:$s, uimm4:$imm), + "round.s\t$r, $s, $imm", []>, Requires<[HasSingleFloat]> { + bits<4> imm; + + let t = imm; +} + +def RSQRT0_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), + "rsqrt0.s\t$r, $s", []>, Requires<[HasSingleFloat]> { + let t = 0x0A; +} + def SQRT0_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), "sqrt0.s\t$r, $s", []>, Requires<[HasSingleFloat]> { let t = 0x09; } +def TRUNC_S : RRR_Inst<0x00, 0x0A, 0x09, (outs AR:$r), (ins FPR:$s, uimm4:$imm), + "trunc.s\t$r, $s, $imm", []> { + bits<4> imm; + + let t = imm; +} + +def : Pat<(i32 (fp_to_sint FPR:$s)), (TRUNC_S FPR:$s, 0)>; + +def UFLOAT_S : RRR_Inst<0x00, 0x0A, 0x0D, (outs FPR:$r), (ins AR:$s, uimm4:$imm), + "ufloat.s\t$r, $s, $imm", []> { + bits<4> imm; + + let t = imm; +} + +def : Pat<(f32 (uint_to_fp AR:$s)), (UFLOAT_S AR:$s, 0)>; + +def UTRUNC_S : RRR_Inst<0x00, 0x0A, 0x0e, (outs AR:$r), (ins FPR:$s, uimm4:$imm), + "utrunc.s\t$r, $s, $imm", []> { + bits<4> imm; + + let t = imm; +} + +def : Pat<(i32 (fp_to_uint FPR:$s)), (UTRUNC_S FPR:$s, 0)>; + +def WFR : RRR_Inst<0x00, 0x0A, 0x0f, (outs FPR:$r), (ins AR:$s), + "wfr\t$r, $s", + [(set FPR:$r, (bitconvert AR:$s))]> { + let t = 0x05; +} + // FP select operations let usesCustomInserter = 1 in { def SELECT_CC_FP_INT : Pseudo<(outs AR:$dst), (ins FPR:$lhs, FPR:$rhs, AR:$t, AR:$f, i32imm:$cond), @@ -1101,7 +1187,6 @@ let usesCustomInserter = 1 in { "!select_cc_fp_fp $dst, $lhs, $rhs, $t, $f, $cond", [(set FPR:$dst, (Xtensa_select_cc_fp FPR:$lhs, FPR:$rhs, FPR:$t, FPR:$f, imm:$cond))]>; } - //===----------------------------------------------------------------------===// // Loop Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/test/MC/Xtensa/xtensa-valid-float.s b/llvm/test/MC/Xtensa/xtensa-valid-float.s new file mode 100644 index 0000000000000..40405e93c5843 --- /dev/null +++ b/llvm/test/MC/Xtensa/xtensa-valid-float.s @@ -0,0 +1,178 @@ +# RUN: 
llvm-mc %s -triple=xtensa -mattr=+fp -mattr=+bool -show-encoding \ +# RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s + +.align 4 +LBL0: + +# CHECK-INST: abs.s f2, f3 +# CHECK: encoding: [0x10,0x23,0xfa] + abs.s f2, f3 +# CHECK-INST: add.s f2, f3, f4 +# CHECK: encoding: [0x40,0x23,0x0a] + add.s f2, f3, f4 +# CHECK-INST: addexp.s f2, f3 +# CHECK: encoding: [0xe0,0x23,0xfa] + addexp.s f2, f3 +# CHECK-INST: addexpm.s f2, f3 +# CHECK: encoding: [0xf0,0x23,0xfa] + addexpm.s f2, f3 + +# CHECK-INST: ceil.s a2, f3, 5 +# CHECK: encoding: [0x50,0x23,0xba] + ceil.s a2, f3, 5 +# CHECK-INST: const.s f3, 5 +# CHECK: encoding: [0x30,0x35,0xfa] + const.s f3, 5 + +# CHECK-INST: div0.s f2, f3 +# CHECK: encoding: [0x70,0x23,0xfa] + div0.s f2, f3 +# CHECK-INST: divn.s f2, f3, f4 +# CHECK: encoding: [0x40,0x23,0x7a] + divn.s f2, f3, f4 + +# CHECK-INST: float.s f2, a3, 5 +# CHECK: encoding: [0x50,0x23,0xca] + float.s f2, a3, 5 +# CHECK-INST: floor.s a2, f3, 5 +# CHECK: encoding: [0x50,0x23,0xaa] + floor.s a2, f3, 5 + +# CHECK-INST: lsi f2, a3, 8 +# CHECK: encoding: [0x23,0x03,0x02] + lsi f2, a3, 8 +# CHECK-INST: lsip f2, a3, 8 +# CHECK: encoding: [0x23,0x83,0x02] + lsip f2, a3, 8 +# CHECK-INST: lsx f2, a3, a4 +# CHECK: encoding: [0x40,0x23,0x08] + lsx f2, a3, a4 +# CHECK-INST: lsxp f2, a3, a4 +# CHECK: encoding: [0x40,0x23,0x18] + lsxp f2, a3, a4 + +# CHECK-INST: madd.s f2, f3, f4 +# CHECK: encoding: [0x40,0x23,0x4a] + madd.s f2, f3, f4 +# CHECK-INST: maddn.s f2, f3, f4 +# CHECK: encoding: [0x40,0x23,0x6a] + maddn.s f2, f3, f4 +# CHECK-INST: mkdadj.s f2, f3 +# CHECK: encoding: [0xd0,0x23,0xfa] + mkdadj.s f2, f3 +# CHECK-INST: mksadj.s f2, f3 +# CHECK: encoding: [0xc0,0x23,0xfa] + mksadj.s f2, f3 + +# CHECK-INST: mov.s f2, f3 +# CHECK: encoding: [0x00,0x23,0xfa] + mov.s f2, f3 + +# CHECK-INST: moveqz.s f2, f3, a4 +# CHECK: encoding: [0x40,0x23,0x8b] + moveqz.s f2, f3, a4 +# CHECK-INST: movf.s f2, f3, b0 +# CHECK: encoding: [0x00,0x23,0xcb] + movf.s f2, f3, b0 +# CHECK-INST: movgez.s 
f2, f3, a4 +# CHECK: encoding: [0x40,0x23,0xbb] + movgez.s f2, f3, a4 +# CHECK-INST: movltz.s f2, f3, a4 +# CHECK: encoding: [0x40,0x23,0xab] + movltz.s f2, f3, a4 +# CHECK-INST: movnez.s f2, f3, a4 +# CHECK: encoding: [0x40,0x23,0x9b] + movnez.s f2, f3, a4 +# CHECK-INST: movt.s f2, f3, b0 +# CHECK: encoding: [0x00,0x23,0xdb] + movt.s f2, f3, b0 + +# CHECK-INST: msub.s f2, f3, f4 +# CHECK: encoding: [0x40,0x23,0x5a] + msub.s f2, f3, f4 +# CHECK-INST: mul.s f2, f3, f4 +# CHECK: encoding: [0x40,0x23,0x2a] + mul.s f2, f3, f4 +# CHECK-INST: neg.s f2, f3 +# CHECK: encoding: [0x60,0x23,0xfa] + neg.s f2, f3 + +# CHECK-INST: nexp01.s f2, f3 +# CHECK: encoding: [0xb0,0x23,0xfa] + nexp01.s f2, f3 + +# CHECK-INST: oeq.s b0, f2, f3 +# CHECK: encoding: [0x30,0x02,0x2b] + oeq.s b0, f2, f3 +# CHECK-INST: ole.s b0, f2, f3 +# CHECK: encoding: [0x30,0x02,0x6b] + ole.s b0, f2, f3 +# CHECK-INST: olt.s b0, f2, f3 +# CHECK: encoding: [0x30,0x02,0x4b] + olt.s b0, f2, f3 + +# CHECK-INST: recip0.s f2, f3 +# CHECK: encoding: [0x80,0x23,0xfa] + recip0.s f2, f3 + +# CHECK-INST: rfr a2, f3 +# CHECK: encoding: [0x40,0x23,0xfa] + rfr a2, f3 + +# CHECK-INST: round.s a2, f3, 5 +# CHECK: encoding: [0x50,0x23,0x8a] + round.s a2, f3, 5 +# CHECK-INST: rsqrt0.s f2, f3 +# CHECK: encoding: [0xa0,0x23,0xfa] + rsqrt0.s f2, f3 +# CHECK-INST: sqrt0.s f2, f3 +# CHECK: encoding: [0x90,0x23,0xfa] + sqrt0.s f2, f3 + +# CHECK-INST: ssi f2, a3, 8 +# CHECK: encoding: [0x23,0x43,0x02] + ssi f2, a3, 8 +# CHECK-INST: ssip f2, a3, 8 +# CHECK: encoding: [0x23,0xc3,0x02] + ssip f2, a3, 8 +# CHECK-INST: ssx f2, a3, a4 +# CHECK: encoding: [0x40,0x23,0x48] + ssx f2, a3, a4 +# CHECK-INST: ssxp f2, a3, a4 +# CHECK: encoding: [0x40,0x23,0x58] + ssxp f2, a3, a4 + +# CHECK-INST: sub.s f2, f3, f4 +# CHECK: encoding: [0x40,0x23,0x1a] + sub.s f2, f3, f4 + +# CHECK-INST: trunc.s a2, f3, 5 +# CHECK: encoding: [0x50,0x23,0x9a] + trunc.s a2, f3, 5 + +# CHECK-INST: ueq.s b0, f2, f3 +# CHECK: encoding: [0x30,0x02,0x3b] + ueq.s b0, f2, f3 
+ +# CHECK-INST: ufloat.s f2, a3, 5 +# CHECK: encoding: [0x50,0x23,0xda] + ufloat.s f2, a3, 5 + +# CHECK-INST: ule.s b0, f2, f3 +# CHECK: encoding: [0x30,0x02,0x7b] + ule.s b0, f2, f3 +# CHECK-INST: ult.s b0, f2, f3 +# CHECK: encoding: [0x30,0x02,0x5b] + ult.s b0, f2, f3 +# CHECK-INST: un.s b0, f2, f3 +# CHECK: encoding: [0x30,0x02,0x1b] + un.s b0, f2, f3 + +# CHECK-INST: utrunc.s a2, f3, 5 +# CHECK: encoding: [0x50,0x23,0xea] + utrunc.s a2, f3, 5 + +# CHECK-INST: wfr f2, a3 +# CHECK: encoding: [0x50,0x23,0xfa] + wfr f2, a3 + From af3b6b0252faaf7eb85bb48cf7259783d9f4393f Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 00:40:28 +0300 Subject: [PATCH 051/289] [Xtensa] Correct lowering BR_CC with FP operands. Remove register class for boolean operands, because it is only suitable for FP compare operations and may lead to problems in other cases. Disable load width reduction, because for IRAM memory it may cause exceptions. --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 116 ++++++------------ llvm/lib/Target/Xtensa/XtensaISelLowering.h | 12 +- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 12 +- llvm/lib/Target/Xtensa/XtensaOperators.td | 9 +- llvm/test/CodeGen/Xtensa/xtensa-fcmp.ll | 18 +++ llvm/test/CodeGen/Xtensa/xtensa-icmp.ll | 17 +++ 6 files changed, 96 insertions(+), 88 deletions(-) create mode 100644 llvm/test/CodeGen/Xtensa/xtensa-fcmp.ll create mode 100644 llvm/test/CodeGen/Xtensa/xtensa-icmp.ll diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index a5f479c1f730f..85d0f526ccc75 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -334,10 +334,6 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, // Compute derived properties from the register classes computeRegisterProperties(STI.getRegisterInfo()); - - if (Subtarget.hasBoolean()) { - addRegisterClass(MVT::i1, &Xtensa::BRRegClass); - } } bool 
XtensaTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, @@ -1046,73 +1042,6 @@ XtensaTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, DL, MVT::Other, RetOps); } -static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, ISD::CondCode CC, SDLoc dl, - SelectionDAG &DAG, int &br_code) { - // Minor optimization: if LHS is a constant, swap operands, then the - // constant can be folded into comparison. - if (LHS.getOpcode() == ISD::Constant) - std::swap(LHS, RHS); - int cmp_code = 0; - - switch (CC) { - default: - llvm_unreachable("Invalid condition!"); - break; - case ISD::SETUNE: - br_code = XtensaISD::BR_CC_F; - cmp_code = XtensaISD::CMPOEQ; - break; - case ISD::SETUO: - br_code = XtensaISD::BR_CC_T; - cmp_code = XtensaISD::CMPUO; - break; - case ISD::SETO: - br_code = XtensaISD::BR_CC_F; - cmp_code = XtensaISD::CMPUO; - break; - case ISD::SETUEQ: - br_code = XtensaISD::BR_CC_T; - cmp_code = XtensaISD::CMPUEQ; - break; - case ISD::SETULE: - br_code = XtensaISD::BR_CC_T; - cmp_code = XtensaISD::CMPULE; - break; - case ISD::SETULT: - br_code = XtensaISD::BR_CC_T; - cmp_code = XtensaISD::CMPULT; - break; - case ISD::SETEQ: - case ISD::SETOEQ: - br_code = XtensaISD::BR_CC_T; - cmp_code = XtensaISD::CMPOEQ; - break; - case ISD::SETNE: - br_code = XtensaISD::BR_CC_F; - cmp_code = XtensaISD::CMPOEQ; - break; - case ISD::SETLE: - case ISD::SETOLE: - br_code = XtensaISD::BR_CC_T; - cmp_code = XtensaISD::CMPOLE; - break; - case ISD::SETLT: - case ISD::SETOLT: - br_code = XtensaISD::BR_CC_T; - cmp_code = XtensaISD::CMPOLT; - break; - case ISD::SETGE: - br_code = XtensaISD::BR_CC_F; - cmp_code = XtensaISD::CMPOLT; - break; - case ISD::SETGT: - br_code = XtensaISD::BR_CC_F; - cmp_code = XtensaISD::CMPOLE; - break; - } - return DAG.getNode(cmp_code, dl, MVT::i1, LHS, RHS); -} - SDValue XtensaTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { SDValue Chain = Op.getOperand(0); ISD::CondCode CC = cast(Op.getOperand(1))->get(); @@ 
-1122,9 +1051,9 @@ SDValue XtensaTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); if (LHS.getValueType() == MVT::f32) { - int br_code; - SDValue Flag = EmitCMP(LHS, RHS, CC, DL, DAG, br_code); - return DAG.getNode(br_code, DL, Op.getValueType(), Chain, Flag, Dest); + SDValue TargetCC = DAG.getConstant(CC, DL, MVT::i32); + return DAG.getNode(XtensaISD::BR_CC_FP, DL, Op.getValueType(), Chain, + TargetCC, LHS, RHS, Dest); } else { llvm_unreachable("invalid BR_CC to lower"); } @@ -1788,10 +1717,12 @@ const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const { return "XtensaISD::SELECT_CC"; case XtensaISD::SELECT_CC_FP: return "XtensaISD::SELECT_CC_FP"; - case XtensaISD::BR_CC_T: - return "XtensaISD::BR_CC_T"; - case XtensaISD::BR_CC_F: - return "XtensaISD::BR_CC_F"; + case XtensaISD::BR_T: + return "XtensaISD::BR_T"; + case XtensaISD::BR_F: + return "XtensaISD::BR_F"; + case XtensaISD::BR_CC_FP: + return "XtensaISD::BR_CC_FP"; case XtensaISD::SRCL: return "XtensaISD::SRCL"; case XtensaISD::SRCR: @@ -1931,11 +1862,12 @@ XtensaTargetLowering::emitSelectCC(MachineInstr &MI, (MI.getOpcode() == Xtensa::SELECT_CC_FP_INT)) { int BrKind = 0; int CmpKind = 0; - unsigned b = Xtensa::B0; + MachineRegisterInfo &RegInfo = F->getRegInfo(); + const TargetRegisterClass *RC = &Xtensa::BRRegClass; + unsigned b = RegInfo.createVirtualRegister(RC); GetFPBranchKind(Cond, BrKind, CmpKind); - BuildMI(MBB, DL, TII.get(CmpKind), b) - .addReg(LHS.getReg()) + BuildMI(MBB, DL, TII.get(CmpKind), b) .addReg(LHS.getReg()) .addReg(RHS.getReg()); BuildMI(MBB, DL, TII.get(BrKind)).addReg(b, RegState::Kill).addMBB(SinkMBB); } else { @@ -2970,6 +2902,28 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( return MBB; } + case Xtensa::BRCC_FP: { + MachineOperand &Cond = MI.getOperand(0); + MachineOperand &LHS = MI.getOperand(1); + MachineOperand &RHS = MI.getOperand(2); + MachineBasicBlock *TargetBB = MI.getOperand(3).getMBB(); + int 
BrKind = 0; + int CmpKind = 0; + MachineFunction *MF = MBB->getParent(); + MachineRegisterInfo &RegInfo = MF->getRegInfo(); + const TargetRegisterClass *RC = &Xtensa::BRRegClass; + + unsigned RegB = RegInfo.createVirtualRegister(RC); + GetFPBranchKind(Cond.getImm(), BrKind, CmpKind); + BuildMI(*MBB, MI, DL, TII.get(CmpKind), RegB) + .addReg(LHS.getReg()) + .addReg(RHS.getReg()); + BuildMI(*MBB, MI, DL, TII.get(BrKind)).addReg(RegB).addMBB(TargetBB); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::SELECT_CC_FP_FP: case Xtensa::SELECT_CC_FP_INT: case Xtensa::SELECT_CC_INT_FP: diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index 5720e54822f6e..3044c415a220e 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -23,8 +23,11 @@ namespace llvm { namespace XtensaISD { enum { FIRST_NUMBER = ISD::BUILTIN_OP_END, - BR_CC_T, - BR_CC_F, + BR_T, + BR_F, + + //Conditional branch with FP operands + BR_CC_FP, BR_JT, @@ -166,6 +169,11 @@ class XtensaTargetLowering : public TargetLowering { return true; } + bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, + EVT NewVT) const override { + return false; + } + bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const override; diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 0868c27486966..89176c2818364 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -888,8 +888,8 @@ def ORBC : RRR_Inst<0x00, 0x02, 0x03, (outs BR:$r), (ins BR:$s, BR:$t), def XORB : RRR_Inst<0x00, 0x02, 0x04, (outs BR:$r), (ins BR:$s, BR:$t), "xorb\t$r, $s, $t", []>, Requires<[HasBoolean]>; -def : Pat<(Xtensa_brcc_t BR:$b, bb:$target), (BT BR:$b, bb:$target)>; -def : Pat<(Xtensa_brcc_f BR:$b, bb:$target), (BF BR:$b, bb:$target)>; +def : Pat<(Xtensa_br_t BR:$b, bb:$target), (BT BR:$b, bb:$target)>; +def : 
Pat<(Xtensa_br_f BR:$b, bb:$target), (BF BR:$b, bb:$target)>; //===----------------------------------------------------------------------===// // Floating-Point Instructions @@ -1187,6 +1187,14 @@ let usesCustomInserter = 1 in { "!select_cc_fp_fp $dst, $lhs, $rhs, $t, $f, $cond", [(set FPR:$dst, (Xtensa_select_cc_fp FPR:$lhs, FPR:$rhs, FPR:$t, FPR:$f, imm:$cond))]>; } + +// FP brcc pesudo operation +let usesCustomInserter = 1, isBranch = 1, isTerminator = 1, isBarrier = 1 in { + def BRCC_FP : Pseudo<(outs), (ins i32imm:$cond, FPR:$lhs, FPR:$rhs, brtarget:$target), + "!brcc_fp $cond, $lhs, $rhs, $target", + [(Xtensa_brcc_fp imm:$cond, FPR:$lhs, FPR:$rhs, bb:$target)]>; +} + //===----------------------------------------------------------------------===// // Loop Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaOperators.td b/llvm/lib/Target/Xtensa/XtensaOperators.td index 01871a44569e8..4dd1dd27b519d 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperators.td +++ b/llvm/lib/Target/Xtensa/XtensaOperators.td @@ -26,7 +26,8 @@ def SDT_XtensaSelectCC : SDTypeProfile<1, 5, SDTCisVT<5, i32>]>; def SDT_XtensaMOVSP : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisVT<0, i32>]>; -def SDT_XtensaBrCC : SDTypeProfile<0, 2, [SDTCisVT<0, i1>, SDTCisVT<1, OtherVT>]>; +def SDT_XtensaBrBool : SDTypeProfile<0, 2, [SDTCisVT<0, i1>, SDTCisVT<1, OtherVT>]>; +def SDT_XtensaBrCCFP : SDTypeProfile<0, 4, [SDTCisVT<0, i32>, SDTCisVT<1, f32>, SDTCisVT<2, f32>, SDTCisVT<3, OtherVT>]>; def SDT_XtensaCmp : SDTypeProfile<1, 2, [SDTCisVT<0, i1>, SDTCisVT<1, f32>, SDTCisVT<2, f32>]>; def SDT_XtensaMADD : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisVT<0, f32>]>; def SDT_XtensaMOVS : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisVT<0, f32>]>; @@ -81,9 +82,11 @@ def Xtensa_extui: SDNode<"XtensaISD::EXTUI", SDT_XtensaEXTUI>; def Xtensa_movsp: SDNode<"XtensaISD::MOVSP", 
SDT_XtensaMOVSP, [SDNPInGlue]>; -def Xtensa_brcc_t : SDNode<"XtensaISD::BR_CC_T", SDT_XtensaBrCC, +def Xtensa_br_t : SDNode<"XtensaISD::BR_T", SDT_XtensaBrBool, [SDNPHasChain, SDNPInGlue]>; -def Xtensa_brcc_f : SDNode<"XtensaISD::BR_CC_F", SDT_XtensaBrCC, +def Xtensa_br_f : SDNode<"XtensaISD::BR_F", SDT_XtensaBrBool, + [SDNPHasChain, SDNPInGlue]>; +def Xtensa_brcc_fp : SDNode<"XtensaISD::BR_CC_FP", SDT_XtensaBrCCFP, [SDNPHasChain, SDNPInGlue]>; def Xtensa_cmpoeq : SDNode<"XtensaISD::CMPOEQ", SDT_XtensaCmp, [SDNPOutGlue]>; diff --git a/llvm/test/CodeGen/Xtensa/xtensa-fcmp.ll b/llvm/test/CodeGen/Xtensa/xtensa-fcmp.ll new file mode 100644 index 0000000000000..ffd4977a03c61 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/xtensa-fcmp.ll @@ -0,0 +1,18 @@ +; RUN: llc -O1 -mtriple=xtensa -mcpu=esp32 %s -o - | FileCheck %s + +define void @test_fcmp(i32 %x.coerce) { +; CHECK-LABEL: @test_fcmp +entry: + %0 = bitcast i32 %x.coerce to float + %cmp = fcmp oeq float %0, 0x7FF0000000000000 + br i1 %cmp, label %if.then, label %if.else +; CHECK: oeq.s b0, f9, f8 +; CHECK: bf b0, .LBB0_2 + +if.then: ; preds = %entry + unreachable + +if.else: ; preds = %entry + unreachable +} + diff --git a/llvm/test/CodeGen/Xtensa/xtensa-icmp.ll b/llvm/test/CodeGen/Xtensa/xtensa-icmp.ll new file mode 100644 index 0000000000000..684ff3b2b60b8 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/xtensa-icmp.ll @@ -0,0 +1,17 @@ +; RUN: llc -O1 -mtriple=xtensa -mcpu=esp32 %s -o - | FileCheck %s + +define i8 @test_bit(i8 %a) { +; CHECK-LABEL: @test_bit + %b = and i8 %a, 16 + %bool = icmp eq i8 %b, 0 + br i1 %bool, label %true, label %false +; CHECK: movi.n a8, 16 +; CHECK: and a8, a2, a8 +; CHECK: bnez a8, .LBB0_2 + +true: + ret i8 1 + +false: + ret i8 0 +} From e6e31ab5c79261b30c48af54583e77c9270aec72 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 00:51:58 +0300 Subject: [PATCH 052/289] [Xtensa] Use ctors for Xtensa target by default. 
--- llvm/lib/Target/Xtensa/CMakeLists.txt | 1 + .../lib/Target/Xtensa/XtensaTargetMachine.cpp | 7 +++++- .../Target/Xtensa/XtensaTargetObjectFile.cpp | 23 +++++++++++++++++ .../Target/Xtensa/XtensaTargetObjectFile.h | 25 +++++++++++++++++++ 4 files changed, 55 insertions(+), 1 deletion(-) create mode 100644 llvm/lib/Target/Xtensa/XtensaTargetObjectFile.cpp create mode 100644 llvm/lib/Target/Xtensa/XtensaTargetObjectFile.h diff --git a/llvm/lib/Target/Xtensa/CMakeLists.txt b/llvm/lib/Target/Xtensa/CMakeLists.txt index aeeec1dfbd2f9..3d119e8f86b32 100644 --- a/llvm/lib/Target/Xtensa/CMakeLists.txt +++ b/llvm/lib/Target/Xtensa/CMakeLists.txt @@ -26,6 +26,7 @@ add_llvm_target(XtensaCodeGen XtensaSizeReductionPass.cpp XtensaSubtarget.cpp XtensaTargetMachine.cpp + XtensaTargetObjectFile.cpp XtensaUtils.cpp LINK_COMPONENTS diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp index dfe04ee2f4dcc..ebba17d389858 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp +++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp @@ -14,6 +14,7 @@ #include "XtensaMachineFunctionInfo.h" #include "XtensaTargetMachine.h" +#include "XtensaTargetObjectFile.h" #include "TargetInfo/XtensaTargetInfo.h" #include "llvm/CodeGen/Passes.h" #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" @@ -43,6 +44,10 @@ static Reloc::Model getEffectiveRelocModel(bool JIT, return *RM; } +static std::unique_ptr createTLOF() { + return std::make_unique(); +} + XtensaTargetMachine::XtensaTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, @@ -53,7 +58,7 @@ XtensaTargetMachine::XtensaTargetMachine(const Target &T, const Triple &TT, : LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options, IsLittle), TT, CPU, FS, Options, getEffectiveRelocModel(JIT, RM), getEffectiveCodeModel(CM, CodeModel::Small), OL), - TLOF(std::make_unique()) { + TLOF(createTLOF()) { initAsmInfo(); } diff --git 
a/llvm/lib/Target/Xtensa/XtensaTargetObjectFile.cpp b/llvm/lib/Target/Xtensa/XtensaTargetObjectFile.cpp new file mode 100644 index 0000000000000..27da879ea860d --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaTargetObjectFile.cpp @@ -0,0 +1,23 @@ +//===-- llvm/Target/XtensaTargetObjectFile.cpp - Xtensa Object Info Impl --===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "XtensaTargetObjectFile.h" +#include "llvm/MC/MCContext.h" +#include "llvm/Target/TargetMachine.h" + +using namespace llvm; + +//===----------------------------------------------------------------------===// +// ELF Target +//===----------------------------------------------------------------------===// + +void XtensaElfTargetObjectFile::Initialize(MCContext &Ctx, + const TargetMachine &TM) { + TargetLoweringObjectFileELF::Initialize(Ctx, TM); + InitializeELF(false); +} diff --git a/llvm/lib/Target/Xtensa/XtensaTargetObjectFile.h b/llvm/lib/Target/Xtensa/XtensaTargetObjectFile.h new file mode 100644 index 0000000000000..dae8f890459aa --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaTargetObjectFile.h @@ -0,0 +1,25 @@ +//===- llvm/Target/XtensaTargetObjectFile.h - Xtensa Object Info -*- C++ -*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_XTENSA_XTENSATARGETOBJECTFILE_H +#define LLVM_LIB_TARGET_XTENSA_XTENSATARGETOBJECTFILE_H + +#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" + +namespace llvm { + +class XtensaElfTargetObjectFile : public TargetLoweringObjectFileELF { +public: + XtensaElfTargetObjectFile() : TargetLoweringObjectFileELF() {} + + void Initialize(MCContext &Ctx, const TargetMachine &TM) override; +}; + +} // end namespace llvm + +#endif // LLVM_LIB_TARGET_XTENSA_XTENSATARGETOBJECTFILE_H From cc326bfba569d04ffd1dfffd311404d1f8f70ec5 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 11:49:36 +0300 Subject: [PATCH 053/289] [Xtensa] Implement Hardware Loop optimization pass --- llvm/lib/Target/Xtensa/CMakeLists.txt | 3 + .../Disassembler/XtensaDisassembler.cpp | 10 + .../Xtensa/MCTargetDesc/XtensaAsmBackend.cpp | 8 +- .../Xtensa/MCTargetDesc/XtensaFixupKinds.h | 1 + .../Xtensa/MCTargetDesc/XtensaInstPrinter.cpp | 15 + .../Xtensa/MCTargetDesc/XtensaInstPrinter.h | 1 + .../MCTargetDesc/XtensaMCCodeEmitter.cpp | 21 + llvm/lib/Target/Xtensa/Xtensa.h | 2 + llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp | 2 + llvm/lib/Target/Xtensa/XtensaFixupHWLoops.cpp | 388 ++++++++++++++++++ .../lib/Target/Xtensa/XtensaHardwareLoops.cpp | 335 +++++++++++++++ llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 111 ++++- llvm/lib/Target/Xtensa/XtensaISelLowering.h | 3 + llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp | 56 +++ llvm/lib/Target/Xtensa/XtensaInstrInfo.h | 4 + llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 41 +- llvm/lib/Target/Xtensa/XtensaOperands.td | 9 +- llvm/lib/Target/Xtensa/XtensaOperators.td | 6 + .../lib/Target/Xtensa/XtensaTargetMachine.cpp | 21 + llvm/lib/Target/Xtensa/XtensaTargetMachine.h | 2 + .../Xtensa/XtensaTargetTransformInfo.cpp | 35 ++ .../Target/Xtensa/XtensaTargetTransformInfo.h | 51 +++ 
llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll | 55 +++ .../CodeGen/Xtensa/hwloop_unsuitable_loop.ll | 53 +++ 24 files changed, 1203 insertions(+), 30 deletions(-) create mode 100644 llvm/lib/Target/Xtensa/XtensaFixupHWLoops.cpp create mode 100644 llvm/lib/Target/Xtensa/XtensaHardwareLoops.cpp create mode 100644 llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.cpp create mode 100644 llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.h create mode 100644 llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll create mode 100644 llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll diff --git a/llvm/lib/Target/Xtensa/CMakeLists.txt b/llvm/lib/Target/Xtensa/CMakeLists.txt index 3d119e8f86b32..21366425c55ef 100644 --- a/llvm/lib/Target/Xtensa/CMakeLists.txt +++ b/llvm/lib/Target/Xtensa/CMakeLists.txt @@ -17,7 +17,9 @@ add_public_tablegen_target(XtensaCommonTableGen) add_llvm_target(XtensaCodeGen XtensaAsmPrinter.cpp XtensaConstantPoolValue.cpp + XtensaFixupHWLoops.cpp XtensaFrameLowering.cpp + XtensaHardwareLoops.cpp XtensaInstrInfo.cpp XtensaISelDAGToDAG.cpp XtensaISelLowering.cpp @@ -28,6 +30,7 @@ add_llvm_target(XtensaCodeGen XtensaTargetMachine.cpp XtensaTargetObjectFile.cpp XtensaUtils.cpp + XtensaTargetTransformInfo.cpp LINK_COMPONENTS AsmPrinter diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index c10c31d4f0164..2835e682ed199 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -443,6 +443,16 @@ static DecodeStatus decodeBranchOperand(MCInst &Inst, uint64_t Imm, return MCDisassembler::Success; } +static DecodeStatus decodeLoopOperand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder) { + + assert(isUInt<8>(Imm) && "Invalid immediate"); + if (!tryAddingSymbolicOperand(Imm + 4 + Address, true, Address, 0, 3, Inst, + Decoder)) + Inst.addOperand(MCOperand::createImm(Imm)); + return 
MCDisassembler::Success; +} + static DecodeStatus decodeL32ROperand(MCInst &Inst, uint64_t Imm, int64_t Address, const void *Decoder) { diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaAsmBackend.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaAsmBackend.cpp index a296a22247a5c..7da92dc4c2af6 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaAsmBackend.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaAsmBackend.cpp @@ -67,7 +67,8 @@ XtensaMCAsmBackend::getFixupKindInfo(MCFixupKind Kind) const { MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, {"fixup_xtensa_l32r_16", 8, 16, MCFixupKindInfo::FKF_IsPCRel | - MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}}; + MCFixupKindInfo::FKF_IsAlignedDownTo32Bits}, + {"fixup_xtensa_loop_8", 16, 8, MCFixupKindInfo::FKF_IsPCRel}}; if (Kind < FirstTargetFixupKind) return MCAsmBackend::getFixupKindInfo(Kind); @@ -117,6 +118,11 @@ static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value, if (Value & 0x3) Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned"); return (Value & 0xffffc) >> 2; + case Xtensa::fixup_xtensa_loop_8: + Value -= 4; + if (!isUInt<8>(Value)) + Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); + return (Value & 0xff); case Xtensa::fixup_xtensa_l32r_16: unsigned Offset = Fixup.getOffset(); if (Offset & 0x3) diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaFixupKinds.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaFixupKinds.h index 57b114e709a8a..f6b1e58adf073 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaFixupKinds.h +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaFixupKinds.h @@ -22,6 +22,7 @@ enum FixupKind { fixup_xtensa_jump_18, fixup_xtensa_call_18, fixup_xtensa_l32r_16, + fixup_xtensa_loop_8, fixup_xtensa_invalid, LastTargetFixupKind, NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp index 
89343d203e9eb..8d5e56b35b51c 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp @@ -114,6 +114,21 @@ void XtensaInstPrinter::printBranchTarget(const MCInst *MI, int OpNum, llvm_unreachable("Invalid operand"); } +void XtensaInstPrinter::printLoopTarget(const MCInst *MI, int OpNum, + raw_ostream &OS) { + const MCOperand &MC = MI->getOperand(OpNum); + if (MI->getOperand(OpNum).isImm()) { + int64_t Val = MC.getImm() + 4; + OS << ". "; + if (Val > 0) + OS << '+'; + OS << Val; + } else if (MC.isExpr()) + MC.getExpr()->print(OS, &MAI, true); + else + llvm_unreachable("Invalid operand"); +} + void XtensaInstPrinter::printJumpTarget(const MCInst *MI, int OpNum, raw_ostream &OS) { const MCOperand &MC = MI->getOperand(OpNum); diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h index f6858b383cbf1..62b080c635706 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h @@ -48,6 +48,7 @@ class XtensaInstPrinter : public MCInstPrinter { void printOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printMemOperand(const MCInst *MI, int OpNUm, raw_ostream &O); void printBranchTarget(const MCInst *MI, int OpNum, raw_ostream &O); + void printLoopTarget(const MCInst *MI, int OpNum, raw_ostream &O); void printJumpTarget(const MCInst *MI, int OpNum, raw_ostream &O); void printCallOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printL32RTarget(const MCInst *MI, int OpNum, raw_ostream &O); diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp index 88daa562baedc..14f9026a1ea4b 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp @@ -67,6 +67,10 @@ class XtensaMCCodeEmitter : 
public MCCodeEmitter { SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; + uint32_t getLoopTargetEncoding(const MCInst &MI, unsigned int OpNum, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + uint32_t getCallEncoding(const MCInst &MI, unsigned int OpNum, SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; @@ -219,6 +223,23 @@ uint32_t XtensaMCCodeEmitter::getBranchTargetEncoding( } } +uint32_t +XtensaMCCodeEmitter::getLoopTargetEncoding(const MCInst &MI, unsigned int OpNum, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNum); + if (MO.isImm()) + return static_cast(MO.getImm()); + + assert((MO.isExpr()) && "Unexpected operand value!"); + + const MCExpr *Expr = MO.getExpr(); + + Fixups.push_back(MCFixup::create( + 0, Expr, MCFixupKind(Xtensa::fixup_xtensa_loop_8), MI.getLoc())); + return 0; +} + uint32_t XtensaMCCodeEmitter::getCallEncoding(const MCInst &MI, unsigned int OpNum, SmallVectorImpl &Fixups, diff --git a/llvm/lib/Target/Xtensa/Xtensa.h b/llvm/lib/Target/Xtensa/Xtensa.h index bbf580ffd8904..3322d66eb7610 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.h +++ b/llvm/lib/Target/Xtensa/Xtensa.h @@ -26,5 +26,7 @@ FunctionPass *createXtensaISelDag(XtensaTargetMachine &TM, CodeGenOptLevel OptLevel); FunctionPass *createXtensaSizeReductionPass(); +FunctionPass *createXtensaHardwareLoops(); +FunctionPass *createXtensaFixupHwLoops(); } // namespace llvm #endif // LLVM_LIB_TARGET_XTENSA_XTENSA_H diff --git a/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp index 58e646f58563f..57bb8dd43317c 100644 --- a/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp +++ b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp @@ -52,6 +52,8 @@ void XtensaAsmPrinter::emitInstruction(const MachineInstr *MI) { *OutStreamer, MCInstBuilder(Xtensa::JX).addReg(MI->getOperand(0).getReg())); return; + case Xtensa::LOOPEND: + return; default: MCInst LoweredMI; lowerToMCInst(MI, 
LoweredMI); diff --git a/llvm/lib/Target/Xtensa/XtensaFixupHWLoops.cpp b/llvm/lib/Target/Xtensa/XtensaFixupHWLoops.cpp new file mode 100644 index 0000000000000..48b0a515ee985 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaFixupHWLoops.cpp @@ -0,0 +1,388 @@ +//===---- XtensaFixupHWLoops.cpp - Fixup HW loops -------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +//===----------------------------------------------------------------------===// + +#include "Xtensa.h" +#include "XtensaTargetMachine.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineLoopInfo.h" +#include "llvm/CodeGen/Passes.h" +#include "llvm/CodeGen/TargetInstrInfo.h" +#include "llvm/Pass.h" +#include "llvm/Support/MathExtras.h" + +using namespace llvm; + +namespace llvm { +FunctionPass *createXtensaFixupHwLoops(); +void initializeXtensaFixupHwLoopsPass(PassRegistry &); +} // namespace llvm + +namespace { +class XtensaFixupHwLoops : public MachineFunctionPass { + // BasicBlockInfo - Information about the offset and size of a single + // basic block. + struct BasicBlockInfo { + // Offset - Distance from the beginning of the function to the beginning + // of this basic block. + // + // The offset is always aligned as required by the basic block. + unsigned Offset = 0; + + // Size - Size of the basic block in bytes. If the block contains + // inline assembly, this is a worst case estimate. + // + // The size does not include any alignment padding whether from the + // beginning of the block, or from an aligned jump table at the end. + unsigned Size = 0; + + BasicBlockInfo() = default; + + // Compute the offset immediately following this block. \p MBB is the next + // block. 
+ unsigned postOffset(const MachineBasicBlock &MBB) const { + const unsigned PO = Offset + Size; + const Align Alignment = MBB.getAlignment(); + if (Alignment == 1) + return PO; + + const Align ParentAlign = MBB.getParent()->getAlignment(); + if (Alignment <= ParentAlign) + return PO + offsetToAlignment(PO, Alignment); + + // The alignment of this MBB is larger than the function's alignment, so + // we can't tell whether or not it will insert nops. Assume that it will. + return PO + Alignment.value() + offsetToAlignment(PO, Alignment); + } + }; + + SmallVector BlockInfo; + SmallPtrSet AnalyzedMBBs; + + MachineFunction *MF; + MachineLoopInfo *MLI; + const TargetRegisterInfo *TRI; + const TargetInstrInfo *TII; + + bool processLoop(MachineLoop *L); + + bool fixupLoopInstrs(MachineLoop *L); + + void scanFunction(); + + uint64_t computeBlockSize(const MachineBasicBlock &MBB) const; + + void adjustBlockOffsets(MachineBasicBlock &Start); + +public: + static char ID; + + XtensaFixupHwLoops() : MachineFunctionPass(ID) { + initializeXtensaFixupHwLoopsPass(*PassRegistry::getPassRegistry()); + } + + bool runOnMachineFunction(MachineFunction &MF) override; + + MachineFunctionProperties getRequiredProperties() const override { + return MachineFunctionProperties().set( + MachineFunctionProperties::Property::NoVRegs); + } + + StringRef getPassName() const override { + return "Xtensa Hardware Loop Fixup"; + } + + void getAnalysisUsage(AnalysisUsage &AU) const override { + AU.setPreservesCFG(); + AU.addRequired(); + MachineFunctionPass::getAnalysisUsage(AU); + } +}; + +char XtensaFixupHwLoops::ID = 0; +} // namespace + +INITIALIZE_PASS(XtensaFixupHwLoops, "hwloopsfixup", + "Xtensa Hardware Loops Fixup", false, false) + +FunctionPass *llvm::createXtensaFixupHwLoops() { + return new XtensaFixupHwLoops(); +} + +// Returns true if the instruction is a hardware loop instruction. 
+static bool isHardwareLoop(const MachineInstr &MI) { + return (MI.getOpcode() == Xtensa::LOOPSTART); +} + +bool XtensaFixupHwLoops::runOnMachineFunction(MachineFunction &mf) { + if (skipFunction(mf.getFunction())) + return false; + + MF = &mf; + MLI = &getAnalysis().getLI(); + const TargetSubtargetInfo &ST = mf.getSubtarget(); + TII = ST.getInstrInfo(); + TRI = ST.getRegisterInfo(); + + // Renumber all of the machine basic blocks in the function, guaranteeing that + // the numbers agree with the position of the block in the function. + mf.RenumberBlocks(); + + // Do the initial scan of the function, building up information about the + // sizes of each block. + scanFunction(); + + AnalyzedMBBs.clear(); + + bool Changed = false; + + for (auto &L : *MLI) + if (!L->getParentLoop()) { + Changed |= processLoop(L); + } + + return Changed; +} + +// Scan loop and find hardware loop pseudo instructions LOOPSTART and LOOPEND. +// Transform LOOPSTART to Xtensa instructions and remove LOOPEND. +bool XtensaFixupHwLoops::fixupLoopInstrs(MachineLoop *L) { + // const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); + MachineBasicBlock &MBB = *(L->getHeader()); + bool Changed = false; + unsigned Num = MBB.getNumber(); + unsigned Offset = BlockInfo[Num].Offset; + MachineBasicBlock *LastBlock = nullptr; + unsigned LHOffset = Offset; + unsigned LastBlockOffset = 0; + + // Loop over all the instructions. 
+ MachineBasicBlock::iterator MII = MBB.begin(); + MachineBasicBlock::iterator MIE = MBB.end(); + MachineInstr *PredI1 = nullptr; + MachineInstr *FirstMI = nullptr; + + for (auto MBI = L->block_begin(), MBIE = L->block_end(); MBI != MBIE; ++MBI) { + if (LastBlockOffset < BlockInfo[(*MBI)->getNumber()].Offset) { + LastBlockOffset = BlockInfo[(*MBI)->getNumber()].Offset; + LastBlock = (*MBI); + } + } + + while (MII != MIE) { + if (MII->isMetaInstruction()) { + ++MII; + continue; + } + + MachineInstr &MI = *MII; + + if (FirstMI == nullptr) + FirstMI = &MI; + + if (isHardwareLoop(*MII)) { + MachineBasicBlock *LoopEnd = nullptr; + + MII->getNextNode(); + + MachineBasicBlock::iterator NextMII = std::next(MII); + + // Check whether loop is empty and remove if true + if (NextMII != MIE) { + if ((*NextMII).getOpcode() == Xtensa::LOOPEND) { + MBB.erase(*NextMII); + MBB.erase(*MII); + MBB.removeSuccessor(&MBB, true); + return true; + } + } + + for (MachineBasicBlock::pred_iterator PI = MBB.pred_begin(), + PE = MBB.pred_end(); + PI != PE; ++PI) { + MachineBasicBlock *PMBB = *PI; + MachineBasicBlock::iterator PIB = PMBB->begin(); + MachineBasicBlock::iterator PII = PMBB->end(); + + do { + --PII; + if (PII->isMetaInstruction()) { + continue; + } + + if ((*PII).getOpcode() == Xtensa::LOOPEND) { + DebugLoc DL = PII->getDebugLoc(); + unsigned OffsetLE = BlockInfo[PMBB->getNumber()].Offset; + + // Check if loop end is placed before loop header + // In such case add special MBB after loop header and create jump + // from loop end to it + if (OffsetLE < LHOffset) { + LoopEnd = MF->CreateMachineBasicBlock(); + MF->insert(++LastBlock->getIterator(), LoopEnd); + LoopEnd->transferSuccessors(PMBB); + LoopEnd->splice(LoopEnd->end(), PMBB, PII, PMBB->end()); + + MachineBasicBlock::iterator LEI = LoopEnd->end(); + --LEI; + + // Expect jump instruction + assert((LEI->getOpcode() == Xtensa::J) && "Broken hardware loop"); + + // Create block and insert it before loop end address as + // target 
for jump instruction to avoid premature exit from loop + MachineBasicBlock *BlockForJump = MF->CreateMachineBasicBlock(); + MF->insert(LoopEnd->getIterator(), BlockForJump); + BlockForJump->addSuccessor(LoopEnd); + BuildMI(*BlockForJump, BlockForJump->end(), DL, + TII->get(Xtensa::NOP)); + BuildMI(*PMBB, PMBB->end(), DL, TII->get(Xtensa::J)) + .addMBB(BlockForJump); + PMBB->addSuccessor(BlockForJump); + + BuildMI(*LoopEnd, LoopEnd->begin(), DL, TII->get(Xtensa::LOOPEND)) + .addMBB(LoopEnd); + LoopEnd->addSuccessor(LoopEnd); + Changed = true; + break; + } + + if (PII != PIB) { + LoopEnd = MF->CreateMachineBasicBlock(); + MF->insert(++(PMBB->getIterator()), LoopEnd); + LoopEnd->transferSuccessors(PMBB); + LoopEnd->splice(LoopEnd->end(), PMBB, PII, PMBB->end()); + PMBB->addSuccessor(LoopEnd); + + BuildMI(*LoopEnd, LoopEnd->begin(), DL, TII->get(Xtensa::LOOPEND)) + .addMBB(LoopEnd); + LoopEnd->addSuccessor(LoopEnd); + } else { + BuildMI(*PMBB, PII, DL, TII->get(Xtensa::LOOPEND)).addMBB(PMBB); + PMBB->addSuccessor(PMBB); + BuildMI(*PMBB, PII, DL, TII->get(Xtensa::NOP)); + LoopEnd = PMBB; + } + + Changed = true; + break; + } + } while (PII != PIB); + if (Changed) + break; + } + + assert((Changed) && "Broken hardware loop"); + + if (MII != FirstMI) { + MBB.splice(FirstMI->getIterator(), &MBB, MII); + Offset = BlockInfo[Num].Offset; + switch (PredI1->getOpcode()) { + case Xtensa::L32I_N: + if (PredI1->getOperand(0).getReg() == MII->getOperand(0).getReg()) { + MBB.splice(MII, &MBB, PredI1); + Offset += 2; + } + break; + case Xtensa::L32I: + if (PredI1->getOperand(0).getReg() == MII->getOperand(0).getReg()) { + MBB.splice(MII, &MBB, PredI1); + Offset += 3; + } + break; + } + } + + DebugLoc DL = MII->getDebugLoc(); + + // Fixup Loop alignment + switch (Offset & 0x3) { + case 0x0: + BuildMI(MBB, MII, DL, TII->get(Xtensa::NOP)); + BuildMI(MBB, MII, DL, TII->get(Xtensa::NOP)); + break; + case 0x3: + BuildMI(MBB, MII, DL, TII->get(Xtensa::NOP)); + break; + } + + BuildMI(MBB, MII, 
DL, TII->get(Xtensa::LOOP)) + .addReg(MII->getOperand(0).getReg()) + .addMBB(LoopEnd); + MBB.erase(MII); + + MF->RenumberBlocks(); + scanFunction(); + AnalyzedMBBs.insert(&MBB); + return true; + } else { + Offset += TII->getInstSizeInBytes(MI); + PredI1 = &MI; + ++MII; + } + } + + return Changed; +} + +bool XtensaFixupHwLoops::processLoop(MachineLoop *L) { + bool Changed = false; + + // Process nested loops first. + for (MachineLoop::iterator I = L->begin(), E = L->end(); I != E; ++I) { + Changed |= processLoop(*I); + } + + if (Changed) + return true; + + return fixupLoopInstrs(L); +} + +// scanFunction - Do the initial scan of the function, building up +// information about each block. +void XtensaFixupHwLoops::scanFunction() { + BlockInfo.clear(); + BlockInfo.resize(MF->getNumBlockIDs()); + + // First thing, compute the size of all basic blocks, and see if the function + // has any inline assembly in it. If so, we have to be conservative about + // alignment assumptions, as we don't know for sure the size of any + // instructions in the inline assembly. + for (MachineBasicBlock &MBB : *MF) + BlockInfo[MBB.getNumber()].Size = computeBlockSize(MBB); + + // Compute block offsets and known bits. + adjustBlockOffsets(*MF->begin()); +} + +// computeBlockSize - Compute the size for MBB. +uint64_t +XtensaFixupHwLoops::computeBlockSize(const MachineBasicBlock &MBB) const { + uint64_t Size = 0; + for (const MachineInstr &MI : MBB) + if (MI.getOpcode() != Xtensa::LOOPEND) + Size += TII->getInstSizeInBytes(MI); + return Size; +} + +void XtensaFixupHwLoops::adjustBlockOffsets(MachineBasicBlock &Start) { + unsigned PrevNum = Start.getNumber(); + for (auto &MBB : make_range(MachineFunction::iterator(Start), MF->end())) { + unsigned Num = MBB.getNumber(); + if (!Num) // block zero is never changed from offset zero. + continue; + // Get the offset and known bits at the end of the layout predecessor. + // Include the alignment of the current block. 
+ BlockInfo[Num].Offset = BlockInfo[PrevNum].postOffset(MBB); + + PrevNum = Num; + } +} + diff --git a/llvm/lib/Target/Xtensa/XtensaHardwareLoops.cpp b/llvm/lib/Target/Xtensa/XtensaHardwareLoops.cpp new file mode 100644 index 0000000000000..a12cfcbb3e524 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaHardwareLoops.cpp @@ -0,0 +1,335 @@ +//===- XtensaHardwareLoops.cpp - Idenify and generate hardware Loops ------===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains implementation of the pass which optimizes loops . +// +//===----------------------------------------------------------------------===// + +#include "XtensaInstrInfo.h" +#include "XtensaSubtarget.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallSet.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineLoopInfo.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/TargetRegisterInfo.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DebugLoc.h" +#include "llvm/InitializePasses.h" +#include "llvm/Pass.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/raw_ostream.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using 
namespace llvm; + +#define DEBUG_TYPE "xtensa-hwloops" +#define MAX_LOOP_SIZE 256 + +namespace llvm { + +FunctionPass *createXtensaHardwareLoops(); +void initializeXtensaHardwareLoopsPass(PassRegistry &); + +} // end namespace llvm + +namespace { + +struct XtensaHardwareLoops : public MachineFunctionPass { + MachineLoopInfo *MLI; + MachineRegisterInfo *MRI; + MachineDominatorTree *MDT; + const XtensaInstrInfo *TII; + const XtensaSubtarget *STI; + SmallPtrSet VisitedMBBs; + +public: + static char ID; + + XtensaHardwareLoops() : MachineFunctionPass(ID) {} + + bool runOnMachineFunction(MachineFunction &MF) override; + + StringRef getPassName() const override { return "Xtensa Hardware Loops"; } + + void getAnalysisUsage(AnalysisUsage &AU) const override { + AU.addRequired(); + MachineFunctionPass::getAnalysisUsage(AU); + } + +private: + // Return true if the instruction is not valid within a hardware + // loop. + bool isInvalidLoopOperation(const MachineInstr *MI) const; + + // Return true if the loop contains an instruction that inhibits + // using the hardware loop. + bool containsInvalidInstruction(MachineLoop *L) const; + + // Given a loop, check if we can convert it to a hardware loop. + // If so, then perform the conversion and return true. 
+ bool processLoop(MachineLoop *L); + + bool checkLoopSize(MachineLoop *L); + + bool checkLoopEndDisplacement(MachineFunction &MF, MachineBasicBlock *LH, MachineBasicBlock* LE); +}; + +char XtensaHardwareLoops::ID = 0; + +} // end anonymous namespace + +INITIALIZE_PASS(XtensaHardwareLoops, "hwloops", "Xtensa Hardware Loops", false, + false) + +FunctionPass *llvm::createXtensaHardwareLoops() { + return new XtensaHardwareLoops(); +} + +bool XtensaHardwareLoops::runOnMachineFunction(MachineFunction &MF) { + LLVM_DEBUG(dbgs() << "********* Xtensa Hardware Loops *********\n"); + if (skipFunction(MF.getFunction())) + return false; + + bool Changed = false; + + MLI = &getAnalysis().getLI(); + MRI = &MF.getRegInfo(); + STI = &MF.getSubtarget(); + TII = STI->getInstrInfo(); + + if (!STI->hasLoop()) + return false; + + VisitedMBBs.clear(); + + for (auto &L : *MLI) + if (!L->getParentLoop()) { + Changed |= processLoop(L); + } + + return Changed; +} + +// Return true if the operation is invalid within hardware loop. +bool XtensaHardwareLoops::isInvalidLoopOperation(const MachineInstr *MI) const { + + // Call is not allowed because the callee may use a hardware loop + if (MI->getDesc().isCall()) + return true; + + if ((MI->getOpcode() == Xtensa::LOOP) || + (MI->getOpcode() == Xtensa::LOOPGTZ) || + (MI->getOpcode() == Xtensa::LOOPNEZ)) + return true; + + if (MI->isInlineAsm()) + return true; + + return false; +} + +// Return true if the loop contains an instruction that inhibits +// the use of the hardware loop instruction. 
+bool XtensaHardwareLoops::containsInvalidInstruction(MachineLoop *L) const { + LLVM_DEBUG(dbgs() << "\nhw_loop head, " + << printMBBReference(**L->block_begin())); + for (MachineBasicBlock *MBB : L->getBlocks()) { + for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end(); + MII != E; ++MII) { + const MachineInstr *MI = &*MII; + if (isInvalidLoopOperation(MI)) { + LLVM_DEBUG(dbgs() << "\nCannot convert to hw_loop due to:"; + MI->dump();); + return true; + } + } + } + return false; +} + +// Check if this loop is suitable for converting to a hardware loop +bool XtensaHardwareLoops::processLoop(MachineLoop *L) { + // This is just for sanity. + assert(L->getHeader() && "Loop without a header?"); + + bool Changed = false; + + // Process nested loops first. + for (MachineLoop::iterator I = L->begin(), E = L->end(); I != E; ++I) { + Changed |= processLoop(*I); + } + + if (Changed) + return true; + + using instr_iterator = MachineBasicBlock::instr_iterator; + MachineInstr *LII = nullptr; // LOOPINIT instruction + MachineInstr *LEI = nullptr; // LOOPEND instruction + MachineBasicBlock *LEMBB = nullptr; + MachineBasicBlock *PH = L->getLoopPreheader(); + MachineBasicBlock *LastMBB = L->getLoopLatch(); + + // Try to find LOOPEND instruction in the loop latch + for (auto MBI = L->block_begin(), MBIE = L->block_end(); MBI != MBIE; ++MBI) { + if (VisitedMBBs.count(*MBI)) + continue; + for (auto MII = (*MBI)->begin(), MIE = (*MBI)->end(); MII != MIE; ++MII) { + MachineInstr *LMI = &*MII; + if (LMI->getOpcode() == Xtensa::LOOPEND) { + LEI = LMI; + LEMBB = *MBI; + } + } + VisitedMBBs.insert(*MBI); + } + + if (LEI != nullptr) { + MachineBasicBlock *LH = L->getHeader(); + MachineBasicBlock::iterator LHI = LH->getFirstNonPHI(); + + if (!PH) { + llvm_unreachable("Hardware loop predecessor not found"); + return false; + } + + MachineBasicBlock *LIMBB = PH; + + // Try to find LOOPINIT instruction in predecessors chain + while ((LII == nullptr) && (LIMBB != nullptr) && + 
((L->getParentLoop() == nullptr) || + (L->getParentLoop()->contains(LIMBB)))) { + for (instr_iterator I = LIMBB->instr_begin(), E = LIMBB->instr_end(); + I != E; ++I) { + MachineInstr *MI = &*I; + if (MI->getOpcode() == Xtensa::LOOPINIT) { + LII = MI; + break; + } + } + if (LII == nullptr) + LIMBB = *LIMBB->pred_begin(); + } + + if (LII == nullptr) { + llvm_unreachable("Hardware loop init instruction not found"); + return false; + } + + DebugLoc DL = LII->getDebugLoc(); + + // If loop is too large or have wrong configuration + // then restore branch instruction + // sub a, a, 1 + // bnez a, LH + if (!checkLoopSize(L) || containsInvalidInstruction(L) || + (LEMBB != LastMBB) || (!checkLoopEndDisplacement(*LH->getParent(), LH, LEMBB))) { + const MCInstrDesc &PD = TII->get(TargetOpcode::PHI); + MachineInstr *NewPN = LH->getParent()->CreateMachineInstr(PD, DL); + LH->insert(LH->begin(), NewPN); + Register PR = MRI->createVirtualRegister(&Xtensa::ARRegClass); + NewPN->addOperand(MachineOperand::CreateReg(PR, true)); + + MachineOperand MO = + MachineOperand::CreateReg(LII->getOperand(0).getReg(), false); + NewPN->addOperand(MO); + NewPN->addOperand(MachineOperand::CreateMBB(PH)); + + Register IndR = MRI->createVirtualRegister(&Xtensa::ARRegClass); + MO = MachineOperand::CreateReg(IndR, false); + NewPN->addOperand(MO); + NewPN->addOperand(MachineOperand::CreateMBB(LastMBB)); + + MachineInstrBuilder MIB = + BuildMI(*LEMBB, LEI, LEI->getDebugLoc(), TII->get(Xtensa::ADDI), IndR) + .addReg(PR) + .addImm(-1); + + MIB = BuildMI(*LEMBB, LEI, LEI->getDebugLoc(), TII->get(Xtensa::BNEZ)) + .addReg(IndR) + .addMBB(LEI->getOperand(0).getMBB()); + LEMBB->erase(LEI); + PH->erase(LII); + return false; + } + + //Place LOOPSTART instruction in loop header + BuildMI(*LH, LHI, DL, TII->get(Xtensa::LOOPSTART)) + .addReg(LII->getOperand(0).getReg()) + .addMBB(LastMBB); + PH->erase(LII); + return true; + } + + return false; +} + +bool XtensaHardwareLoops::checkLoopSize(MachineLoop *L) { + 
uint64_t LoopSize = 0; + + for (auto *MBB : L->getBlocks()) { + uint64_t BlockSize = 0; + for (const MachineInstr &MI : *MBB) { + uint64_t InstSize = TII->getInstSizeInBytes(MI); + if (MI.isPHI()) + InstSize = 3; + BlockSize += InstSize; + } + LoopSize += BlockSize; + } + + if (LoopSize > MAX_LOOP_SIZE) + return false; + + return true; +} + +bool XtensaHardwareLoops::checkLoopEndDisplacement(MachineFunction &MF, + MachineBasicBlock *LH, + MachineBasicBlock *LE) { + bool isLHVisited = false; + + if (LH == LE) + return true; + + for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) { + MachineBasicBlock *MBB = &*I; + if (MBB == LH) + isLHVisited = true; + else if (MBB == LE) { + if (isLHVisited) + return true; + else + return false; + } + } + llvm_unreachable("Wrong hardware loop"); +} + diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 85d0f526ccc75..fdb84fc7cab4a 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -512,29 +512,106 @@ static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG, return SDValue(); } +static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm, + bool &Negate) { + switch (N->getOpcode()) { + default: + break; + case ISD::XOR: { + if (!isa(N.getOperand(1))) + return SDValue(); + if (!cast(N.getOperand(1))->isOne()) + return SDValue(); + Negate = !Negate; + return SearchLoopIntrinsic(N.getOperand(0), CC, Imm, Negate); + } + case ISD::SETCC: { + auto *Const = dyn_cast(N.getOperand(1)); + if (!Const) + return SDValue(); + if (Const->isZero()) + Imm = 0; + else if (Const->isOne()) + Imm = 1; + else + return SDValue(); + CC = cast(N.getOperand(2))->get(); + return SearchLoopIntrinsic(N->getOperand(0), CC, Imm, Negate); + } + case ISD::INTRINSIC_W_CHAIN: { + unsigned IntOp = cast(N.getOperand(1))->getZExtValue(); + if (IntOp != Intrinsic::loop_decrement) + return SDValue(); + return 
N; + } + } + return SDValue(); +} + static SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const XtensaSubtarget &Subtarget) { - if (DCI.isBeforeLegalizeOps()) { - SDValue Chain = N->getOperand(0); + SDValue Chain = N->getOperand(0); + SDLoc DL(N); + SDValue Cond = N->getOperand(1); + SDValue Dest = N->getOperand(2); + ISD::CondCode CC = ISD::SETEQ; + int Imm = 1; + bool Negate = false; + + SDValue Int = SearchLoopIntrinsic(Cond, CC, Imm, Negate); + if (Int) { + assert((N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BR) && + "expected single br user"); + SDNode *Br = *N->use_begin(); + SDValue OtherTarget = Br->getOperand(1); + + if (Negate) + CC = ISD::getSetCCInverse(CC, /* Integer inverse */ MVT::i32); + + auto IsTrueIfZero = [](ISD::CondCode CC, int Imm) { + return (CC == ISD::SETEQ && Imm == 0) || (CC == ISD::SETNE && Imm == 1) || + (CC == ISD::SETLT && Imm == 1) || (CC == ISD::SETULT && Imm == 1); + }; + + auto IsFalseIfZero = [](ISD::CondCode CC, int Imm) { + return (CC == ISD::SETEQ && Imm == 1) || (CC == ISD::SETNE && Imm == 0) || + (CC == ISD::SETGT && Imm == 0) || + (CC == ISD::SETUGT && Imm == 0) || + (CC == ISD::SETGE && Imm == 1) || (CC == ISD::SETUGE && Imm == 1); + }; + + if (IsTrueIfZero(CC, Imm)) { + SDValue NewBrOps[] = {Br->getOperand(0), Dest}; + SDValue NewBr = DAG.getNode(ISD::BR, SDLoc(Br), MVT::Other, NewBrOps); + DAG.ReplaceAllUsesOfValueWith(SDValue(Br, 0), NewBr); + Dest = OtherTarget; + } else if (!IsFalseIfZero(CC, Imm)) { + llvm_unreachable("unsupported condition"); + } - if (N->getOperand(1).getOpcode() != ISD::SETCC) - return SDValue(); + // We now need to make the intrinsic dead (it cannot be instruction + // selected). 
+ DAG.ReplaceAllUsesOfValueWith(Int.getValue(1), Int.getOperand(0)); + assert(Int.getNode()->hasOneUse() && + "Counter decrement has more than one use"); - SDLoc DL(N); - SDValue SetCC = N->getOperand(1); - SDValue Dest = N->getOperand(2); - ISD::CondCode CC = cast(SetCC->getOperand(2))->get(); - SDValue LHS = SetCC->getOperand(0); - SDValue RHS = SetCC->getOperand(1); + return DAG.getNode(XtensaISD::LOOPEND, DL, MVT::Other, N->getOperand(0), + Dest); + } - if (LHS.getValueType() != MVT::i32) - return SDValue(); + if (Cond.getOpcode() != ISD::SETCC) + return SDValue(); - return DAG.getNode(ISD::BR_CC, DL, MVT::isVoid, Chain, DAG.getCondCode(CC), - LHS, RHS, Dest); - } - return SDValue(); + CC = cast(Cond->getOperand(2))->get(); + SDValue LHS = Cond->getOperand(0); + SDValue RHS = Cond->getOperand(1); + + if (LHS.getValueType() != MVT::i32) + return SDValue(); + + return DAG.getNode(ISD::BR_CC, DL, MVT::isVoid, Chain, DAG.getCondCode(CC), + LHS, RHS, Dest); } SDValue XtensaTargetLowering::PerformDAGCombine(SDNode *N, @@ -1741,6 +1818,8 @@ const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const { return "XtensaISD::CMPOLE"; case XtensaISD::CMPOLT: return "XtensaISD::CMPOLT"; + case XtensaISD::LOOPEND: + return "XtensaISD::LOOPEND"; case XtensaISD::MADD: return "XtensaISD::MADD"; case XtensaISD::MSUB: diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index 3044c415a220e..b4e4bb17063d2 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -52,6 +52,9 @@ enum { CMPOEQ, CMPOLE, CMPOLT, + + LOOPEND, + // FP multipy-add/sub MADD, MSUB, diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp index b4645c59a50aa..d72fb3bf7fcbf 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp @@ -282,6 +282,10 @@ bool XtensaInstrInfo::reverseBranchCondition( case 
Xtensa::BT: Cond[0].setImm(Xtensa::BF); return false; + + case Xtensa::LOOPEND: + return true; + default: llvm_unreachable("Invalid branch condition!"); } @@ -295,6 +299,7 @@ XtensaInstrInfo::getBranchDestBlock(const MachineInstr &MI) const { case Xtensa::JX: return nullptr; case Xtensa::J: + case Xtensa::LOOPEND: return MI.getOperand(0).getMBB(); case Xtensa::BEQ: case Xtensa::BNE: @@ -335,6 +340,11 @@ bool XtensaInstrInfo::isBranchOffsetInRange(unsigned BranchOp, return isIntN(18, BrOffset); case Xtensa::JX: return true; + case Xtensa::LOOPEND: + BrOffset += 4; + BrOffset += 3 * 3; // 2*NOP + LOOP instrucions + assert((BrOffset <= 0) && "Wrong hardware loop"); + return true; case Xtensa::BR_JT: return true; case Xtensa::BEQ: @@ -653,6 +663,9 @@ unsigned XtensaInstrInfo::InsertBranchAtInst(MachineBasicBlock &MBB, case Xtensa::BF: MI = BuildMI(MBB, I, DL, get(BR_C)).addReg(Cond[1].getReg()).addMBB(TBB); break; + case Xtensa::LOOPEND: + MI = BuildMI(MBB, I, DL, get(BR_C)).addMBB(TBB); + break; default: llvm_unreachable("Invalid branch type!"); } @@ -662,6 +675,48 @@ unsigned XtensaInstrInfo::InsertBranchAtInst(MachineBasicBlock &MBB, return Count; } +bool XtensaInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg, + Register &SrcReg2, int64_t &Mask, + int64_t &Value) const { + unsigned Opc = MI.getOpcode(); + + switch (Opc) { + case Xtensa::BEQ: + case Xtensa::BNE: + case Xtensa::BLT: + case Xtensa::BLTU: + case Xtensa::BGE: + case Xtensa::BGEU: + SrcReg = MI.getOperand(0).getReg(); + SrcReg2 = MI.getOperand(1).getReg(); + Value = 0; + Mask = 0; + return true; + + case Xtensa::BEQI: + case Xtensa::BNEI: + case Xtensa::BLTI: + case Xtensa::BLTUI: + case Xtensa::BGEI: + case Xtensa::BGEUI: + SrcReg = MI.getOperand(0).getReg(); + Value = MI.getOperand(1).getImm(); + Mask = ~0; + return true; + + case Xtensa::BEQZ: + case Xtensa::BNEZ: + case Xtensa::BLTZ: + case Xtensa::BGEZ: + SrcReg = MI.getOperand(0).getReg(); + Value = 0; + Mask = ~0; + return true; + 
} + + return false; +} + bool XtensaInstrInfo::isBranch(const MachineBasicBlock::iterator &MI, SmallVectorImpl &Cond, const MachineOperand *&Target) const { @@ -670,6 +725,7 @@ bool XtensaInstrInfo::isBranch(const MachineBasicBlock::iterator &MI, case Xtensa::J: case Xtensa::JX: case Xtensa::BR_JT: + case Xtensa::LOOPEND: Cond[0].setImm(OpCode); Target = &MI->getOperand(0); return true; diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.h b/llvm/lib/Target/Xtensa/XtensaInstrInfo.h index b03a030a6b4f8..0bfe35a3fb741 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.h +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.h @@ -105,6 +105,10 @@ class XtensaInstrInfo : public XtensaGenInstrInfo { int64_t offset, ArrayRef Cond, DebugLoc DL, int *BytesAdded) const; + bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, + Register &SrcReg2, int64_t &CmpMask, + int64_t &CmpValue) const override; + // Return true if MI is a conditional or unconditional branch. // When returning true, set Cond to the mask of condition-code // values on which the instruction will branch, and set Target diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 89176c2818364..d5479cd1e875d 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1199,31 +1199,48 @@ let usesCustomInserter = 1, isBranch = 1, isTerminator = 1, isBarrier = 1 in { // Loop Instructions //===----------------------------------------------------------------------===// -def LOOP : RRI8_Inst<0x06, (outs), (ins AR:$s, mem8:$uimm8), - "loop\t$$s, $uimm8", []>, Requires<[HasLoop]> { - bits<8> uimm8; +def LOOP : RRI8_Inst<0x06, (outs), (ins AR:$s, ltarget:$target), + "loop\t$s, $target", []>, Requires<[HasLoop]> { + bits<8> target; let r = 0x08; let t = 0x07; - let imm8 = uimm8; + let imm8 = target; } -def LOOPGTZ : RRI8_Inst<0x06, (outs), (ins AR:$s, mem8:$uimm8), - "loopgtz\t$$s, $uimm8", []>, Requires<[HasLoop]> { - bits<8> 
uimm8; +def LOOPGTZ : RRI8_Inst<0x06, (outs), (ins AR:$s, ltarget:$target), + "loopgtz\t$s, $target", []>, Requires<[HasLoop]> { + bits<8> target; let r = 0x0A; let t = 0x07; - let imm8 = uimm8; + let imm8 = target; } -def LOOPNEZ : RRI8_Inst<0x06, (outs), (ins AR:$s, mem8:$uimm8), - "loopnez\t$$s, $uimm8", []>, Requires<[HasLoop]> { - bits<8> uimm8; +def LOOPNEZ : RRI8_Inst<0x06, (outs), (ins AR:$s, ltarget:$target), + "loopnez\t$s, $target", []>, Requires<[HasLoop]> { + bits<8> target; let r = 0x09; let t = 0x07; - let imm8 = uimm8; + let imm8 = target; +} + +let isTerminator = 1, isBarrier = 1, hasSideEffects = 1, Size = 3 in { + def LOOPINIT : Pseudo<(outs), (ins AR:$elts), + "!loopinit $elts", [(int_set_loop_iterations AR:$elts)]>; +} + +// LOOPSTART pseudo instruction reserves 9 bytes for LOOP operation and NOP operations for possible alignment. +let isTerminator = 1, isBarrier = 1, hasSideEffects = 1, Size = 9 in { + def LOOPSTART : Pseudo<(outs), (ins AR:$s, brtarget:$target), + "!loopstart $s, $target", []>; +} + +// LOOPEND pseudo instruction reserves 6 bytes for Jump and NOP operations. 
+let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 0, Size = 6 in { + def LOOPEND : Pseudo<(outs), (ins brtarget:$target), + "!loopend $target", [(Xtensa_loopend bb:$target)]>; } //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaOperands.td b/llvm/lib/Target/Xtensa/XtensaOperands.td index 31ca787441f8e..9957bf7bd1f47 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperands.td +++ b/llvm/lib/Target/Xtensa/XtensaOperands.td @@ -238,7 +238,14 @@ def jumptarget : Operand { let ParserMatchClass = XtensaPCRelTargetAsmOperand; } -def L32Rtarget : Operand { +def ltarget : Operand { + let PrintMethod = "printLoopTarget"; + let EncoderMethod = "getLoopTargetEncoding"; + let DecoderMethod = "decodeLoopOperand"; + let ParserMatchClass = XtensaPCRelTargetAsmOperand; +} + +def L32Rtarget: Operand { let PrintMethod = "printL32RTarget"; let EncoderMethod = "getL32RTargetEncoding"; let DecoderMethod = "decodeL32ROperand"; diff --git a/llvm/lib/Target/Xtensa/XtensaOperators.td b/llvm/lib/Target/Xtensa/XtensaOperators.td index 4dd1dd27b519d..8cf072d959b2c 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperators.td +++ b/llvm/lib/Target/Xtensa/XtensaOperators.td @@ -42,6 +42,8 @@ def SDT_XtensaSRC : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCi def SDT_XtensaEXTUI : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, SDTCisVT<2, i32>, SDTCisVT<3, i32>]>; +def SDT_XtensaLoopEnd : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>; + //===----------------------------------------------------------------------===// // Node definitions //===----------------------------------------------------------------------===// @@ -107,3 +109,7 @@ def Xtensa_mem_barrier: SDNode<"XtensaISD::MEMW", SDT_XtensaMEMBARRIER, def Xtensa_rur: SDNode<"XtensaISD::RUR", SDT_XtensaRUR, [SDNPInGlue]>; + +def Xtensa_loopend: SDNode<"XtensaISD::LOOPEND", SDT_XtensaLoopEnd, + [SDNPHasChain, SDNPInGlue]>; + diff --git 
a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp index ebba17d389858..7b2f967c5996c 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp +++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp @@ -15,6 +15,7 @@ #include "XtensaMachineFunctionInfo.h" #include "XtensaTargetMachine.h" #include "XtensaTargetObjectFile.h" +#include "XtensaTargetTransformInfo.h" #include "TargetInfo/XtensaTargetInfo.h" #include "llvm/CodeGen/Passes.h" #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" @@ -89,6 +90,11 @@ XtensaTargetMachine::getSubtargetImpl(const Function &F) const { return I.get(); } +TargetTransformInfo +XtensaTargetMachine::getTargetTransformInfo(const Function &F) const { + return TargetTransformInfo(XtensaTTIImpl(this, F)); +} + MachineFunctionInfo *XtensaTargetMachine::createMachineFunctionInfo( BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const { @@ -107,11 +113,21 @@ class XtensaPassConfig : public TargetPassConfig { } void addIRPasses() override; + bool addPreISel() override; bool addInstSelector() override; + void addPreRegAlloc() override; void addPreEmitPass() override; }; } // end anonymous namespace +bool XtensaPassConfig::addPreISel() { + if (TM->getOptLevel() != CodeGenOptLevel::None) { + addPass(createHardwareLoopsLegacyPass()); + } + + return false; +} + bool XtensaPassConfig::addInstSelector() { addPass(createXtensaISelDag(getXtensaTargetMachine(), getOptLevel())); return false; @@ -119,8 +135,13 @@ bool XtensaPassConfig::addInstSelector() { void XtensaPassConfig::addIRPasses() { addPass(createAtomicExpandLegacyPass()); } +void XtensaPassConfig::addPreRegAlloc() { + addPass(createXtensaHardwareLoops()); +} + void XtensaPassConfig::addPreEmitPass() { addPass(createXtensaSizeReductionPass()); + addPass(createXtensaFixupHwLoops()); addPass(&BranchRelaxationPassID); } diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.h 
b/llvm/lib/Target/Xtensa/XtensaTargetMachine.h index 44df32fc915d9..11ae219fd0176 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetMachine.h +++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.h @@ -37,6 +37,8 @@ class XtensaTargetMachine : public LLVMTargetMachine { std::optional CM, CodeGenOptLevel OL, bool JIT); + TargetTransformInfo getTargetTransformInfo(const Function &F) const override; + const XtensaSubtarget *getSubtargetImpl(const Function &F) const override; TargetPassConfig *createPassConfig(PassManagerBase &PM) override; diff --git a/llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.cpp b/llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.cpp new file mode 100644 index 0000000000000..7bdec70504772 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.cpp @@ -0,0 +1,35 @@ +//===- XtensaTargetTransformInfo.cpp - Xtensa specific TTI ----------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "XtensaTargetTransformInfo.h" + +using namespace llvm; + +#define DEBUG_TYPE "xtensatti" + +static cl::opt DisableLowOverheadLoops( + "disable-xtensa-hwloops", cl::Hidden, cl::init(false), + cl::desc("Disable the generation of hardware loops")); + +bool XtensaTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, + AssumptionCache &AC, + TargetLibraryInfo *LibInfo, + HardwareLoopInfo &HWLoopInfo) { + if (DisableLowOverheadLoops) + return false; + + if (!ST->hasLoop()) + return false; + + LLVMContext &C = L->getHeader()->getContext(); + HWLoopInfo.CounterInReg = false; + HWLoopInfo.IsNestingLegal = false; + HWLoopInfo.CountType = Type::getInt32Ty(C); + HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1); + return true; +} diff --git a/llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.h b/llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.h new file mode 100644 index 0000000000000..81bfbacc0381e --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.h @@ -0,0 +1,51 @@ +//===- XtensaTargetTransformInfo.h - Xtensa specific TTI --------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// \file +/// This file defines a TargetTransformInfo::Concept conforming object specific +/// to the Xtensa target machine. It uses the target's detailed information to +/// provide more precise answers to certain TTI queries, while letting the +/// target independent and default TTI implementations handle the rest. 
+/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_XTENSA_XTENSATARGETTRANSFORMINFO_H +#define LLVM_LIB_TARGET_XTENSA_XTENSATARGETTRANSFORMINFO_H + +#include "XtensaSubtarget.h" +#include "XtensaTargetMachine.h" +#include "llvm/Analysis/TargetTransformInfo.h" +#include "llvm/CodeGen/BasicTTIImpl.h" +#include "llvm/IR/Function.h" + +namespace llvm { + +class XtensaTTIImpl : public BasicTTIImplBase { + using BaseT = BasicTTIImplBase; + using TTI = TargetTransformInfo; + + friend BaseT; + + const XtensaSubtarget *ST; + const XtensaTargetLowering *TLI; + + const XtensaSubtarget *getST() const { return ST; } + const XtensaTargetLowering *getTLI() const { return TLI; } + +public: + explicit XtensaTTIImpl(const XtensaTargetMachine *TM, const Function &F) + : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)), + TLI(ST->getTargetLowering()) {} + + bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, + AssumptionCache &AC, TargetLibraryInfo *LibInfo, + HardwareLoopInfo &HWLoopInfo); +}; + +} // end namespace llvm + +#endif // LLVM_LIB_TARGET_XTENSA_XTENSATARGETTRANSFORMINFO_H diff --git a/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll b/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll new file mode 100644 index 0000000000000..e651b8a37e894 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll @@ -0,0 +1,55 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -O1 -mtriple=xtensa -mcpu=esp32 %s -o - | FileCheck %s + + +; Function Attrs: norecurse nounwind optsize readnone +define i32 @test_hwloop(i32 %a, i32 %b, i32 %n) local_unnamed_addr #0 { +; CHECK-LABEL: test_hwloop: +; CHECK: entry a1, 32 +; CHECK-NEXT: mov.n a8, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: blti a4, 1, .LBB0_7 +; CHECK-NEXT: # %bb.1: # %for.body.preheader +; CHECK-NEXT: movi.n a8, 0 +; CHECK-NEXT: j .LBB0_3 +; CHECK-NEXT: .LBB0_2: # 
%for.body +; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 +; CHECK-NEXT: add.n a2, a9, a2 +; CHECK-NEXT: j .LBB0_5 +; CHECK-NEXT: .LBB0_3: # %for.body +; CHECK-NEXT: # =>This Loop Header: Depth=1 +; CHECK-NEXT: # Child Loop BB0_6 Depth 2 +; CHECK-NEXT: loop a4, .LBB0_6 +; CHECK-NEXT: mov.n a9, a8 +; CHECK-NEXT: bge a8, a2, .LBB0_2 +; CHECK-NEXT: # %bb.4: # %for.body +; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 +; CHECK-NEXT: mull a9, a2, a3 +; CHECK-NEXT: j .LBB0_2 +; CHECK-NEXT: .LBB0_5: # in Loop: Header=BB0_3 Depth=1 +; CHECK-NEXT: nop +; CHECK-NEXT: .LBB0_6: # Parent Loop BB0_3 Depth=1 +; CHECK-NEXT: # => This Inner Loop Header: Depth=2 +; CHECK-NEXT: j .LBB0_7 +; CHECK-NEXT: .LBB0_7: # %for.cond.cleanup +; CHECK-NEXT: retw.n +entry: + %cmp7 = icmp sgt i32 %n, 0 + br i1 %cmp7, label %for.body, label %for.cond.cleanup + +for.cond.cleanup: ; preds = %for.body, %entry + %a.addr.0.lcssa = phi i32 [ %a, %entry ], [ %a.addr.1, %for.body ] + ret i32 %a.addr.0.lcssa + +for.body: ; preds = %entry, %for.body + %i.09 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %a.addr.08 = phi i32 [ %a.addr.1, %for.body ], [ %a, %entry ] + %cmp1 = icmp sgt i32 %a.addr.08, 0 + %mul = mul nsw i32 %a.addr.08, %b + %add = select i1 %cmp1, i32 %mul, i32 0 + %a.addr.1 = add nsw i32 %add, %a.addr.08 + %inc = add nuw nsw i32 %i.09, 1 + %cmp = icmp slt i32 %inc, %n + br i1 %cmp, label %for.body, label %for.cond.cleanup +} + diff --git a/llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll b/llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll new file mode 100644 index 0000000000000..f0116b1828c1a --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll @@ -0,0 +1,53 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -O1 -mtriple=xtensa -mcpu=esp32 %s -o - | FileCheck %s + +; Function Attrs: nounwind optsize +define i32 @test_hwloop(i32 %a, i32 %b, i32 %n) local_unnamed_addr #1 { +; CHECK-LABEL: test_hwloop: +; CHECK: 
entry a1, 32 +; CHECK-NEXT: mov.n a8, a1 +; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: blti a4, 1, .LBB0_5 +; CHECK-NEXT: # %bb.1: # %for.body.preheader +; CHECK-NEXT: movi.n a8, 0 +; CHECK-NEXT: j .LBB0_3 +; CHECK-NEXT: .LBB0_2: # %for.body +; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 +; CHECK-NEXT: add.n a2, a9, a2 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: addi.n a4, a4, -1 +; CHECK-NEXT: beqz a4, .LBB0_5 +; CHECK-NEXT: .LBB0_3: # %for.body +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: mov.n a9, a8 +; CHECK-NEXT: bge a8, a2, .LBB0_2 +; CHECK-NEXT: # %bb.4: # %for.body +; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 +; CHECK-NEXT: mull a9, a2, a3 +; CHECK-NEXT: j .LBB0_2 +; CHECK-NEXT: .LBB0_5: # %for.cond.cleanup +; CHECK-NEXT: retw.n +entry: + %cmp7 = icmp sgt i32 %n, 0 + br i1 %cmp7, label %for.body, label %for.cond.cleanup + +for.cond.cleanup: ; preds = %for.body, %entry + %a.addr.0.lcssa = phi i32 [ %a, %entry ], [ %a.addr.1, %for.body ] + ret i32 %a.addr.0.lcssa + +for.body: ; preds = %entry, %for.body + %i.09 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %a.addr.08 = phi i32 [ %a.addr.1, %for.body ], [ %a, %entry ] + tail call void asm sideeffect "", ""() #2, !srcloc !2 + %cmp1 = icmp sgt i32 %a.addr.08, 0 + %mul = mul nsw i32 %a.addr.08, %b + %add = select i1 %cmp1, i32 %mul, i32 0 + %a.addr.1 = add nsw i32 %add, %a.addr.08 + %inc = add nuw nsw i32 %i.09, 1 + %cmp = icmp slt i32 %inc, %n + br i1 %cmp, label %for.body, label %for.cond.cleanup + +} + +!2 = !{i32 216} From ff383755f6a85658da7de3dfd1e1afb15ae7019f Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 12:10:19 +0300 Subject: [PATCH 054/289] [Xtensa] Change using of Frame Pointer. Do not use Frame Pointer by default. Also improve storing function argument from a7 register to a8 register. Corrected funnel shift test. 
--- clang/lib/Driver/ToolChains/CommonArgs.cpp | 1 + llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp | 9 ++++++--- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 2 ++ llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h | 4 ++++ llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll | 3 ++- llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll | 1 - 6 files changed, 15 insertions(+), 5 deletions(-) diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp index 3de3b30995d8a..4b2badddf8b47 100644 --- a/clang/lib/Driver/ToolChains/CommonArgs.cpp +++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp @@ -104,6 +104,7 @@ static bool useFramePointerForTargetByDefault(const llvm::opt::ArgList &Args, case llvm::Triple::loongarch32: case llvm::Triple::loongarch64: case llvm::Triple::m68k: + case llvm::Triple::xtensa: return !clang::driver::tools::areOptimizationsEnabled(Args); default: break; diff --git a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp index ed19719989c0d..de0a50a4c1d35 100644 --- a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp @@ -49,6 +49,7 @@ void XtensaFrameLowering::emitPrologue(MachineFunction &MF, MCRegister SP = Xtensa::SP; MCRegister FP = TRI->getFrameRegister(MF); const MCRegisterInfo *MRI = MF.getContext().getRegisterInfo(); + XtensaFunctionInfo *XtensaFI = MF.getInfo(); // First, compute final stack size. uint64_t StackSize = MFI.getStackSize(); @@ -82,9 +83,11 @@ void XtensaFrameLowering::emitPrologue(MachineFunction &MF, // Store FP register in A8, because FP may be used to pass function // arguments - BuildMI(MBB, MBBI, DL, TII.get(Xtensa::OR), Xtensa::A8) - .addReg(FP) - .addReg(FP); + if (XtensaFI->isSaveFrameRegister()) { + BuildMI(MBB, MBBI, DL, TII.get(Xtensa::OR), Xtensa::A8) + .addReg(FP) + .addReg(FP); + } // if framepointer enabled, set it to point to the stack pointer. 
if (hasFP(MF)) { diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index fdb84fc7cab4a..b5f54af7c5a5f 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -762,6 +762,7 @@ SDValue XtensaTargetLowering::LowerFormalArguments( // so load argument from A8 if (Subtarget.isWinABI() && (VA.getLocReg() == FrameReg)) { Register = MF.addLiveIn(Xtensa::A8, RC); + XtensaFI->setSaveFrameRegister(); } else { Register = MF.addLiveIn(VA.getLocReg(), RC); } @@ -855,6 +856,7 @@ SDValue XtensaTargetLowering::LowerFormalArguments( // so load argument from A8 if (ArgRegs[I] == FrameReg) { RegInfo.addLiveIn(Xtensa::A8, Reg); + XtensaFI->setSaveFrameRegister(); } else { RegInfo.addLiveIn(ArgRegs[I], Reg); } diff --git a/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h b/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h index ebc99f912ba0e..10d11fcb21bf1 100644 --- a/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h +++ b/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h @@ -25,6 +25,7 @@ class XtensaFunctionInfo : public MachineFunctionInfo { unsigned VarArgsFirstGPR; int VarArgsStackOffset; unsigned VarArgsFrameIndex; + bool SaveFrameRegister = false; public: explicit XtensaFunctionInfo(const Function &F, const TargetSubtargetInfo *STI) @@ -39,6 +40,9 @@ class XtensaFunctionInfo : public MachineFunctionInfo { // Get and set the frame index of the first stack vararg. 
unsigned getVarArgsFrameIndex() const { return VarArgsFrameIndex; } void setVarArgsFrameIndex(unsigned FI) { VarArgsFrameIndex = FI; } + + bool isSaveFrameRegister() const { return SaveFrameRegister; } + void setSaveFrameRegister() { SaveFrameRegister = true; } }; } // namespace llvm diff --git a/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll b/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll index e651b8a37e894..2ad5b57d5e15e 100644 --- a/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll +++ b/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll @@ -6,7 +6,6 @@ define i32 @test_hwloop(i32 %a, i32 %b, i32 %n) local_unnamed_addr #0 { ; CHECK-LABEL: test_hwloop: ; CHECK: entry a1, 32 -; CHECK-NEXT: mov.n a8, a1 ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: blti a4, 1, .LBB0_7 ; CHECK-NEXT: # %bb.1: # %for.body.preheader @@ -19,6 +18,8 @@ define i32 @test_hwloop(i32 %a, i32 %b, i32 %n) local_unnamed_addr #0 { ; CHECK-NEXT: .LBB0_3: # %for.body ; CHECK-NEXT: # =>This Loop Header: Depth=1 ; CHECK-NEXT: # Child Loop BB0_6 Depth 2 +; CHECK-NEXT: nop +; CHECK-NEXT: nop ; CHECK-NEXT: loop a4, .LBB0_6 ; CHECK-NEXT: mov.n a9, a8 ; CHECK-NEXT: bge a8, a2, .LBB0_2 diff --git a/llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll b/llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll index f0116b1828c1a..2262ecde5bc9a 100644 --- a/llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll +++ b/llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll @@ -5,7 +5,6 @@ define i32 @test_hwloop(i32 %a, i32 %b, i32 %n) local_unnamed_addr #1 { ; CHECK-LABEL: test_hwloop: ; CHECK: entry a1, 32 -; CHECK-NEXT: mov.n a8, a1 ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: blti a4, 1, .LBB0_5 ; CHECK-NEXT: # %bb.1: # %for.body.preheader From aa083bcf682fda26b4099c85a3e73a31e3991b3d Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 12:10:52 +0300 Subject: [PATCH 055/289] esp/maint: Adds Github workfows --- .github/workflows/issue_comment.yml | 19 +++++++++++++++++++ 
.github/workflows/new_issues.yml | 19 +++++++++++++++++++ .github/workflows/new_prs.yml | 24 ++++++++++++++++++++++++ 3 files changed, 62 insertions(+) create mode 100644 .github/workflows/issue_comment.yml create mode 100644 .github/workflows/new_issues.yml create mode 100644 .github/workflows/new_prs.yml diff --git a/.github/workflows/issue_comment.yml b/.github/workflows/issue_comment.yml new file mode 100644 index 0000000000000..b5c80040fc9ff --- /dev/null +++ b/.github/workflows/issue_comment.yml @@ -0,0 +1,19 @@ +name: Sync issue comments to JIRA + +# This workflow will be triggered when new issue comment is created (including PR comments) +on: issue_comment + +jobs: + sync_issue_comments_to_jira: + name: Sync Issue Comments to Jira + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Sync issue comments to JIRA + uses: espressif/github-actions/sync_issues_to_jira@master + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + JIRA_PASS: ${{ secrets.JIRA_PASS }} + JIRA_PROJECT: LLVM + JIRA_URL: ${{ secrets.JIRA_URL }} + JIRA_USER: ${{ secrets.JIRA_USER }} diff --git a/.github/workflows/new_issues.yml b/.github/workflows/new_issues.yml new file mode 100644 index 0000000000000..a6602d1c7aa1c --- /dev/null +++ b/.github/workflows/new_issues.yml @@ -0,0 +1,19 @@ +name: Sync issues to Jira + +# This workflow will be triggered when a new issue is opened +on: issues + +jobs: + sync_issues_to_jira: + name: Sync issues to Jira + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Sync GitHub issues to Jira project + uses: espressif/github-actions/sync_issues_to_jira@master + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + JIRA_PASS: ${{ secrets.JIRA_PASS }} + JIRA_PROJECT: LLVM + JIRA_URL: ${{ secrets.JIRA_URL }} + JIRA_USER: ${{ secrets.JIRA_USER }} diff --git a/.github/workflows/new_prs.yml b/.github/workflows/new_prs.yml new file mode 100644 index 0000000000000..199d58ef87b3f --- /dev/null +++ 
b/.github/workflows/new_prs.yml @@ -0,0 +1,24 @@ +name: Sync remain PRs to Jira + +# This workflow will be triggered every hour, to sync remaining PRs (i.e. PRs with zero comment) to Jira project +# Note that, PRs can also get synced when new PR comment is created +on: + schedule: + - cron: "0 * * * *" + +jobs: + sync_prs_to_jira: + name: Sync PRs to Jira + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Sync PRs to Jira project + uses: espressif/github-actions/sync_issues_to_jira@master + with: + cron_job: true + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + JIRA_PASS: ${{ secrets.JIRA_PASS }} + JIRA_PROJECT: LLVM + JIRA_URL: ${{ secrets.JIRA_URL }} + JIRA_USER: ${{ secrets.JIRA_USER }} From 4798f825173d02adadcc0fb9e761e8c7e4a7af46 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 12:46:48 +0300 Subject: [PATCH 056/289] [Xtensa] Implement esp32 psram cache fixes. --- clang/include/clang/Driver/Options.td | 9 + clang/lib/Driver/ToolChains/Clang.cpp | 32 ++ clang/lib/Driver/ToolChains/Clang.h | 2 + clang/lib/Driver/ToolChains/Xtensa.cpp | 19 + llvm/lib/Target/Xtensa/CMakeLists.txt | 1 + llvm/lib/Target/Xtensa/Xtensa.h | 1 + .../lib/Target/Xtensa/XtensaESP32PSRAMFix.cpp | 353 ++++++++++++++++++ .../lib/Target/Xtensa/XtensaTargetMachine.cpp | 1 + llvm/test/CodeGen/Xtensa/psram_memw.ll | 50 +++ llvm/test/CodeGen/Xtensa/psram_nops.ll | 60 +++ 10 files changed, 528 insertions(+) create mode 100644 llvm/lib/Target/Xtensa/XtensaESP32PSRAMFix.cpp create mode 100644 llvm/test/CodeGen/Xtensa/psram_memw.ll create mode 100644 llvm/test/CodeGen/Xtensa/psram_nops.ll diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 15f9ee75492e3..917c16b779c28 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -241,6 +241,8 @@ def m_ve_Features_Group : OptionGroup<"">, Group, DocName<"VE">; def m_loongarch_Features_Group : OptionGroup<"">, Group, 
DocName<"LoongArch">; +def m_xtensa_Features_Group : OptionGroup<"">, + Group, DocName<"Xtensa">; def m_libc_Group : OptionGroup<"">, Group, Flags<[HelpHidden]>; @@ -6397,6 +6399,13 @@ def mno_3dnow : Flag<["-"], "mno-3dnow">; def m3dnowa : Flag<["-"], "m3dnowa">; def mno_3dnowa : Flag<["-"], "mno-3dnowa">; +// Xtensa feature flags +def malways_memw : Flag<["-"], "malways-memw">, Group; +def mfix_esp32_psram_cache_issue : Flag<["-"], "mfix-esp32-psram-cache-issue">, Group; +def mfix_esp32_psram_cache_strategy_EQ : Joined<["-"], "mfix-esp32-psram-cache-strategy=">, Group, + HelpText<" Psram cache fix strategies : memw, nops">, + Values<"memw, nops">; + // These are legacy user-facing driver-level option spellings. They are always // aliases for options that are spelled using the more common Unix / GNU flag // style of double-dash and equals-joined flags. diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 8858c318aba7a..b079f47d8f09a 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -1743,6 +1743,10 @@ void Clang::RenderTargetOptions(const llvm::Triple &EffectiveTriple, case llvm::Triple::ve: AddVETargetArgs(Args, CmdArgs); break; + + case llvm::Triple::xtensa: + AddXtensaTargetArgs(Args, CmdArgs); + break; } } @@ -2414,6 +2418,34 @@ void Clang::AddVETargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const { CmdArgs.push_back("hard"); } +void Clang::AddXtensaTargetArgs(const ArgList &Args, + ArgStringList &CmdArgs) const { + const Driver &D = getToolChain().getDriver(); + + if (Args.getLastArg(options::OPT_malways_memw) != nullptr) { + CmdArgs.push_back("-mllvm"); + CmdArgs.push_back("-malways-memw"); + } + + if (Args.getLastArg(options::OPT_mfix_esp32_psram_cache_issue) != nullptr) { + CmdArgs.push_back("-mllvm"); + CmdArgs.push_back("-mfix-esp32-psram-cache-issue"); + + if (Arg *A = + Args.getLastArg(options::OPT_mfix_esp32_psram_cache_strategy_EQ)) { + StringRef 
Value = A->getValue(); + if (Value == "memw" || Value == "nops") { + CmdArgs.push_back("-mllvm"); + CmdArgs.push_back( + Args.MakeArgString("-mfix-esp32-psram-cache-strategy=" + Value)); + } else { + D.Diag(diag::err_drv_unsupported_option_argument) + << A->getOption().getName() << Value; + } + } + } +} + void Clang::DumpCompilationDatabase(Compilation &C, StringRef Filename, StringRef Target, const InputInfo &Output, const InputInfo &Input, const ArgList &Args) const { diff --git a/clang/lib/Driver/ToolChains/Clang.h b/clang/lib/Driver/ToolChains/Clang.h index 18f6c5ed06a59..5f7e205db5d94 100644 --- a/clang/lib/Driver/ToolChains/Clang.h +++ b/clang/lib/Driver/ToolChains/Clang.h @@ -81,6 +81,8 @@ class LLVM_LIBRARY_VISIBILITY Clang : public Tool { llvm::opt::ArgStringList &CmdArgs) const; void AddVETargetArgs(const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CmdArgs) const; + void AddXtensaTargetArgs(const llvm::opt::ArgList &Args, + llvm::opt::ArgStringList &CmdArgs) const; enum RewriteKind { RK_None, RK_Fragile, RK_NonFragile }; diff --git a/clang/lib/Driver/ToolChains/Xtensa.cpp b/clang/lib/Driver/ToolChains/Xtensa.cpp index 930449424f1aa..a1149dc6e8438 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.cpp +++ b/clang/lib/Driver/ToolChains/Xtensa.cpp @@ -108,18 +108,37 @@ XtensaToolChain::XtensaToolChain(const Driver &D, const llvm::Triple &Triple, IsIntegratedAsm = false; } + bool IsESP32 = XtensaToolChain::GetTargetCPUVersion(Args) == "esp32"; Multilibs.push_back(Multilib()); + if (IsESP32) + Multilibs.push_back(MultilibBuilder("esp32-psram", {}, {}) + .flag("-mfix-esp32-psram-cache-issue") + .makeMultilib()); + Multilibs.push_back(MultilibBuilder("no-rtti", {}, {}) .flag("-frtti", /*Disallow=*/true) .flag("-fno-rtti") .makeMultilib()); + if (IsESP32) + Multilibs.push_back(MultilibBuilder("esp32-psram/no-rtti", {}, {}) + .flag("-fno-rtti") + .flag("-frtti", /*Disallow=*/true) + .flag("-mfix-esp32-psram-cache-issue") + .makeMultilib()); + 
Multilib::flags_list Flags; addMultilibFlag( Args.hasFlag(options::OPT_frtti, options::OPT_fno_rtti, false), "frtti", Flags); + if (IsESP32) + addMultilibFlag(Args.hasFlag(options::OPT_mfix_esp32_psram_cache_issue, + options::OPT_mfix_esp32_psram_cache_issue, + false), + "mfix-esp32-psram-cache-issue", Flags); + Multilibs.select(Flags, SelectedMultilibs); const std::string Slash = XtensaGCCToolchain.Slash; diff --git a/llvm/lib/Target/Xtensa/CMakeLists.txt b/llvm/lib/Target/Xtensa/CMakeLists.txt index 21366425c55ef..261c5548d780d 100644 --- a/llvm/lib/Target/Xtensa/CMakeLists.txt +++ b/llvm/lib/Target/Xtensa/CMakeLists.txt @@ -17,6 +17,7 @@ add_public_tablegen_target(XtensaCommonTableGen) add_llvm_target(XtensaCodeGen XtensaAsmPrinter.cpp XtensaConstantPoolValue.cpp + XtensaESP32PSRAMFix.cpp XtensaFixupHWLoops.cpp XtensaFrameLowering.cpp XtensaHardwareLoops.cpp diff --git a/llvm/lib/Target/Xtensa/Xtensa.h b/llvm/lib/Target/Xtensa/Xtensa.h index 3322d66eb7610..12ab08e914e89 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.h +++ b/llvm/lib/Target/Xtensa/Xtensa.h @@ -28,5 +28,6 @@ FunctionPass *createXtensaISelDag(XtensaTargetMachine &TM, FunctionPass *createXtensaSizeReductionPass(); FunctionPass *createXtensaHardwareLoops(); FunctionPass *createXtensaFixupHwLoops(); +FunctionPass *createXtensaPSRAMCacheFixPass(); } // namespace llvm #endif // LLVM_LIB_TARGET_XTENSA_XTENSA_H diff --git a/llvm/lib/Target/Xtensa/XtensaESP32PSRAMFix.cpp b/llvm/lib/Target/Xtensa/XtensaESP32PSRAMFix.cpp new file mode 100644 index 0000000000000..5f22c2ea0e20c --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaESP32PSRAMFix.cpp @@ -0,0 +1,353 @@ +//===- XtensaPSRAMFIx.cpp - Fixup PSRAM Cache issues --------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#include "Xtensa.h" +#include "XtensaInstrInfo.h" +#include "XtensaSubtarget.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/CodeGen//MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Target/TargetMachine.h" + +using namespace llvm; + +#define DEBUG_TYPE "xtensa-fix-esp32-psram-cache-pass" + +enum PSRAMFixChoice { + ESP32_PSRAM_FIX_MEMW, + ESP32_PSRAM_FIX_NOPS +}; + +static cl::opt AlwaysMembarrier("malways-memw", cl::init(false), + cl::Hidden); + +static cl::opt FixESP32PSRAMCacheIssue("mfix-esp32-psram-cache-issue", + cl::init(false), cl::Hidden); + +static cl::opt ESP32PSRAMFixStrat( + "mfix-esp32-psram-cache-strategy", cl::init(ESP32_PSRAM_FIX_MEMW), + cl::desc(""), + cl::values(clEnumValN(ESP32_PSRAM_FIX_MEMW, "memw", ""), + clEnumValN(ESP32_PSRAM_FIX_NOPS, "nops", ""))); + +STATISTIC(NumAdded, "Number of instructions added"); + +class createXtensaPSRAMCacheFix : public MachineFunctionPass { +public: + static char ID; + createXtensaPSRAMCacheFix() : MachineFunctionPass(ID) {} + + const XtensaSubtarget *Subtarget; + static const XtensaInstrInfo *XtensaII; + + bool runOnMachineFunction(MachineFunction &MF) override; + + llvm::StringRef getPassName() const override { + return "Xtensa fix PSRAM cache issue in the ESP32 chips"; + } + +private: + bool xtensaPSRAMCacheFixNopReorg(MachineFunction &MF); + /* + Alternative fix to xtensaPSRAMCacheFixNopReorg. Tries to solve the 32-bit + load/store inversion by explicitly inserting a memory barrier instead of nops. + Slower than nops, but faster than just adding memws everywhere. + */ + bool xtensaPSRAMCacheFixMemwReorg(MachineFunction &MF); + // Emits a memw before every load/store instruction. Hard-handed approach to + // get rid of any pipeline/memory issues... 
+ bool xtensaInsertMemwReorg(MachineFunction &MF); +}; + +char createXtensaPSRAMCacheFix::ID = 0; +const XtensaInstrInfo *createXtensaPSRAMCacheFix::XtensaII; + +// Affected piece of pipeline is 5 entries long; the load/store itself fills +// one. +#define LOAD_STORE_OFF 4 + +bool createXtensaPSRAMCacheFix::xtensaPSRAMCacheFixNopReorg( + MachineFunction &MF) { + MachineFunction::iterator I = MF.begin(), E = MF.end(); + MachineInstr *LastHIQIStore = nullptr; + MachineInstr *StoreInsn = nullptr; + int InsnsSinceStore = 0; + bool Modified = false; + + for (; I != E; ++I) { + MachineBasicBlock &MBB = *I; + MachineBasicBlock::instr_iterator MII = MBB.instr_begin(), + MIE = MBB.instr_end(); + MachineBasicBlock::instr_iterator NextMII; + + // Iterate through the instructions in the basic block + for (; MII != MIE; MII = NextMII) { + MachineInstr *MI = &*MII; + unsigned Opcode = MI->getOpcode(); + NextMII = std::next(MII); + + if (MI->isCall() || MI->isBranch() || MI->isReturn()) { + if (LastHIQIStore) { + DebugLoc dl = LastHIQIStore->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::MEMW); + BuildMI(*LastHIQIStore->getParent(), LastHIQIStore, dl, NewMCID); + LastHIQIStore = nullptr; + Modified = true; + NumAdded++; + } + if (!(MI->isBranch() && (MI->getOpcode() != Xtensa::J) && + (MI->getOpcode() != Xtensa::JX))) { + StoreInsn = nullptr; + } + continue; + } + + switch (Opcode) { + case Xtensa::LSI: + case Xtensa::L32I_N: + case Xtensa::L32I: + case Xtensa::L16SI: + case Xtensa::L16UI: + case Xtensa::L8UI: + if (StoreInsn) { + while (InsnsSinceStore++ < LOAD_STORE_OFF) { + DebugLoc dl = MII->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::NOP); + BuildMI(MBB, MII, dl, NewMCID); + Modified = true; + NumAdded++; + } + } + if (LastHIQIStore) { + DebugLoc dl = LastHIQIStore->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::MEMW); + BuildMI(*LastHIQIStore->getParent(), + std::next(LastHIQIStore->getIterator()), dl, 
NewMCID); + LastHIQIStore = nullptr; + Modified = true; + NumAdded++; + } + break; + case Xtensa::SSI: + case Xtensa::S32I_N: + case Xtensa::S32I: { + LastHIQIStore = nullptr; + InsnsSinceStore = 0; + StoreInsn = MI; + } break; + case Xtensa::S16I: + case Xtensa::S8I: { + LastHIQIStore = MI; + InsnsSinceStore = 0; + StoreInsn = MI; + } break; + default: + InsnsSinceStore++; + break; + } + } + } + return Modified; +} + +bool createXtensaPSRAMCacheFix::xtensaPSRAMCacheFixMemwReorg( + MachineFunction &MF) { + MachineFunction::iterator I = MF.begin(), E = MF.end(); + MachineInstr *LastHIQIStore = nullptr; + MachineInstr *StoreInsn = nullptr; + bool Modified = false; + + for (; I != E; ++I) { + MachineBasicBlock &MBB = *I; + + MachineBasicBlock::instr_iterator MII = MBB.instr_begin(), + MIE = MBB.instr_end(); + MachineBasicBlock::instr_iterator NextMII; + + // Iterate through the instructions in the basic block + for (; MII != MIE; MII = NextMII) { + NextMII = std::next(MII); + MachineInstr *MI = &*MII; + unsigned Opcode = MI->getOpcode(); + + // Don't process bundled instructions or pseudo operations + if (MI->isBundle() || MI->isTransient()) + continue; + + if (MI->isCall() || MI->isBranch() || MI->isReturn()) { + if (StoreInsn) { + if (!(MI->isBranch() && (MI->getOpcode() != Xtensa::J) && + (MI->getOpcode() != Xtensa::JX))) { + DebugLoc dl = MI->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::MEMW); + MachineBasicBlock::instr_iterator BranchI = MI->getIterator(); + while (((*BranchI).isBranch() || (*BranchI).isCall() || + (*BranchI).isReturn()) && + (BranchI != MBB.instr_begin())) + BranchI = std::prev(BranchI); + + if (BranchI != MBB.instr_begin()) + BranchI = std::next(BranchI); + + BuildMI(MBB, BranchI, dl, NewMCID); + Modified = true; + StoreInsn = nullptr; + NumAdded++; + } + } + if (LastHIQIStore) { + DebugLoc dl = LastHIQIStore->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::MEMW); + BuildMI(*LastHIQIStore->getParent(), 
+ std::next(LastHIQIStore->getIterator()), dl, NewMCID); + LastHIQIStore = nullptr; + Modified = true; + NumAdded++; + } + continue; + } + + switch (Opcode) { + case Xtensa::LSI: + case Xtensa::L32I_N: + case Xtensa::L32I: + case Xtensa::L16SI: + case Xtensa::L16UI: + case Xtensa::L8UI: + if (StoreInsn) { + MachineMemOperand *MMO = *MII->memoperands_begin(); + if (!MMO->isVolatile()) { + DebugLoc dl = MII->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::MEMW); + BuildMI(MBB, MII, dl, NewMCID); + Modified = true; + StoreInsn = nullptr; + NumAdded++; + } + } + if (LastHIQIStore) { + DebugLoc dl = LastHIQIStore->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::MEMW); + BuildMI(*LastHIQIStore->getParent(), + std::next(LastHIQIStore->getIterator()), dl, NewMCID); + LastHIQIStore = nullptr; + Modified = true; + NumAdded++; + } + break; + case Xtensa::SSI: + case Xtensa::S32I_N: + case Xtensa::S32I: { + LastHIQIStore = nullptr; + StoreInsn = MI; + } break; + case Xtensa::S16I: + case Xtensa::S8I: { + MachineMemOperand *MMO = *MII->memoperands_begin(); + if (!MMO->isVolatile()) { + LastHIQIStore = MI; + } + StoreInsn = MI; + } break; + } + } + } + return Modified; +} + +bool createXtensaPSRAMCacheFix::xtensaInsertMemwReorg(MachineFunction &MF) { + MachineFunction::iterator I = MF.begin(), E = MF.end(); + bool Modified = false; + bool HadMemw = false; + + for (; I != E; ++I) { + MachineBasicBlock &MBB = *I; + + MachineBasicBlock::instr_iterator MII = MBB.instr_begin(), + MIE = MBB.instr_end(); + MachineBasicBlock::instr_iterator NextMII; + + // Iterate through the instructions in the basic block + for (; MII != MIE; MII = NextMII) { + NextMII = std::next(MII); + MachineInstr *MI = &*MII; + unsigned Opcode = MI->getOpcode(); + + // Don't process bundled instructions or pseudo operations + if (MI->isBundle() || MI->isTransient()) + continue; + + switch (Opcode) { + case Xtensa::LSI: + case Xtensa::L32I_N: + case Xtensa::L32I: + case 
Xtensa::L16SI: + case Xtensa::L16UI: + case Xtensa::L8UI: { + MachineMemOperand *MMO = *MII->memoperands_begin(); + if (!MMO->isVolatile() && (!HadMemw)) { + DebugLoc dl = MII->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::MEMW); + BuildMI(MBB, MII, dl, NewMCID); + Modified = true; + NumAdded++; + } + HadMemw = false; + } break; + case Xtensa::SSI: + case Xtensa::S32I_N: + case Xtensa::S32I: + case Xtensa::S16I: + case Xtensa::S8I: { + MachineMemOperand *MMO = *MII->memoperands_begin(); + if (!MMO->isVolatile()) { + DebugLoc dl = MII->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::MEMW); + BuildMI(MBB, NextMII, dl, NewMCID); + Modified = true; + NumAdded++; + } + HadMemw = true; + } break; + default: + HadMemw = false; + break; + } + } + } + return Modified; +} + +bool createXtensaPSRAMCacheFix::runOnMachineFunction(MachineFunction &MF) { + + Subtarget = &static_cast(MF.getSubtarget()); + XtensaII = static_cast(Subtarget->getInstrInfo()); + bool Modified = false; + + if (AlwaysMembarrier) + return xtensaInsertMemwReorg(MF); + + if (!FixESP32PSRAMCacheIssue) + return false; + + if (ESP32PSRAMFixStrat == ESP32_PSRAM_FIX_MEMW) { + Modified = xtensaPSRAMCacheFixMemwReorg(MF); + } else if (ESP32PSRAMFixStrat == ESP32_PSRAM_FIX_NOPS) { + Modified = xtensaPSRAMCacheFixNopReorg(MF); + } + + return Modified; +} + +FunctionPass *llvm::createXtensaPSRAMCacheFixPass() { + return new createXtensaPSRAMCacheFix(); +} + diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp index 7b2f967c5996c..88940535d74be 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp +++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp @@ -140,6 +140,7 @@ void XtensaPassConfig::addPreRegAlloc() { } void XtensaPassConfig::addPreEmitPass() { + addPass(createXtensaPSRAMCacheFixPass()); addPass(createXtensaSizeReductionPass()); addPass(createXtensaFixupHwLoops()); addPass(&BranchRelaxationPassID); diff 
--git a/llvm/test/CodeGen/Xtensa/psram_memw.ll b/llvm/test/CodeGen/Xtensa/psram_memw.ll new file mode 100644 index 0000000000000..f10417ec779e6 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/psram_memw.ll @@ -0,0 +1,50 @@ +; RUN: llc -O1 -mtriple=xtensa -mcpu=esp32 -mfix-esp32-psram-cache-issue -mfix-esp32-psram-cache-strategy=memw %s -o - | FileCheck %s + +@a = dso_local local_unnamed_addr global i32 0, align 4 +@b = dso_local local_unnamed_addr global i32 0, align 4 + +; Function Attrs: nofree norecurse nounwind +define dso_local void @f(i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce, i32 %a4.coerce, i32 %cond) local_unnamed_addr #0 { +entry: + %coerce.val.ip = inttoptr i32 %a1.coerce to i8* + %coerce.val.ip1 = inttoptr i32 %a2.coerce to i16* + %coerce.val.ip2 = inttoptr i32 %a3.coerce to i32* + %coerce.val.ip3 = inttoptr i32 %a4.coerce to i32* + %0 = load i32, i32* %coerce.val.ip2, align 4 + %conv = trunc i32 %0 to i8 + store i8 %conv, i8* %coerce.val.ip, align 1 + %tobool.not = icmp eq i32 %cond, 0 + br i1 %tobool.not, label %if.end, label %if.then +; CHECK: s8i a8, a2, 0 +; CHECK: memw + +if.then: ; preds = %entry + %1 = load i32, i32* %coerce.val.ip2, align 4 + %conv8 = trunc i32 %1 to i16 + store i16 %conv8, i16* %coerce.val.ip1, align 2 + %2 = load i32, i32* %coerce.val.ip3, align 4 + store i32 %2, i32* %coerce.val.ip2, align 4 + %conv10 = trunc i32 %2 to i8 + store i8 %conv10, i8* %coerce.val.ip, align 1 + br label %return +; CHECK: l32i.n a8, a4, 0 +; CHECK: s16i a8, a3, 0 +; CHECK: memw +; CHECK: memw +; CHECK: l32i.n a8, a5, 0 +; CHECK: s32i.n a8, a4, 0 +; CHECK: s8i a8, a2, 0 +; CHECK: memw + +if.end: ; preds = %entry + %3 = load i32, i32* %coerce.val.ip3, align 4 + %conv9 = trunc i32 %3 to i16 + store i16 %conv9, i16* %coerce.val.ip1, align 2 + br label %return +; CHECK: l32i.n a8, a5, 0 +; CHECK: s16i a8, a3, 0 +; CHECK: memw + +return: ; preds = %if.then, %if.end + ret void +} diff --git a/llvm/test/CodeGen/Xtensa/psram_nops.ll 
b/llvm/test/CodeGen/Xtensa/psram_nops.ll new file mode 100644 index 0000000000000..ece7d6f6432c4 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/psram_nops.ll @@ -0,0 +1,60 @@ +; RUN: llc -O1 -mtriple=xtensa -mcpu=esp32 -mfix-esp32-psram-cache-issue -mfix-esp32-psram-cache-strategy=nops %s -o - | FileCheck %s + +@a = dso_local local_unnamed_addr global i32 0, align 4 +@b = dso_local local_unnamed_addr global i32 0, align 4 + +; Function Attrs: nofree norecurse nounwind +define dso_local void @f(i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce, i32 %a4.coerce, i32 %cond) local_unnamed_addr #0 { +entry: + %coerce.val.ip = inttoptr i32 %a1.coerce to i8* + %coerce.val.ip1 = inttoptr i32 %a2.coerce to i16* + %coerce.val.ip2 = inttoptr i32 %a3.coerce to i32* + %coerce.val.ip3 = inttoptr i32 %a4.coerce to i32* + %0 = load i32, i32* %coerce.val.ip2, align 4 + %conv = trunc i32 %0 to i8 + store i8 %conv, i8* %coerce.val.ip, align 1 + %tobool.not = icmp eq i32 %cond, 0 + br i1 %tobool.not, label %if.end, label %if.then +; CHECK: l32i.n a8, a4, 0 +; CHECK: memw +; CHECK: s8i a8, a2, 0 + + +if.then: ; preds = %entry + %1 = load i32, i32* %coerce.val.ip2, align 4 + %conv8 = trunc i32 %1 to i16 + store i16 %conv8, i16* %coerce.val.ip1, align 2 + %2 = load i32, i32* %coerce.val.ip3, align 4 + store i32 %2, i32* %coerce.val.ip2, align 4 + %conv10 = trunc i32 %2 to i8 + store i8 %conv10, i8* %coerce.val.ip, align 1 + br label %return +; CHECK: nop +; CHECK: nop +; CHECK: nop +; CHECK: nop +; CHECK: l32i.n a8, a4, 0 +; CHECK: s16i a8, a3, 0 +; CHECK: memw +; CHECK: nop +; CHECK: nop +; CHECK: nop +; CHECK: nop +; CHECK: l32i.n a8, a5, 0 +; CHECK: s32i.n a8, a4, 0 +; CHECK: memw +; CHECK: s8i a8, a2, 0 + +if.end: ; preds = %entry + %3 = load i32, i32* %coerce.val.ip3, align 4 + %conv9 = trunc i32 %3 to i16 + store i16 %conv9, i16* %coerce.val.ip1, align 2 + br label %return +; CHECK: l32i.n a8, a5, 0 +; CHECK: memw +; CHECK: s16i a8, a3, 0 + + +return: ; preds = %if.then, %if.end + ret 
void +} From 41b711fca31203596c28c9077a2d31024f194784 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 12:57:07 +0300 Subject: [PATCH 057/289] [Xtensa] Fix Hardware Loop optimization. --- .../lib/Target/Xtensa/XtensaHardwareLoops.cpp | 133 ++++++++++++------ llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp | 1 - 2 files changed, 93 insertions(+), 41 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaHardwareLoops.cpp b/llvm/lib/Target/Xtensa/XtensaHardwareLoops.cpp index a12cfcbb3e524..af35b8af1a1bb 100644 --- a/llvm/lib/Target/Xtensa/XtensaHardwareLoops.cpp +++ b/llvm/lib/Target/Xtensa/XtensaHardwareLoops.cpp @@ -194,8 +194,10 @@ bool XtensaHardwareLoops::processLoop(MachineLoop *L) { MachineInstr *LII = nullptr; // LOOPINIT instruction MachineInstr *LEI = nullptr; // LOOPEND instruction MachineBasicBlock *LEMBB = nullptr; - MachineBasicBlock *PH = L->getLoopPreheader(); + MachineBasicBlock *LH = L->getHeader(); MachineBasicBlock *LastMBB = L->getLoopLatch(); + std::vector LoopInitInsts; + std::map LoopInitMap; // Try to find LOOPEND instruction in the loop latch for (auto MBI = L->block_begin(), MBIE = L->block_end(); MBI != MBIE; ++MBI) { @@ -207,40 +209,56 @@ bool XtensaHardwareLoops::processLoop(MachineLoop *L) { LEI = LMI; LEMBB = *MBI; } + // Collect LOOPINIT instructions inside the loop + if (LMI->getOpcode() == Xtensa::LOOPINIT) { + LoopInitInsts.push_back(LMI); + MachineBasicBlock *SB = LMI->getParent(); + while (!SB->isSuccessor(LH)) { + for (auto SBI : SB->successors()) { + if (!L->contains(SBI)) + continue; + SB = SBI; + break; + } + if (!L->contains(SB)) + llvm_unreachable("Wrong hardware loop"); + } + LoopInitMap[SB] = LMI; + } } VisitedMBBs.insert(*MBI); } if (LEI != nullptr) { - MachineBasicBlock *LH = L->getHeader(); MachineBasicBlock::iterator LHI = LH->getFirstNonPHI(); - - if (!PH) { - llvm_unreachable("Hardware loop predecessor not found"); - return false; - } - - MachineBasicBlock *LIMBB = PH; - - // Try to find LOOPINIT 
instruction in predecessors chain - while ((LII == nullptr) && (LIMBB != nullptr) && - ((L->getParentLoop() == nullptr) || - (L->getParentLoop()->contains(LIMBB)))) { - for (instr_iterator I = LIMBB->instr_begin(), E = LIMBB->instr_end(); - I != E; ++I) { - MachineInstr *MI = &*I; - if (MI->getOpcode() == Xtensa::LOOPINIT) { - LII = MI; - break; + MachineBasicBlock *LIMBB = nullptr; + + // Collect LOOPINIT instructions in predecessors from outter loop + for (auto PBI : LH->predecessors()) { + if (L->contains(PBI)) + continue; + LIMBB = PBI; + LII = nullptr; + // Try to find LOOPINIT instructions in predecessor + while ((LII == nullptr) && (LIMBB != nullptr) && + ((L->getParentLoop() == nullptr) || + (L->getParentLoop()->contains(LIMBB)))) { + for (instr_iterator I = LIMBB->instr_begin(), E = LIMBB->instr_end(); + I != E; ++I) { + MachineInstr *MI = &*I; + if (MI->getOpcode() == Xtensa::LOOPINIT) { + LII = MI; + break; + } } + if (LII == nullptr) + LIMBB = *LIMBB->pred_begin(); } - if (LII == nullptr) - LIMBB = *LIMBB->pred_begin(); - } - - if (LII == nullptr) { - llvm_unreachable("Hardware loop init instruction not found"); - return false; + if (LII == nullptr) { + llvm_unreachable("Hardware loop init instruction not found"); + return false; + } + LoopInitMap[PBI] = LII; } DebugLoc DL = LII->getDebugLoc(); @@ -250,22 +268,30 @@ bool XtensaHardwareLoops::processLoop(MachineLoop *L) { // sub a, a, 1 // bnez a, LH if (!checkLoopSize(L) || containsInvalidInstruction(L) || - (LEMBB != LastMBB) || (!checkLoopEndDisplacement(*LH->getParent(), LH, LEMBB))) { + (LEMBB != LastMBB) || + (!checkLoopEndDisplacement(*LH->getParent(), LH, LEMBB))) { const MCInstrDesc &PD = TII->get(TargetOpcode::PHI); MachineInstr *NewPN = LH->getParent()->CreateMachineInstr(PD, DL); LH->insert(LH->begin(), NewPN); Register PR = MRI->createVirtualRegister(&Xtensa::ARRegClass); NewPN->addOperand(MachineOperand::CreateReg(PR, true)); - MachineOperand MO = - 
MachineOperand::CreateReg(LII->getOperand(0).getReg(), false); - NewPN->addOperand(MO); - NewPN->addOperand(MachineOperand::CreateMBB(PH)); - Register IndR = MRI->createVirtualRegister(&Xtensa::ARRegClass); - MO = MachineOperand::CreateReg(IndR, false); - NewPN->addOperand(MO); - NewPN->addOperand(MachineOperand::CreateMBB(LastMBB)); + + for (auto PB : LH->predecessors()) { + + if (LoopInitMap.find(PB) != LoopInitMap.end()) { + MachineOperand MO = MachineOperand::CreateReg( + LoopInitMap[PB]->getOperand(0).getReg(), false); + NewPN->addOperand(MO); + NewPN->addOperand(MachineOperand::CreateMBB(PB)); + LoopInitMap[PB]->getParent()->erase(LoopInitMap[PB]); + } else { + MachineOperand MO = MachineOperand::CreateReg(IndR, false); + NewPN->addOperand(MO); + NewPN->addOperand(MachineOperand::CreateMBB(PB)); + } + } MachineInstrBuilder MIB = BuildMI(*LEMBB, LEI, LEI->getDebugLoc(), TII->get(Xtensa::ADDI), IndR) @@ -276,15 +302,42 @@ bool XtensaHardwareLoops::processLoop(MachineLoop *L) { .addReg(IndR) .addMBB(LEI->getOperand(0).getMBB()); LEMBB->erase(LEI); - PH->erase(LII); return false; } - //Place LOOPSTART instruction in loop header + // If several LOOPINIT instructions are dicovered then create PHI + // function + if (LoopInitMap.size() > 1) { + const MCInstrDesc &PD = TII->get(TargetOpcode::PHI); + MachineInstr *NewPN = LH->getParent()->CreateMachineInstr(PD, DL); + LH->insert(LH->begin(), NewPN); + Register PR = MRI->createVirtualRegister(&Xtensa::ARRegClass); + NewPN->addOperand(MachineOperand::CreateReg(PR, true)); + + for (auto PB : LH->predecessors()) { + + if (LoopInitMap.find(PB) != LoopInitMap.end()) { + MachineOperand MO = MachineOperand::CreateReg( + LoopInitMap[PB]->getOperand(0).getReg(), false); + NewPN->addOperand(MO); + NewPN->addOperand(MachineOperand::CreateMBB(PB)); + LoopInitMap[PB]->getParent()->erase(LoopInitMap[PB]); + } else { + MachineOperand MO = MachineOperand::CreateReg(PR, false); + NewPN->addOperand(MO); + 
NewPN->addOperand(MachineOperand::CreateMBB(PB)); + } + } + LII = NewPN; + } + BuildMI(*LH, LHI, DL, TII->get(Xtensa::LOOPSTART)) .addReg(LII->getOperand(0).getReg()) - .addMBB(LastMBB); - PH->erase(LII); + .addMBB(LEMBB); + + if (LII->getOpcode() == Xtensa::LOOPINIT) + LII->getParent()->erase(LII); + return true; } diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp index d72fb3bf7fcbf..ca68dec3f10ea 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp @@ -342,7 +342,6 @@ bool XtensaInstrInfo::isBranchOffsetInRange(unsigned BranchOp, return true; case Xtensa::LOOPEND: BrOffset += 4; - BrOffset += 3 * 3; // 2*NOP + LOOP instrucions assert((BrOffset <= 0) && "Wrong hardware loop"); return true; case Xtensa::BR_JT: From 90fcf34326627eccdb739ad775cc71b9701593e1 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 13:00:35 +0300 Subject: [PATCH 058/289] [Xtensa] Support 'f' Inline Assembly Constraint This adds the 'f' inline assembly constraint, as supported by GCC. An 'f'-constrained operand is passed in a floating point register. This patch adds support in both the clang frontend, and LLVM itself. 
--- clang/lib/Basic/Targets/Xtensa.h | 1 + clang/test/CodeGen/xtensa-inline-asm.c | 13 +++++++++++ llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 13 ++++++++++- .../CodeGen/Xtensa/inline-asm-constraints.ll | 23 +++++++++++++++++++ .../test/CodeGen/Xtensa/inline-asm-invalid.ll | 8 +++++++ 5 files changed, 57 insertions(+), 1 deletion(-) create mode 100644 clang/test/CodeGen/xtensa-inline-asm.c create mode 100644 llvm/test/CodeGen/Xtensa/inline-asm-constraints.ll create mode 100644 llvm/test/CodeGen/Xtensa/inline-asm-invalid.ll diff --git a/clang/lib/Basic/Targets/Xtensa.h b/clang/lib/Basic/Targets/Xtensa.h index a9bf0f8cc8f58..dfe3080482ce2 100644 --- a/clang/lib/Basic/Targets/Xtensa.h +++ b/clang/lib/Basic/Targets/Xtensa.h @@ -81,6 +81,7 @@ class LLVM_LIBRARY_VISIBILITY XtensaTargetInfo : public TargetInfo { default: return false; case 'a': + case 'f': Info.setAllowsRegister(); return true; } diff --git a/clang/test/CodeGen/xtensa-inline-asm.c b/clang/test/CodeGen/xtensa-inline-asm.c new file mode 100644 index 0000000000000..2ebf175a5a050 --- /dev/null +++ b/clang/test/CodeGen/xtensa-inline-asm.c @@ -0,0 +1,13 @@ +// RUN: %clang_cc1 -triple xtensa -O1 -emit-llvm %s -o - \ +// RUN: | FileCheck %s + +// Test Xtensa specific inline assembly constraints. 
+ +float f; +void test_f() { +// CHECK-LABEL: define dso_local void @test_f() local_unnamed_addr #0 { +// CHECK: [[FLT_ARG:%[a-zA-Z_0-9]+]] = load float, ptr @f +// CHECK: call void asm sideeffect "", "f"(float [[FLT_ARG]]) + asm volatile ("" :: "f"(f)); +} + diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index b5f54af7c5a5f..6abb405e85ce4 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -390,6 +390,7 @@ XtensaTargetLowering::getConstraintType(StringRef Constraint) const { switch (Constraint[0]) { case 'a': case 'd': + case 'f': case 'r': return C_RegisterClass; @@ -410,6 +411,8 @@ XtensaTargetLowering::getSingleConstraintMatchWeight( if (CallOperandVal == NULL) return CW_Default; + Type *type = CallOperandVal->getType(); + // Look at the constraint type. switch (*constraint) { default: @@ -419,9 +422,14 @@ XtensaTargetLowering::getSingleConstraintMatchWeight( case 'a': case 'd': case 'r': - if (CallOperandVal->getType()->isIntegerTy()) + if (type->isIntegerTy()) + weight = CW_Register; + break; + case 'f': + if (type->isFloatingPointTy()) weight = CW_Register; break; + } return weight; } @@ -438,6 +446,9 @@ XtensaTargetLowering::getRegForInlineAsmConstraint( case 'd': // Data register (equivalent to 'r') case 'r': // General-purpose register return std::make_pair(0U, &Xtensa::ARRegClass); + case 'f': // Floating-point register + if (Subtarget.hasSingleFloat()) + return std::make_pair(0U, &Xtensa::FPRRegClass); } } return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); diff --git a/llvm/test/CodeGen/Xtensa/inline-asm-constraints.ll b/llvm/test/CodeGen/Xtensa/inline-asm-constraints.ll new file mode 100644 index 0000000000000..7dbb0f07debda --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/inline-asm-constraints.ll @@ -0,0 +1,23 @@ +; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs < %s \ +; RUN: | FileCheck 
-check-prefix=Xtensa %s + + +@gf = external global float + +define float @constraint_f_float(float %a) nounwind { +; Xtensa-LABEL: constraint_f_float: +; Xtensa: # %bb.0: +; Xtensa-NEXT: entry a1, 32 +; Xtensa-NEXT: wfr f8, a2 +; Xtensa-NEXT: l32r a8, .LCPI0_0 +; Xtensa-NEXT: lsi f9, a8, 0 +; Xtensa-NEXT: #APP +; Xtensa-NEXT: add.s f8, f8, f9 +; Xtensa-NEXT: #NO_APP +; Xtensa-NEXT: rfr a2, f8 +; Xtensa-NEXT: retw + %1 = load float, float* @gf + %2 = tail call float asm "add.s $0, $1, $2", "=f,f,f"(float %a, float %1) + ret float %2 +} + diff --git a/llvm/test/CodeGen/Xtensa/inline-asm-invalid.ll b/llvm/test/CodeGen/Xtensa/inline-asm-invalid.ll new file mode 100644 index 0000000000000..260429d933446 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/inline-asm-invalid.ll @@ -0,0 +1,8 @@ +; RUN: not llc -mtriple=xtensa -mcpu=generic < %s 2>&1 | FileCheck %s + +define void @constraint_f() nounwind { +; CHECK: error: couldn't allocate input reg for constraint 'f' + tail call void asm "add.s f0, f1, $0", "f"(float 0.0) + ret void +} + From cb25420475735efc48fec1ed8dd475d1197bf8cd Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 13:01:17 +0300 Subject: [PATCH 059/289] [Xtensa] Correction of the PSRAM fix pass. 
--- .../lib/Target/Xtensa/XtensaESP32PSRAMFix.cpp | 58 +++++++++++-------- 1 file changed, 33 insertions(+), 25 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaESP32PSRAMFix.cpp b/llvm/lib/Target/Xtensa/XtensaESP32PSRAMFix.cpp index 5f22c2ea0e20c..0c4433ed00122 100644 --- a/llvm/lib/Target/Xtensa/XtensaESP32PSRAMFix.cpp +++ b/llvm/lib/Target/Xtensa/XtensaESP32PSRAMFix.cpp @@ -223,14 +223,16 @@ bool createXtensaPSRAMCacheFix::xtensaPSRAMCacheFixMemwReorg( case Xtensa::L16UI: case Xtensa::L8UI: if (StoreInsn) { - MachineMemOperand *MMO = *MII->memoperands_begin(); - if (!MMO->isVolatile()) { - DebugLoc dl = MII->getDebugLoc(); - const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::MEMW); - BuildMI(MBB, MII, dl, NewMCID); - Modified = true; - StoreInsn = nullptr; - NumAdded++; + if (!MII->memoperands_empty()) { + MachineMemOperand *MMO = *MII->memoperands_begin(); + if (!MMO->isVolatile()) { + DebugLoc dl = MII->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::MEMW); + BuildMI(MBB, MII, dl, NewMCID); + Modified = true; + StoreInsn = nullptr; + NumAdded++; + } } } if (LastHIQIStore) { @@ -251,9 +253,11 @@ bool createXtensaPSRAMCacheFix::xtensaPSRAMCacheFixMemwReorg( } break; case Xtensa::S16I: case Xtensa::S8I: { - MachineMemOperand *MMO = *MII->memoperands_begin(); - if (!MMO->isVolatile()) { - LastHIQIStore = MI; + if (!MII->memoperands_empty()) { + MachineMemOperand *MMO = *MII->memoperands_begin(); + if (!MMO->isVolatile()) { + LastHIQIStore = MI; + } } StoreInsn = MI; } break; @@ -292,13 +296,15 @@ bool createXtensaPSRAMCacheFix::xtensaInsertMemwReorg(MachineFunction &MF) { case Xtensa::L16SI: case Xtensa::L16UI: case Xtensa::L8UI: { - MachineMemOperand *MMO = *MII->memoperands_begin(); - if (!MMO->isVolatile() && (!HadMemw)) { - DebugLoc dl = MII->getDebugLoc(); - const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::MEMW); - BuildMI(MBB, MII, dl, NewMCID); - Modified = true; - NumAdded++; + if (!MII->memoperands_empty()) { + 
MachineMemOperand *MMO = *MII->memoperands_begin(); + if (!MMO->isVolatile() && (!HadMemw)) { + DebugLoc dl = MII->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::MEMW); + BuildMI(MBB, MII, dl, NewMCID); + Modified = true; + NumAdded++; + } } HadMemw = false; } break; @@ -307,13 +313,15 @@ bool createXtensaPSRAMCacheFix::xtensaInsertMemwReorg(MachineFunction &MF) { case Xtensa::S32I: case Xtensa::S16I: case Xtensa::S8I: { - MachineMemOperand *MMO = *MII->memoperands_begin(); - if (!MMO->isVolatile()) { - DebugLoc dl = MII->getDebugLoc(); - const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::MEMW); - BuildMI(MBB, NextMII, dl, NewMCID); - Modified = true; - NumAdded++; + if (!MII->memoperands_empty()) { + MachineMemOperand *MMO = *MII->memoperands_begin(); + if (!MMO->isVolatile()) { + DebugLoc dl = MII->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::MEMW); + BuildMI(MBB, NextMII, dl, NewMCID); + Modified = true; + NumAdded++; + } } HadMemw = true; } break; From ed4dd685241ba301c77ce87ca44df4b4684d66eb Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 13:01:47 +0300 Subject: [PATCH 060/289] [Xtensa] Correction of the hardware loop instrinsics detection. 
--- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 6abb405e85ce4..5f7986c53d8c3 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -272,6 +272,9 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setTargetDAGCombine(ISD::FADD); setTargetDAGCombine(ISD::FSUB); + } + + if (Subtarget.hasSingleFloat() || Subtarget.hasLoop()) { setTargetDAGCombine(ISD::BRCOND); } From d7784a50cb494e0d8a2b3d03842b44d7c3570bfa Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 13:12:42 +0300 Subject: [PATCH 061/289] [Xtensa] Correction of the ESP32-S2 target. The ESP32-S2 chip includes Xtensa ISA extension which helps to work with GPIO, so we add instructions description and test. Add MEMCTL feature to ESP32-S2 target. Implement Xtensa illegal instructions with tests. 
--- .../Xtensa/AsmParser/XtensaAsmParser.cpp | 5 ++ .../Disassembler/XtensaDisassembler.cpp | 8 +++ .../Xtensa/MCTargetDesc/XtensaInstPrinter.cpp | 11 ++++ .../Xtensa/MCTargetDesc/XtensaInstPrinter.h | 1 + .../MCTargetDesc/XtensaMCCodeEmitter.cpp | 17 ++++++ llvm/lib/Target/Xtensa/Xtensa.td | 9 ++- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 55 +++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaOperands.td | 7 +++ llvm/lib/Target/Xtensa/XtensaSubtarget.cpp | 1 + llvm/lib/Target/Xtensa/XtensaSubtarget.h | 5 ++ llvm/test/MC/Xtensa/Core/processor-control.s | 4 ++ llvm/test/MC/Xtensa/xtensa-esp32s2-valid.s | 21 +++++++ llvm/test/MC/Xtensa/xtensa-valid-density.s | 9 +++ 13 files changed, 151 insertions(+), 2 deletions(-) create mode 100644 llvm/test/MC/Xtensa/xtensa-esp32s2-valid.s create mode 100644 llvm/test/MC/Xtensa/xtensa-valid-density.s diff --git a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp index 8e8090cc79e6e..11470a275bd2e 100644 --- a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp +++ b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp @@ -349,6 +349,8 @@ struct XtensaOperand : public MCParsedAsmOperand { bool isseimm7_22() const { return isImm(7, 22); } + bool isSelect_256() const { return isImm(0, 255); } + /// getStartLoc - Gets location of the first token of this operand SMLoc getStartLoc() const override { return StartLoc; } /// getEndLoc - Gets location of the last token of this operand @@ -618,6 +620,9 @@ bool XtensaAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, case Match_Invalidseimm7_22: return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), "expected immediate in range [7, 22]"); + case Match_InvalidSelect_256: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [0, 255]"); } report_fatal_error("Unknown match type detected!"); diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp 
b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index 2835e682ed199..11e71fe8b0922 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -569,6 +569,14 @@ static DecodeStatus decodeSeimm7_22Operand(MCInst &Inst, uint64_t Imm, return MCDisassembler::Success; } +static DecodeStatus decodeSelect_256Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + static int64_t TableB4const[16] = {-1, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256}; static DecodeStatus decodeB4constOperand(MCInst &Inst, uint64_t Imm, diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp index 8d5e56b35b51c..0960c73dba937 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp @@ -439,3 +439,14 @@ void XtensaInstPrinter::printSeimm7_22_AsmOperand(const MCInst *MI, int OpNum, } else printOperand(MI, OpNum, O); } + +void XtensaInstPrinter::printSelect_256_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 255) && + "Invalid argument, value must be in range [0,255]"); + O << Value; + } else + printOperand(MI, OpNum, O); +} diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h index 62b080c635706..b103fb5dc9e01 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h @@ -73,6 +73,7 @@ class XtensaInstPrinter : public MCInstPrinter { void printB4const_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void 
printB4constu_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printSeimm7_22_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printSelect_256_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); }; } // end namespace llvm diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp index 14f9026a1ea4b..73039c2a44480 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp @@ -142,6 +142,11 @@ class XtensaMCCodeEmitter : public MCCodeEmitter { uint32_t getSeimm7_22OpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; + + uint32_t getSelect_256OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + }; } // namespace @@ -593,4 +598,16 @@ XtensaMCCodeEmitter::getSeimm7_22OpValue(const MCInst &MI, unsigned OpNo, return res; } +uint32_t +XtensaMCCodeEmitter::getSelect_256OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + uint32_t Res = static_cast(MO.getImm()); + + assert(((Res >= 0) && (Res <= 255)) && "Unexpected operand value!"); + + return Res; +} + #include "XtensaGenMCCodeEmitter.inc" diff --git a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td index 3d4c21f45af30..29467a346d045 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -153,6 +153,11 @@ def FeatureMiscSR : SubtargetFeature<"miscsr", "HasMiscSR", "true", def HasMiscSR : Predicate<"Subtarget->hasMiscSR()">, AssemblerPredicate<(all_of FeatureMiscSR)>; +def FeatureESP32S2Ops : SubtargetFeature<"esp32s2", "HasESP32S2Ops", "true", + "Support Xtensa esp32-s2 ISA extension">; +def HasESP32S2Ops : Predicate<"Subtarget->hasESP32S2Ops()">, + AssemblerPredicate<(all_of 
FeatureESP32S2Ops)>; + //===----------------------------------------------------------------------===// // Xtensa supported processors. //===----------------------------------------------------------------------===// @@ -170,8 +175,8 @@ def : Proc<"esp8266", [FeatureDensity, FeatureNSA, FeatureMul32, FeatureExtended FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeatureRegionProtection, FeaturePRID]>; def : Proc<"esp32-s2", [FeatureDensity, FeatureWindowed, FeatureSEXT, FeatureNSA, FeatureMul32, FeatureMul32High, FeatureTHREADPTR, FeatureDiv32, - FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, FeatureInterrupt, FeatureRelocatableVector, - FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR]>; + FeatureMEMCTL, FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, FeatureInterrupt, + FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR, FeatureESP32S2Ops]>; //===----------------------------------------------------------------------===// // Register File Description diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index d5479cd1e875d..8a533be9194bf 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1401,6 +1401,27 @@ def WITLB : RRR_Inst<0x00, 0x00, 0x05, (outs AR:$t), (ins AR:$s), let r = 0x6; } +//===----------------------------------------------------------------------===// +// Illegal instructions +//===----------------------------------------------------------------------===// + +let isBarrier = 1, isTerminator = 1 in { + def ILL : CALLX_Inst<0x00, 0x00, 0x00, (outs), (ins), + "ill", []> { + let m = 0x0; + let n = 0x0; + let r = 0; + let s = 0; + } + + def ILL_N : RRRN_Inst<0x0C, (outs), (ins), + "ill.n", []>, Requires<[HasDensity]> { + let r = 0xf; + let s = 0x0; + let t = 0x6; + } +} + 
//===----------------------------------------------------------------------===// // Atomic patterns //===----------------------------------------------------------------------===// @@ -1535,6 +1556,40 @@ let usesCustomInserter = 1, Predicates = [HasS32C1I] in { [(set AR:$dst, (atomic_load_umax_i32 AR:$ptr, AR:$arg))]>; } +//===----------------------------------------------------------------------===// +// Xtensa ESP32S2 Instructions +//===----------------------------------------------------------------------===// +let Predicates = [HasESP32S2Ops] in { + def WR_MASK_GPIO_OUT : RRR_Inst<0x0, 0x06, 0x0, (outs), (ins AR:$s, AR:$t), + "wr_mask_gpio_out\t$s, $t", []> { + let r = 0x2; + } + + def SET_BIT_GPIO_OUT : RRR_Inst<0x0, 0x06, 0x0, (outs), (ins select_256:$imm), + "set_bit_gpio_out\t$imm", []> { + bits<8> imm; + + let r = 0x1; + let s = imm{7-4}; + let t = imm{3-0}; + } + + def CLR_BIT_GPIO_OUT : RRR_Inst<0x0, 0x06, 0x0, (outs), (ins select_256:$imm), + "clr_bit_gpio_out\t$imm", []> { + bits<8> imm; + + let r = 0x0; + let s = imm{7-4}; + let t = imm{3-0}; + } + + def GET_GPIO_IN : RRR_Inst<0x0, 0x06, 0x0, (outs AR:$t), (ins), + "get_gpio_in\t$t", []> { + let r = 0x3; + let s = 0x0; + } +} + //===----------------------------------------------------------------------===// // DSP Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaOperands.td b/llvm/lib/Target/Xtensa/XtensaOperands.td index 9957bf7bd1f47..620aeee000518 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperands.td +++ b/llvm/lib/Target/Xtensa/XtensaOperands.td @@ -175,6 +175,13 @@ def seimm7_22: Immediate= 7 && Imm <= 22; }], "Seimm7_22_As let DecoderMethod = "decodeSeimm7_22Operand"; } +// select_256 predicate - Immediate in the range [0,255] +def Select_256_AsmOperand: ImmAsmOperand<"Select_256">; +def select_256: Immediate= 0 && Imm <= 255; }], "Select_256_AsmOperand"> { + let EncoderMethod = "getSelect_256OpValue"; + let 
DecoderMethod = "decodeSelect_256Operand"; +} + //===----------------------------------------------------------------------===// // Memory address operands //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp index 2856860756757..2127436a3d8d9 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp @@ -57,6 +57,7 @@ XtensaSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) { HasPRID = false; HasRegionProtection = false; HasMiscSR = false; + HasESP32S2Ops = false; // Parse features string. ParseSubtargetFeatures(CPUName, CPUName, FS); diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.h b/llvm/lib/Target/Xtensa/XtensaSubtarget.h index b5f48e547dea7..67540e81fbc89 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.h +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.h @@ -117,6 +117,9 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { // Enable Xtensa Miscellaneous Special Reigsiters option bool HasMiscSR; + // Enable Xtensa esp32-s2 ISA extension + bool HasESP32S2Ops; + XtensaSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS); public: @@ -196,6 +199,8 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { bool hasMiscSR() const { return HasMiscSR; } + bool hasESP32S2Ops() const { return HasESP32S2Ops; } + // Automatically generated by tblgen. 
void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS); }; diff --git a/llvm/test/MC/Xtensa/Core/processor-control.s b/llvm/test/MC/Xtensa/Core/processor-control.s index 1e7bac5789276..ebbc577db7722 100644 --- a/llvm/test/MC/Xtensa/Core/processor-control.s +++ b/llvm/test/MC/Xtensa/Core/processor-control.s @@ -15,6 +15,10 @@ dsync # CHECK: encoding: [0x20,0x20,0x00] esync +# CHECK-INST: ill +# CHECK: encoding: [0x00,0x00,0x00] +ill + # Instruction format RRR # CHECK-INST: isync # CHECK: encoding: [0x00,0x20,0x00] diff --git a/llvm/test/MC/Xtensa/xtensa-esp32s2-valid.s b/llvm/test/MC/Xtensa/xtensa-esp32s2-valid.s new file mode 100644 index 0000000000000..9c998e919c81e --- /dev/null +++ b/llvm/test/MC/Xtensa/xtensa-esp32s2-valid.s @@ -0,0 +1,21 @@ +# RUN: llvm-mc %s -triple=xtensa -mattr=+esp32s2 -show-encoding \ +# RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s + +.align 4 +LBL0: + +# CHECK-INST: clr_bit_gpio_out 52 +# CHECK: encoding: [0x40,0x03,0x06] +clr_bit_gpio_out 52 + +# CHECK-INST: get_gpio_in a2 +# CHECK: encoding: [0x20,0x30,0x06] +get_gpio_in a2 + +# CHECK-INST: set_bit_gpio_out 18 +# CHECK: encoding: [0x20,0x11,0x06] +set_bit_gpio_out 18 + +# CHECK-INST: wr_mask_gpio_out a3, a2 +# CHECK: encoding: [0x20,0x23,0x06] +wr_mask_gpio_out a3, a2 diff --git a/llvm/test/MC/Xtensa/xtensa-valid-density.s b/llvm/test/MC/Xtensa/xtensa-valid-density.s new file mode 100644 index 0000000000000..fc5457ce82ddc --- /dev/null +++ b/llvm/test/MC/Xtensa/xtensa-valid-density.s @@ -0,0 +1,9 @@ +# RUN: llvm-mc %s -triple=xtensa -mattr=+density -show-encoding \ +# RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s + +.align 4 +LBL0: + +# CHECK-INST: ill.n +# CHECK: encoding: [0x6c,0xf0] +ill.n From 072c3e98b48f4aa625381747dd5039f9f4f4dfd1 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 13:18:27 +0300 Subject: [PATCH 062/289] [Xtensa] Implement ESP32-S3 target. Implement support of the ESP32-S3 chip in clang and llvm. 
The ESP32-S3 chip includes Xtensa ISA extension which helps to work with GPIO, so we add instructions description and test. --- clang/lib/Basic/Targets/Xtensa.h | 1 + clang/lib/Driver/ToolChains/Xtensa.cpp | 2 ++ .../Xtensa/AsmParser/XtensaAsmParser.cpp | 8 ++++- llvm/lib/Target/Xtensa/Xtensa.td | 11 ++++++ llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 34 +++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaSubtarget.cpp | 1 + llvm/lib/Target/Xtensa/XtensaSubtarget.h | 5 +++ llvm/test/MC/Xtensa/xtensa-esp32s3-valid.s | 21 ++++++++++++ 8 files changed, 82 insertions(+), 1 deletion(-) create mode 100644 llvm/test/MC/Xtensa/xtensa-esp32s3-valid.s diff --git a/clang/lib/Basic/Targets/Xtensa.h b/clang/lib/Basic/Targets/Xtensa.h index dfe3080482ce2..2bf3f742d1765 100644 --- a/clang/lib/Basic/Targets/Xtensa.h +++ b/clang/lib/Basic/Targets/Xtensa.h @@ -97,6 +97,7 @@ class LLVM_LIBRARY_VISIBILITY XtensaTargetInfo : public TargetInfo { .Case("esp32", true) .Case("esp8266", true) .Case("esp32-s2", true) + .Case("esp32-s3", true) .Case("generic", true) .Default(false); } diff --git a/clang/lib/Driver/ToolChains/Xtensa.cpp b/clang/lib/Driver/ToolChains/Xtensa.cpp index a1149dc6e8438..673adde65ba23 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.cpp +++ b/clang/lib/Driver/ToolChains/Xtensa.cpp @@ -46,6 +46,8 @@ XtensaGCCToolchainDetector::XtensaGCCToolchainDetector( ToolchainName = "xtensa-esp32-elf"; else if (CPUName == "esp32-s2") ToolchainName = "xtensa-esp32s2-elf"; + else if (CPUName == "esp32-s3") + ToolchainName = "xtensa-esp32s3-elf"; else if (CPUName == "esp8266") ToolchainName = "xtensa-lx106-elf"; diff --git a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp index 11470a275bd2e..97be4b5d20596 100644 --- a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp +++ b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp @@ -983,6 +983,7 @@ bool XtensaAsmParser::checkRegister(unsigned RegNo) { unsigned NumMiscSR = 0; bool 
IsESP32 = false; bool IsESP32_S2 = false; + bool IsESP32_S3 = false; bool Res = true; // Assume that CPU is esp32 by default @@ -996,6 +997,11 @@ bool XtensaAsmParser::checkRegister(unsigned RegNo) { NumTimers = 3; NumMiscSR = 4; IsESP32_S2 = true; + } else if (CPU == "esp32-s3") { + NumIntLevels = 6; + NumTimers = 3; + NumMiscSR = 4; + IsESP32_S3 = true; } else if (CPU == "esp8266") { NumIntLevels = 2; NumTimers = 1; @@ -1119,7 +1125,7 @@ bool XtensaAsmParser::checkRegister(unsigned RegNo) { Res = hasTHREADPTR(); break; case Xtensa::GPIO_OUT: - Res = IsESP32_S2; + Res = IsESP32_S2 || IsESP32_S3; break; case Xtensa::EXPSTATE: Res = IsESP32; diff --git a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td index 29467a346d045..5d468294d175d 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -158,6 +158,11 @@ def FeatureESP32S2Ops : SubtargetFeature<"esp32s2", "HasESP32S2Ops", "tru def HasESP32S2Ops : Predicate<"Subtarget->hasESP32S2Ops()">, AssemblerPredicate<(all_of FeatureESP32S2Ops)>; +def FeatureESP32S3Ops : SubtargetFeature<"esp32s3", "HasESP32S3Ops", "true", + "Support Xtensa esp32-s3 ISA extension">; +def HasESP32S3Ops : Predicate<"Subtarget->hasESP32S3Ops()">, + AssemblerPredicate<(all_of FeatureESP32S3Ops)>; + //===----------------------------------------------------------------------===// // Xtensa supported processors. 
//===----------------------------------------------------------------------===// @@ -178,6 +183,12 @@ def : Proc<"esp32-s2", [FeatureDensity, FeatureWindowed, FeatureSEXT, FeatureNSA FeatureMEMCTL, FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR, FeatureESP32S2Ops]>; +def : Proc<"esp32-s3", [FeatureDensity, FeatureSingleFloat, FeatureLoop, FeatureMAC16, FeatureWindowed, FeatureBoolean, + FeatureSEXT, FeatureNSA, FeatureMul32, FeatureMul32High, FeatureDFPAccel, FeatureS32C1I, FeatureTHREADPTR, FeatureDiv32, + FeatureATOMCTL, FeatureMEMCTL, FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, + FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR, + FeatureESP32S3Ops]>; + //===----------------------------------------------------------------------===// // Register File Description //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 8a533be9194bf..58750e6162e53 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1590,6 +1590,40 @@ let Predicates = [HasESP32S2Ops] in { } } +//===----------------------------------------------------------------------===// +// Xtensa ESP32S3 Instructions +//===----------------------------------------------------------------------===// +let Predicates = [HasESP32S3Ops] in { + def EE_WR_MASK_GPIO_OUT : RRR_Inst<0x04, 0x02, 0x07, (outs), (ins AR:$t, AR:$s), + "ee.wr_mask_gpio_out\t$t, $s", []> { + let r = 0x4; + } + + def EE_SET_BIT_GPIO_OUT : RRR_Inst<0x04, 0x05, 0x07, (outs), (ins select_256:$imm), + "ee.set_bit_gpio_out\t$imm", []> { + bits<8> imm; + + let r = 0x4; + let s = imm{7-4}; + let t = imm{3-0}; + } + + def 
EE_CLR_BIT_GPIO_OUT : RRR_Inst<0x04, 0x06, 0x07, (outs), (ins select_256:$imm), + "ee.clr_bit_gpio_out\t$imm", []> { + bits<8> imm; + + let r = 0x4; + let s = imm{7-4}; + let t = imm{3-0}; + } + + def EE_GET_GPIO_IN : RRR_Inst<0x04, 0x05, 0x06, (outs AR:$t), (ins), + "ee.get_gpio_in\t$t", []> { + let r = 0x0; + let s = 0x8; + } +} + //===----------------------------------------------------------------------===// // DSP Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp index 2127436a3d8d9..6bd468181d0be 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp @@ -58,6 +58,7 @@ XtensaSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) { HasRegionProtection = false; HasMiscSR = false; HasESP32S2Ops = false; + HasESP32S3Ops = false; // Parse features string. ParseSubtargetFeatures(CPUName, CPUName, FS); diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.h b/llvm/lib/Target/Xtensa/XtensaSubtarget.h index 67540e81fbc89..85cdb739734a7 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.h +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.h @@ -120,6 +120,9 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { // Enable Xtensa esp32-s2 ISA extension bool HasESP32S2Ops; + // Enable Xtensa esp32-s3 ISA extension + bool HasESP32S3Ops; + XtensaSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS); public: @@ -201,6 +204,8 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { bool hasESP32S2Ops() const { return HasESP32S2Ops; } + bool hasESP32S3Ops() const { return HasESP32S3Ops; } + // Automatically generated by tblgen. 
void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS); }; diff --git a/llvm/test/MC/Xtensa/xtensa-esp32s3-valid.s b/llvm/test/MC/Xtensa/xtensa-esp32s3-valid.s new file mode 100644 index 0000000000000..50037ea38df15 --- /dev/null +++ b/llvm/test/MC/Xtensa/xtensa-esp32s3-valid.s @@ -0,0 +1,21 @@ +# RUN: llvm-mc %s -triple=xtensa -mattr=+esp32s3 -show-encoding \ +# RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s + +.align 4 +LBL0: + +# CHECK-INST: ee.clr_bit_gpio_out 52 +# CHECK: encoding: [0x44,0x43,0x76] +ee.clr_bit_gpio_out 52 + +# CHECK-INST: ee.get_gpio_in a2 +# CHECK: encoding: [0x24,0x08,0x65] +ee.get_gpio_in a2 + +# CHECK-INST: ee.set_bit_gpio_out 18 +# CHECK: encoding: [0x24,0x41,0x75] +ee.set_bit_gpio_out 18 + +# CHECK-INST: ee.wr_mask_gpio_out a3, a2 +# CHECK: encoding: [0x34,0x42,0x72] +ee.wr_mask_gpio_out a3, a2 From 8bc37cf1c79300066045f18d84f37095c109c471 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 13:18:53 +0300 Subject: [PATCH 063/289] [Xtensa] Define register type for CC. 
--- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 10 ++++++++++ llvm/lib/Target/Xtensa/XtensaISelLowering.h | 4 ++++ 2 files changed, 14 insertions(+) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 5f7986c53d8c3..f92acd4932a84 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -339,6 +339,16 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, computeRegisterProperties(STI.getRegisterInfo()); } +/// Return the register type for a given MVT +MVT XtensaTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context, + CallingConv::ID CC, + EVT VT) const { + if (VT.isFloatingPoint()) + return MVT::i32; + + return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT); +} + bool XtensaTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const { if (!VT.isSimple()) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index b4e4bb17063d2..db1384dab03b1 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -101,6 +101,10 @@ class XtensaTargetLowering : public TargetLowering { return LHSTy.getSizeInBits() <= 32 ? 
MVT::i32 : MVT::i64; } + /// Return the register type for a given MVT + MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, + EVT VT) const override; + EVT getSetCCResultType(const DataLayout &, LLVMContext &, EVT VT) const override { if (!VT.isVector()) From 3e34835c3f054dfb206071f0addc5ce3cbc3f60e Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 17 Sep 2024 23:47:12 +0300 Subject: [PATCH 064/289] [Xtensa] Fix FP instructions --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 3 +- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 30 +++++++++++-------- 2 files changed, 19 insertions(+), 14 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index f92acd4932a84..7f18ea22db077 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -1204,7 +1204,7 @@ SDValue XtensaTargetLowering::LowerSELECT_CC(SDValue Op, SDValue TargetCC_FP = DAG.getConstant(CC, DL, MVT::i32); if (LHS.getValueType() == MVT::f32 || TrueValue.getValueType() == MVT::f32) - return DAG.getNode(XtensaISD::SELECT_CC_FP, DL, TrueValue.getValueType(), + return DAG.getNode(XtensaISD::SELECT_CC_FP, DL, TrueValue.getValueType(), LHS, RHS, TrueValue, FalseValue, (LHS.getValueType() == MVT::f32) ? 
TargetCC_FP : TargetCC); @@ -1214,7 +1214,6 @@ SDValue XtensaTargetLowering::LowerSELECT_CC(SDValue Op, SDValue XtensaTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); - EVT Ty = Op.getOperand(0).getValueType(); SDValue LHS = Op.getOperand(0); SDValue RHS = Op.getOperand(1); ISD::CondCode CC = cast(Op.getOperand(2))->get(); diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 58750e6162e53..051f4591ac527 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -991,10 +991,12 @@ def UN_S : FCompare<0x01, 0x0b, "un.s", Xtensa_cmpuo, 1>; def ABS_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), "abs.s\t$r, $s", - [(set FPR:$r, (fabs FPR:$s))]> { + [(set FPR:$r, (fabs FPR:$s))]>, Requires<[HasSingleFloat]> { let t = 0x01; } +def : Pat<(fabs FPR:$s), (ABS_S $s)>; + def ADDEXP_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), "addexp.s\t$r, $s", []>, Requires<[HasSingleFloat]> { let t = 0x0E; @@ -1029,7 +1031,7 @@ def DIVN_S : RRR_Inst<0x00, 0x0A, 0x07, (outs FPR:$r), (ins FPR:$s, FPR:$t), "divn.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; def FLOAT_S : RRR_Inst<0x00, 0x0A, 0x0c, (outs FPR:$r), (ins AR:$s, uimm4:$imm), - "float.s\t$r, $s, $imm", []> { + "float.s\t$r, $s, $imm", []>, Requires<[HasSingleFloat]> { bits<4> imm; let t = imm; @@ -1059,6 +1061,10 @@ def MADD_S : RRR_Inst<0x00, 0x0A, 0x04, (outs FPR:$r), (ins FPR:$a, FPR:$s, FPR: let Constraints = "$r = $a"; } +// fmadd: r1 * r2 + r3 +def : Pat<(fma FPR:$r1, FPR:$r2, FPR:$r3), + (MADD_S $r3, $r1, $r2)>; + def MKDADJ_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), "mkdadj.s\t$r, $s", []>, Requires<[HasSingleFloat]> { let t = 0x0D; @@ -1110,7 +1116,7 @@ def NEXP01_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), def NEG_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), "neg.s\t$r, $s", - [(set FPR:$r, (fneg FPR:$s))]> { + [(set 
FPR:$r, (fneg FPR:$s))]>, Requires<[HasSingleFloat]> { let t = 0x06; } @@ -1121,7 +1127,7 @@ def RECIP0_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), def RFR : RRR_Inst<0x00, 0x0A, 0x0f, (outs AR:$r), (ins FPR:$s), "rfr\t$r, $s", - [(set AR:$r, (bitconvert FPR:$s))]> { + [(set AR:$r, (bitconvert FPR:$s))]>, Requires<[HasSingleFloat]> { let t = 0x04; } @@ -1143,16 +1149,16 @@ def SQRT0_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), } def TRUNC_S : RRR_Inst<0x00, 0x0A, 0x09, (outs AR:$r), (ins FPR:$s, uimm4:$imm), - "trunc.s\t$r, $s, $imm", []> { + "trunc.s\t$r, $s, $imm", []>, Requires<[HasSingleFloat]> { bits<4> imm; let t = imm; } -def : Pat<(i32 (fp_to_sint FPR:$s)), (TRUNC_S FPR:$s, 0)>; +def : Pat<(i32 (any_fp_to_sint FPR:$s)), (TRUNC_S FPR:$s, 0)>; def UFLOAT_S : RRR_Inst<0x00, 0x0A, 0x0D, (outs FPR:$r), (ins AR:$s, uimm4:$imm), - "ufloat.s\t$r, $s, $imm", []> { + "ufloat.s\t$r, $s, $imm", []>, Requires<[HasSingleFloat]> { bits<4> imm; let t = imm; @@ -1161,22 +1167,22 @@ def UFLOAT_S : RRR_Inst<0x00, 0x0A, 0x0D, (outs FPR:$r), (ins AR:$s, uimm4:$imm) def : Pat<(f32 (uint_to_fp AR:$s)), (UFLOAT_S AR:$s, 0)>; def UTRUNC_S : RRR_Inst<0x00, 0x0A, 0x0e, (outs AR:$r), (ins FPR:$s, uimm4:$imm), - "utrunc.s\t$r, $s, $imm", []> { + "utrunc.s\t$r, $s, $imm", []>, Requires<[HasSingleFloat]> { bits<4> imm; let t = imm; } -def : Pat<(i32 (fp_to_uint FPR:$s)), (UTRUNC_S FPR:$s, 0)>; +def : Pat<(i32 (any_fp_to_uint FPR:$s)), (UTRUNC_S FPR:$s, 0)>; def WFR : RRR_Inst<0x00, 0x0A, 0x0f, (outs FPR:$r), (ins AR:$s), "wfr\t$r, $s", - [(set FPR:$r, (bitconvert AR:$s))]> { + [(set FPR:$r, (bitconvert AR:$s))]>, Requires<[HasSingleFloat]> { let t = 0x05; } // FP select operations -let usesCustomInserter = 1 in { +let usesCustomInserter = 1, Predicates = [HasSingleFloat] in { def SELECT_CC_FP_INT : Pseudo<(outs AR:$dst), (ins FPR:$lhs, FPR:$rhs, AR:$t, AR:$f, i32imm:$cond), "!select_cc_fp_int $dst, $lhs, $rhs, $t, $f, $cond", [(set AR:$dst, 
(Xtensa_select_cc_fp FPR:$lhs, FPR:$rhs, AR:$t, AR:$f, imm:$cond))]>; @@ -1189,7 +1195,7 @@ let usesCustomInserter = 1 in { } // FP brcc pesudo operation -let usesCustomInserter = 1, isBranch = 1, isTerminator = 1, isBarrier = 1 in { +let usesCustomInserter = 1, isBranch = 1, isTerminator = 1, isBarrier = 1, Predicates = [HasSingleFloat] in { def BRCC_FP : Pseudo<(outs), (ins i32imm:$cond, FPR:$lhs, FPR:$rhs, brtarget:$target), "!brcc_fp $cond, $lhs, $rhs, $target", [(Xtensa_brcc_fp imm:$cond, FPR:$lhs, FPR:$rhs, bb:$target)]>; From 71e51f06cf3b8896387df4b7412d367b3e5a255e Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 17 Sep 2024 23:54:02 +0300 Subject: [PATCH 065/289] [Xtensa] Correcting FP instructions and intrinsics. Correcting FP instruction descriptions. Implement lowering of the fma, powf and other FP intrinsics. Add test for base FP intrinsics. --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 6 - llvm/test/CodeGen/Xtensa/float-intrinsics.ll | 363 ++++++++++++++++++ 2 files changed, 363 insertions(+), 6 deletions(-) create mode 100644 llvm/test/CodeGen/Xtensa/float-intrinsics.ll diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 7f18ea22db077..00a1e7a2f0ab3 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -183,8 +183,6 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, for (unsigned I = MVT::FIRST_FP_VALUETYPE; I <= MVT::LAST_FP_VALUETYPE; ++I) { MVT VT = MVT::SimpleValueType(I); if (isTypeLegal(VT)) { - // We can use FI for FRINT. 
- // setOperationAction(ISD::FRINT, VT, Legal); if (VT.getSizeInBits() == 32 && Subtarget.hasSingleFloat()) { setOperationAction(ISD::FABS, VT, Legal); setOperationAction(ISD::FADD, VT, Legal); @@ -201,14 +199,10 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setOperationAction(ISD::FSUB, VT, Expand); } - // TODO: once implemented in InstrInfo uncomment - setOperationAction(ISD::FSQRT, VT, Expand); - // No special instructions for these. setOperationAction(ISD::FCBRT, VT, Expand); setOperationAction(ISD::FCEIL, VT, Expand); setOperationAction(ISD::FCOPYSIGN, VT, Expand); - setOperationAction(ISD::FSIN, VT, Expand); setOperationAction(ISD::FCOS, VT, Expand); setOperationAction(ISD::FDIV, VT, Expand); setOperationAction(ISD::FEXP, VT, Expand); diff --git a/llvm/test/CodeGen/Xtensa/float-intrinsics.ll b/llvm/test/CodeGen/Xtensa/float-intrinsics.ll new file mode 100644 index 0000000000000..256a1dee2abf8 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/float-intrinsics.ll @@ -0,0 +1,363 @@ +; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=XTENSA %s + +declare float @llvm.sqrt.f32(float) + +define float @sqrt_f32(float %a) nounwind { +; XTENSA: .literal .LCPI0_0, sqrtf +; XTENSA-LABEL: sqrt_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: l32r a8, .LCPI0_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; + %1 = call float @llvm.sqrt.f32(float %a) + ret float %1 +} + +declare float @llvm.powi.f32(float, i32) + +define float @powi_f32(float %a, i32 %b) nounwind { +; XTENSA: .literal .LCPI1_0, __powisf2 +; XTENSA-LABEL: powi_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: l32r a8, .LCPI1_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n + + %1 = call float @llvm.powi.f32(float %a, i32 %b) + ret float %1 +} + 
+declare float @llvm.sin.f32(float) + +define float @sin_f32(float %a) nounwind { +; XTENSA: .literal .LCPI2_0, sinf +; XTENSA-LABEL: sin_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: l32r a8, .LCPI2_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n + + %1 = call float @llvm.sin.f32(float %a) + ret float %1 +} + +declare float @llvm.cos.f32(float) + +define float @cos_f32(float %a) nounwind { +; XTENSA: .literal .LCPI3_0, cosf +; XTENSA-LABEL: cos_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: l32r a8, .LCPI3_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n + + %1 = call float @llvm.cos.f32(float %a) + ret float %1 +} + +declare float @llvm.pow.f32(float, float) + +define float @pow_f32(float %a, float %b) nounwind { +; XTENSA: .literal .LCPI4_0, powf +; XTENSA-LABEL: pow_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: l32r a8, .LCPI4_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n + %1 = call float @llvm.pow.f32(float %a, float %b) + ret float %1 +} + +declare float @llvm.exp.f32(float) + +define float @exp_f32(float %a) nounwind { +; XTENSA: .literal .LCPI5_0, expf +; XTENSA-LABEL: exp_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: l32r a8, .LCPI5_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n + %1 = call float @llvm.exp.f32(float %a) + ret float %1 +} + +declare float @llvm.exp2.f32(float) + +define float @exp2_f32(float %a) nounwind { +; XTENSA: .literal .LCPI6_0, exp2 +; XTENSA-LABEL: exp2_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: l32r a8, .LCPI6_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n + %1 = call 
float @llvm.exp2.f32(float %a) + ret float %1 +} + +declare float @llvm.log.f32(float) + +define float @log_f32(float %a) nounwind { +; XTENSA: .literal .LCPI7_0, log +; XTENSA-LABEL: log_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: l32r a8, .LCPI7_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n + %1 = call float @llvm.log.f32(float %a) + ret float %1 +} + +declare float @llvm.log10.f32(float) + +define float @log10_f32(float %a) nounwind { +; XTENSA: .literal .LCPI8_0, log10 +; XTENSA-LABEL: log10_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: l32r a8, .LCPI8_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n + %1 = call float @llvm.log10.f32(float %a) + ret float %1 +} + +declare float @llvm.log2.f32(float) + +define float @log2_f32(float %a) nounwind { +; XTENSA: .literal .LCPI9_0, log2 +; XTENSA-LABEL: log2_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: l32r a8, .LCPI9_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n + %1 = call float @llvm.log2.f32(float %a) + ret float %1 +} + +declare float @llvm.fma.f32(float, float, float) + +define float @fma_f32(float %a, float %b, float %c) nounwind { +; XTENSA-LABEL: fma_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: wfr f8, a3 +; XTENSA-NEXT: wfr f9, a2 +; XTENSA-NEXT: wfr f10, a4 +; XTENSA-NEXT: madd.s f10, f9, f8 +; XTENSA-NEXT: rfr a2, f10 +; XTENSA-NEXT: retw.n + + %1 = call float @llvm.fma.f32(float %a, float %b, float %c) + ret float %1 +} + +declare float @llvm.minnum.f32(float, float) + +define float @minnum_f32(float %a, float %b) nounwind { +; XTENSA: .literal .LCPI11_0, fminf +; XTENSA-LABEL: minnum_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: l32r a8, .LCPI11_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: 
mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n + + %1 = call float @llvm.minnum.f32(float %a, float %b) + ret float %1 +} + +declare float @llvm.maxnum.f32(float, float) + +define float @maxnum_f32(float %a, float %b) nounwind { +; XTENSA: .literal .LCPI12_0, fmaxf +; XTENSA-LABEL: maxnum_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: l32r a8, .LCPI12_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n + + %1 = call float @llvm.maxnum.f32(float %a, float %b) + ret float %1 +} + +declare float @llvm.fabs.f32(float) + +define float @fabs_f32(float %a) nounwind { +; XTENSA: .literal .LCPI13_0, 2147483647 +; XTENSA-LABEL: fabs_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: l32r a8, .LCPI13_0 +; XTENSA-NEXT: and a2, a2, a8 +; XTENSA-NEXT: retw.n + + %1 = call float @llvm.fabs.f32(float %a) + ret float %1 +} + +declare float @llvm.copysign.f32(float, float) + +define float @copysign_f32(float %a, float %b) nounwind { +; XTENSA: .literal .LCPI14_0, -2147483648 +; XTENSA: .literal .LCPI14_1, 2147483647 +; XTENSA-LABEL: copysign_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: l32r a8, .LCPI14_0 +; XTENSA-NEXT: and a8, a3, a8 +; XTENSA-NEXT: l32r a9, .LCPI14_1 +; XTENSA-NEXT: and a9, a2, a9 +; XTENSA-NEXT: wfr f8, a9 +; XTENSA-NEXT: movi.n a9, 0 +; XTENSA-NEXT: beq a8, a9, .LBB14_2 +; XTENSA-NEXT: # %bb.1: +; XTENSA-NEXT: neg.s f8, f8 +; XTENSA-NEXT: .LBB14_2: +; XTENSA-NEXT: rfr a2, f8 +; XTENSA-NEXT: retw.n + + %1 = call float @llvm.copysign.f32(float %a, float %b) + ret float %1 +} + +declare float @llvm.floor.f32(float) + +define float @floor_f32(float %a) nounwind { +; XTENSA: .literal .LCPI15_0, floor +; XTENSA-LABEL: floor_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: l32r a8, .LCPI15_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; 
XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n + + %1 = call float @llvm.floor.f32(float %a) + ret float %1 +} + +declare float @llvm.ceil.f32(float) + +define float @ceil_f32(float %a) nounwind { +; XTENSA: .literal .LCPI16_0, ceil +; XTENSA-LABEL: ceil_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: l32r a8, .LCPI16_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n + + %1 = call float @llvm.ceil.f32(float %a) + ret float %1 +} + +declare float @llvm.trunc.f32(float) + +define float @trunc_f32(float %a) nounwind { +; XTENSA: .literal .LCPI17_0, trunc +; XTENSA-LABEL: trunc_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: l32r a8, .LCPI17_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n + + %1 = call float @llvm.trunc.f32(float %a) + ret float %1 +} + +declare float @llvm.rint.f32(float) + +define float @rint_f32(float %a) nounwind { +; XTENSA: .literal .LCPI18_0, rint +; XTENSA-LABEL: rint_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: l32r a8, .LCPI18_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n + + %1 = call float @llvm.rint.f32(float %a) + ret float %1 +} + +declare float @llvm.nearbyint.f32(float) + +define float @nearbyint_f32(float %a) nounwind { +; XTENSA: .literal .LCPI19_0, nearbyint +; XTENSA-LABEL: nearbyint_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: l32r a8, .LCPI19_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n + + %1 = call float @llvm.nearbyint.f32(float %a) + ret float %1 +} + +declare float @llvm.round.f32(float) + +define float @round_f32(float %a) nounwind { +; XTENSA: .literal .LCPI20_0, round +; XTENSA-LABEL: round_f32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: l32r a8, 
.LCPI20_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n + + %1 = call float @llvm.round.f32(float %a) + ret float %1 +} From 31cf08ba6edeb1f7bd5c61b316d5765afb910119 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 13:20:45 +0300 Subject: [PATCH 066/289] [Xtensa] Implement MUL16 feature. --- llvm/lib/Target/Xtensa/Xtensa.td | 21 +++++++++++++-------- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 11 +++++++++++ llvm/lib/Target/Xtensa/XtensaSubtarget.cpp | 1 + llvm/lib/Target/Xtensa/XtensaSubtarget.h | 5 +++++ llvm/test/MC/Xtensa/xtensa-valid-mul16.s | 14 ++++++++++++++ 5 files changed, 44 insertions(+), 8 deletions(-) create mode 100644 llvm/test/MC/Xtensa/xtensa-valid-mul16.s diff --git a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td index 5d468294d175d..a953bf031a6e5 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -52,6 +52,11 @@ def FeatureNSA : SubtargetFeature<"nsa", "HasNSA", "true", def HasNSA : Predicate<"Subtarget->hasNSA()">, AssemblerPredicate<(all_of FeatureNSA)>; +def FeatureMul16 : SubtargetFeature<"mul16", "HasMul16", "true", + "Enable Xtensa Mul16 option">; +def HasMul16 : Predicate<"Subtarget->hasMul16()">, + AssemblerPredicate<(all_of FeatureMul16)>; + def FeatureMul32 : SubtargetFeature<"mul32", "HasMul32", "true", "Enable Xtensa Mul32 option">; def HasMul32 : Predicate<"Subtarget->hasMul32()">, @@ -171,20 +176,20 @@ class Proc Features> def : Proc<"generic", []>; -def : Proc<"esp32", [FeatureDensity, FeatureSingleFloat, FeatureLoop, FeatureMAC16, FeatureWindowed, FeatureBoolean, - FeatureSEXT, FeatureNSA, FeatureMul32, FeatureMul32High, FeatureDFPAccel, FeatureS32C1I, FeatureTHREADPTR, FeatureDiv32, +def : Proc<"esp32", [FeatureDensity, FeatureSingleFloat, FeatureLoop, FeatureMAC16, FeatureWindowed, FeatureBoolean, FeatureSEXT, + FeatureNSA, FeatureMul16, FeatureMul32, FeatureMul32High, FeatureDFPAccel, 
FeatureS32C1I, FeatureTHREADPTR, FeatureDiv32, FeatureATOMCTL, FeatureMEMCTL, FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR]>; -def : Proc<"esp8266", [FeatureDensity, FeatureNSA, FeatureMul32, FeatureExtendedL32R, FeatureDebug, FeatureException, FeatureHighPriInterrupts, - FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeatureRegionProtection, FeaturePRID]>; +def : Proc<"esp8266", [FeatureDensity, FeatureNSA, FeatureMul16, FeatureMul32, FeatureExtendedL32R, FeatureDebug, FeatureException, + FeatureHighPriInterrupts, FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeatureRegionProtection, FeaturePRID]>; -def : Proc<"esp32-s2", [FeatureDensity, FeatureWindowed, FeatureSEXT, FeatureNSA, FeatureMul32, FeatureMul32High, FeatureTHREADPTR, FeatureDiv32, - FeatureMEMCTL, FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, FeatureInterrupt, +def : Proc<"esp32-s2", [FeatureDensity, FeatureWindowed, FeatureSEXT, FeatureNSA, FeatureMul16, FeatureMul32, FeatureMul32High, FeatureTHREADPTR, + FeatureDiv32, FeatureMEMCTL, FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR, FeatureESP32S2Ops]>; -def : Proc<"esp32-s3", [FeatureDensity, FeatureSingleFloat, FeatureLoop, FeatureMAC16, FeatureWindowed, FeatureBoolean, - FeatureSEXT, FeatureNSA, FeatureMul32, FeatureMul32High, FeatureDFPAccel, FeatureS32C1I, FeatureTHREADPTR, FeatureDiv32, +def : Proc<"esp32-s3", [FeatureDensity, FeatureSingleFloat, FeatureLoop, FeatureMAC16, FeatureWindowed, FeatureBoolean, FeatureSEXT, + FeatureNSA, FeatureMul16, FeatureMul32, FeatureMul32High, FeatureDFPAccel, FeatureS32C1I, FeatureTHREADPTR, FeatureDiv32, FeatureATOMCTL, FeatureMEMCTL, FeatureDebug, FeatureException, 
FeatureHighPriInterrupts, FeatureCoprocessor, FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR, FeatureESP32S3Ops]>; diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 051f4591ac527..92ee844801b12 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1274,6 +1274,17 @@ def NSAU : RRR_Inst<0x00, 0x00, 0x04, (outs AR:$t), (ins AR:$s), let r = 0xF; } +//===----------------------------------------------------------------------===// +// Mul16 Instructions +//===----------------------------------------------------------------------===// + +let Predicates = [HasMul16] in { + def MUL16S : RRR_Inst<0x00, 0x01, 0x0D, (outs AR:$r), (ins AR:$s, AR:$t), + "mul16s\t$r, $s, $t", []>; + def MUL16U : RRR_Inst<0x00, 0x01, 0x0C, (outs AR:$r), (ins AR:$s, AR:$t), + "mul16u\t$r, $s, $t", []>; +} + //===----------------------------------------------------------------------===// // Mul32 Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp index 6bd468181d0be..ec57ff2bd23db 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp @@ -37,6 +37,7 @@ XtensaSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) { HasLoop = false; HasSEXT = false; HasNSA = false; + HasMul16 = false; HasMul32 = false; HasMul32High = false; HasDiv32 = false; diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.h b/llvm/lib/Target/Xtensa/XtensaSubtarget.h index 85cdb739734a7..d9a423d232ea4 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.h +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.h @@ -57,6 +57,9 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { // Enable Xtensa NSA option bool HasNSA; + // Enable Xtensa Mul16 option + bool 
HasMul16; + // Enable Xtensa Mul32 option bool HasMul32; @@ -162,6 +165,8 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { bool hasNSA() const { return HasNSA; } + bool hasMul16() const { return HasMul16; } + bool hasMul32() const { return HasMul32; } bool hasMul32High() const { return HasMul32High; } diff --git a/llvm/test/MC/Xtensa/xtensa-valid-mul16.s b/llvm/test/MC/Xtensa/xtensa-valid-mul16.s new file mode 100644 index 0000000000000..4a6c525191f87 --- /dev/null +++ b/llvm/test/MC/Xtensa/xtensa-valid-mul16.s @@ -0,0 +1,14 @@ +# RUN: llvm-mc %s -triple=xtensa -mattr=+mul16 -show-encoding \ +# RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s + + +.align 4 +LBL0: + +# CHECK-INST: mul16s a2, a3, a4 +# CHECK: encoding: [0x40,0x23,0xd1] + mul16s a2, a3, a4 + +# CHECK-INST: mul16u a2, a3, a4 +# CHECK: encoding: [0x40,0x23,0xc1] + mul16u a2, a3, a4 From 292da8483851e81cb6a9edb7c565b0f450686ad2 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 13:31:50 +0300 Subject: [PATCH 067/289] [Xtensa] Add a no-op -mlongcalls option for better compatibility. Many projects targeting Xtensa architecture use GCC-specific -mlongcalls option. The current behavior of LLVM for Xtensa is equivalent to this option being set, so accept this option without changing the behavior. --- clang/include/clang/Driver/Options.td | 1 + 1 file changed, 1 insertion(+) diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 917c16b779c28..09497e8d0d100 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -6405,6 +6405,7 @@ def mfix_esp32_psram_cache_issue : Flag<["-"], "mfix-esp32-psram-cache-issue">, def mfix_esp32_psram_cache_strategy_EQ : Joined<["-"], "mfix-esp32-psram-cache-strategy=">, Group, HelpText<" Psram cache fix strategies : memw, nops">, Values<"memw, nops">; +def mlongcalls : Flag<["-"], "mlongcalls">, Group; // These are legacy user-facing driver-level option spellings. 
They are always // aliases for options that are spelled using the more common Unix / GNU flag From ea93248b0a4a7e04da253bad64db02a8acd1d699 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 13:32:31 +0300 Subject: [PATCH 068/289] [Xtensa] Add atomicrmw_xchg test and fix atomic_swap. --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 2 +- llvm/test/CodeGen/Xtensa/atomicrmw.ll | 103 ++++++++++++++++++ 2 files changed, 104 insertions(+), 1 deletion(-) create mode 100644 llvm/test/CodeGen/Xtensa/atomicrmw.ll diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 00a1e7a2f0ab3..04378ff50319a 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -2308,7 +2308,7 @@ XtensaTargetLowering::emitAtomicSwap(MachineInstr &MI, MachineBasicBlock *BB, unsigned R8 = MRI.createVirtualRegister(RC); BuildMI(*BB, St, DL, TII.get(Xtensa::SSR)).addReg(BitOffs); - BuildMI(*BB, St, DL, TII.get(Xtensa::SLL), R8).addReg(AtomValLoop); + BuildMI(*BB, St, DL, TII.get(Xtensa::SRL), R8).addReg(AtomValLoop); if (isByteOperand) { BuildMI(*BB, St, DL, TII.get(Xtensa::SEXT), Res.getReg()) diff --git a/llvm/test/CodeGen/Xtensa/atomicrmw.ll b/llvm/test/CodeGen/Xtensa/atomicrmw.ll new file mode 100644 index 0000000000000..f2b7526e33e84 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/atomicrmw.ll @@ -0,0 +1,103 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=CHECK-XTENSA %s + +define i8 @atomicrmw_xchg_i8_seq_cst(i8* %a, i8 %b) nounwind { +; CHECK-XTENSA-LABEL: atomicrmw_xchg_i8_seq_cst: +; CHECK-XTENSA: # %bb.0: +; CHECK-XTENSA-NEXT: entry a1, 32 +; CHECK-XTENSA-NEXT: memw +; CHECK-XTENSA-NEXT: movi.n a8, 3 +; CHECK-XTENSA-NEXT: and a8, a8, a2 +; CHECK-XTENSA-NEXT: sub a9, a2, a8 +; CHECK-XTENSA-NEXT: slli a8, a8, 3 +; CHECK-XTENSA-NEXT: movi a10, 
255 +; CHECK-XTENSA-NEXT: ssl a8 +; CHECK-XTENSA-NEXT: movi.n a11, -1 +; CHECK-XTENSA-NEXT: sll a10, a10 +; CHECK-XTENSA-NEXT: xor a11, a10, a11 +; CHECK-XTENSA-NEXT: l32i.n a12, a9, 0 +; CHECK-XTENSA-NEXT: sll a12, a3 +; CHECK-XTENSA-NEXT: l32i.n a13, a9, 0 +; CHECK-XTENSA-NEXT: and a14, a13, a10 +; CHECK-XTENSA-NEXT: .LBB0_1: # =>This Loop Header: Depth=1 +; CHECK-XTENSA-NEXT: # Child Loop BB0_2 Depth 2 +; CHECK-XTENSA-NEXT: mov.n a13, a14 +; CHECK-XTENSA-NEXT: memw +; CHECK-XTENSA-NEXT: l32i.n a14, a9, 0 +; CHECK-XTENSA-NEXT: and a7, a14, a11 +; CHECK-XTENSA-NEXT: .LBB0_2: # Parent Loop BB0_1 Depth=1 +; CHECK-XTENSA-NEXT: # => This Inner Loop Header: Depth=2 +; CHECK-XTENSA-NEXT: mov.n a15, a7 +; CHECK-XTENSA-NEXT: or a14, a12, a15 +; CHECK-XTENSA-NEXT: or a7, a13, a15 +; CHECK-XTENSA-NEXT: wsr a7, scompare1 +; CHECK-XTENSA-NEXT: s32c1i a14, a9, 0 +; CHECK-XTENSA-NEXT: beq a7, a14, .LBB0_4 +; CHECK-XTENSA-NEXT: # %bb.3: # in Loop: Header=BB0_2 Depth=2 +; CHECK-XTENSA-NEXT: and a7, a14, a11 +; CHECK-XTENSA-NEXT: bne a7, a15, .LBB0_2 +; CHECK-XTENSA-NEXT: .LBB0_4: # in Loop: Header=BB0_1 Depth=1 +; CHECK-XTENSA-NEXT: and a14, a14, a10 +; CHECK-XTENSA-NEXT: bne a14, a13, .LBB0_1 +; CHECK-XTENSA-NEXT: # %bb.5: +; CHECK-XTENSA-NEXT: ssr a8 +; CHECK-XTENSA-NEXT: srl a8, a14 +; CHECK-XTENSA-NEXT: sext a2, a8, 7 +; CHECK-XTENSA-NEXT: memw +; CHECK-XTENSA-NEXT: retw.n + + %1 = atomicrmw xchg i8* %a, i8 %b seq_cst + ret i8 %1 +} + +define i16 @atomicrmw_xchg_i16_seq_cst(i16* %a, i16 %b) nounwind { +; CHECK-XTENSA-LABEL: atomicrmw_xchg_i16_seq_cst: +; CHECK-XTENSA: # %bb.0: +; CHECK-XTENSA-NEXT: entry a1, 32 +; CHECK-XTENSA-NEXT: memw +; CHECK-XTENSA-NEXT: movi.n a8, 3 +; CHECK-XTENSA-NEXT: and a8, a8, a2 +; CHECK-XTENSA-NEXT: sub a9, a2, a8 +; CHECK-XTENSA-NEXT: slli a8, a8, 3 +; CHECK-XTENSA-NEXT: movi.n a10, 1 +; CHECK-XTENSA-NEXT: slli a10, a10, 16 +; CHECK-XTENSA-NEXT: addi.n a10, a10, -1 +; CHECK-XTENSA-NEXT: ssl a8 +; CHECK-XTENSA-NEXT: movi.n a11, -1 +; 
CHECK-XTENSA-NEXT: sll a10, a10 +; CHECK-XTENSA-NEXT: xor a11, a10, a11 +; CHECK-XTENSA-NEXT: l32i.n a12, a9, 0 +; CHECK-XTENSA-NEXT: sll a12, a3 +; CHECK-XTENSA-NEXT: l32i.n a13, a9, 0 +; CHECK-XTENSA-NEXT: and a14, a13, a10 +; CHECK-XTENSA-NEXT: .LBB1_1: # =>This Loop Header: Depth=1 +; CHECK-XTENSA-NEXT: # Child Loop BB1_2 Depth 2 +; CHECK-XTENSA-NEXT: mov.n a13, a14 +; CHECK-XTENSA-NEXT: memw +; CHECK-XTENSA-NEXT: l32i.n a14, a9, 0 +; CHECK-XTENSA-NEXT: and a7, a14, a11 +; CHECK-XTENSA-NEXT: .LBB1_2: # Parent Loop BB1_1 Depth=1 +; CHECK-XTENSA-NEXT: # => This Inner Loop Header: Depth=2 +; CHECK-XTENSA-NEXT: mov.n a15, a7 +; CHECK-XTENSA-NEXT: or a14, a12, a15 +; CHECK-XTENSA-NEXT: or a7, a13, a15 +; CHECK-XTENSA-NEXT: wsr a7, scompare1 +; CHECK-XTENSA-NEXT: s32c1i a14, a9, 0 +; CHECK-XTENSA-NEXT: beq a7, a14, .LBB1_4 +; CHECK-XTENSA-NEXT: # %bb.3: # in Loop: Header=BB1_2 Depth=2 +; CHECK-XTENSA-NEXT: and a7, a14, a11 +; CHECK-XTENSA-NEXT: bne a7, a15, .LBB1_2 +; CHECK-XTENSA-NEXT: .LBB1_4: # in Loop: Header=BB1_1 Depth=1 +; CHECK-XTENSA-NEXT: and a14, a14, a10 +; CHECK-XTENSA-NEXT: bne a14, a13, .LBB1_1 +; CHECK-XTENSA-NEXT: # %bb.5: +; CHECK-XTENSA-NEXT: ssr a8 +; CHECK-XTENSA-NEXT: srl a8, a14 +; CHECK-XTENSA-NEXT: sext a2, a8, 15 +; CHECK-XTENSA-NEXT: memw +; CHECK-XTENSA-NEXT: retw.n + + %1 = atomicrmw xchg i16* %a, i16 %b seq_cst + ret i16 %1 +} From eb0f35e2453503cddc6b55dad053ca1338576b3d Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 13:40:36 +0300 Subject: [PATCH 069/289] [Xtensa] Initialize MCSubtargetInfo with esp32. Initialize Xtensa MCSubtargetInfo with esp32 subtarget by default. 
--- llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp index d80b98e6f56dd..799db9483c7fd 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp @@ -65,6 +65,8 @@ static MCRegisterInfo *createXtensaMCRegisterInfo(const Triple &TT) { static MCSubtargetInfo * createXtensaMCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) { + if (CPU.empty()) + CPU = "esp32"; return createXtensaMCSubtargetInfoImpl(TT, CPU, CPU, FS); } From 803efc08e6b6ed30fa13bd7813e3b115bb93329d Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 13:43:14 +0300 Subject: [PATCH 070/289] [Xtensa] Correction of the Hardware Loop pass. Update loop counter via a phi instruction. This improvement fix case when loop have multiple enters. --- .../lib/Target/Xtensa/XtensaHardwareLoops.cpp | 182 ++++++++++++------ llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 64 +++++- llvm/lib/Target/Xtensa/XtensaISelLowering.h | 4 + llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp | 7 +- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 14 +- llvm/lib/Target/Xtensa/XtensaOperators.td | 7 +- .../Xtensa/XtensaTargetTransformInfo.cpp | 2 +- 7 files changed, 202 insertions(+), 78 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaHardwareLoops.cpp b/llvm/lib/Target/Xtensa/XtensaHardwareLoops.cpp index af35b8af1a1bb..baba53e70e9af 100644 --- a/llvm/lib/Target/Xtensa/XtensaHardwareLoops.cpp +++ b/llvm/lib/Target/Xtensa/XtensaHardwareLoops.cpp @@ -67,6 +67,7 @@ struct XtensaHardwareLoops : public MachineFunctionPass { MachineRegisterInfo *MRI; MachineDominatorTree *MDT; const XtensaInstrInfo *TII; + const TargetRegisterInfo *TRI; const XtensaSubtarget *STI; SmallPtrSet VisitedMBBs; @@ -99,7 +100,10 @@ struct XtensaHardwareLoops : public 
MachineFunctionPass { bool checkLoopSize(MachineLoop *L); - bool checkLoopEndDisplacement(MachineFunction &MF, MachineBasicBlock *LH, MachineBasicBlock* LE); + bool checkLoopEndDisplacement(MachineFunction &MF, MachineBasicBlock *LH, + MachineBasicBlock *LE); + + void revertNonLoops(MachineFunction &M); }; char XtensaHardwareLoops::ID = 0; @@ -119,11 +123,11 @@ bool XtensaHardwareLoops::runOnMachineFunction(MachineFunction &MF) { return false; bool Changed = false; - MLI = &getAnalysis().getLI(); MRI = &MF.getRegInfo(); STI = &MF.getSubtarget(); TII = STI->getInstrInfo(); + TRI = STI->getRegisterInfo(); if (!STI->hasLoop()) return false; @@ -135,6 +139,8 @@ bool XtensaHardwareLoops::runOnMachineFunction(MachineFunction &MF) { Changed |= processLoop(L); } + revertNonLoops(MF); + return Changed; } @@ -191,27 +197,33 @@ bool XtensaHardwareLoops::processLoop(MachineLoop *L) { return true; using instr_iterator = MachineBasicBlock::instr_iterator; - MachineInstr *LII = nullptr; // LOOPINIT instruction - MachineInstr *LEI = nullptr; // LOOPEND instruction - MachineBasicBlock *LEMBB = nullptr; + MachineInstr *LII = nullptr; // LOOPINIT instruction + MachineInstr *LDECI = nullptr; // LOOPDEC instruction + MachineInstr *LBRI = nullptr; // LOOPBR instruction + MachineBasicBlock *LDECMBB = nullptr; + MachineBasicBlock *LBRMBB = nullptr; MachineBasicBlock *LH = L->getHeader(); MachineBasicBlock *LastMBB = L->getLoopLatch(); - std::vector LoopInitInsts; std::map LoopInitMap; - // Try to find LOOPEND instruction in the loop latch + // Try to find LOOPENDDEC instruction in the loop latch for (auto MBI = L->block_begin(), MBIE = L->block_end(); MBI != MBIE; ++MBI) { if (VisitedMBBs.count(*MBI)) continue; for (auto MII = (*MBI)->begin(), MIE = (*MBI)->end(); MII != MIE; ++MII) { MachineInstr *LMI = &*MII; - if (LMI->getOpcode() == Xtensa::LOOPEND) { - LEI = LMI; - LEMBB = *MBI; + if (LMI->getOpcode() == Xtensa::LOOPDEC) { + LDECI = LMI; + LDECMBB = *MBI; + } + + if (LMI->getOpcode() 
== Xtensa::LOOPBR) { + LBRI = LMI; + LBRMBB = *MBI; } + // Collect LOOPINIT instructions inside the loop if (LMI->getOpcode() == Xtensa::LOOPINIT) { - LoopInitInsts.push_back(LMI); MachineBasicBlock *SB = LMI->getParent(); while (!SB->isSuccessor(LH)) { for (auto SBI : SB->successors()) { @@ -229,10 +241,17 @@ bool XtensaHardwareLoops::processLoop(MachineLoop *L) { VisitedMBBs.insert(*MBI); } - if (LEI != nullptr) { - MachineBasicBlock::iterator LHI = LH->getFirstNonPHI(); + if ((LBRI != nullptr) && (LDECI != nullptr)) { MachineBasicBlock *LIMBB = nullptr; + for (const auto &Use : MRI->use_operands(LDECI->getOperand(0).getReg())) { + const MachineInstr *UseMI = Use.getParent(); + if ((UseMI != LBRI) && (UseMI->getOpcode() != TargetOpcode::PHI)) { + LLVM_DEBUG(dbgs() << "Xtensa Loops: Unable to remove LoopDec.\n"); + return false; + } + } + // Collect LOOPINIT instructions in predecessors from outter loop for (auto PBI : LH->predecessors()) { if (L->contains(PBI)) @@ -268,76 +287,72 @@ bool XtensaHardwareLoops::processLoop(MachineLoop *L) { // sub a, a, 1 // bnez a, LH if (!checkLoopSize(L) || containsInvalidInstruction(L) || - (LEMBB != LastMBB) || - (!checkLoopEndDisplacement(*LH->getParent(), LH, LEMBB))) { - const MCInstrDesc &PD = TII->get(TargetOpcode::PHI); - MachineInstr *NewPN = LH->getParent()->CreateMachineInstr(PD, DL); - LH->insert(LH->begin(), NewPN); - Register PR = MRI->createVirtualRegister(&Xtensa::ARRegClass); - NewPN->addOperand(MachineOperand::CreateReg(PR, true)); - - Register IndR = MRI->createVirtualRegister(&Xtensa::ARRegClass); + (LBRMBB != LastMBB) || + (!checkLoopEndDisplacement(*LH->getParent(), LH, LBRMBB))) { for (auto PB : LH->predecessors()) { - if (LoopInitMap.find(PB) != LoopInitMap.end()) { - MachineOperand MO = MachineOperand::CreateReg( - LoopInitMap[PB]->getOperand(0).getReg(), false); - NewPN->addOperand(MO); - NewPN->addOperand(MachineOperand::CreateMBB(PB)); + Register Elts = LoopInitMap[PB]->getOperand(1).getReg(); + 
Register Def = LoopInitMap[PB]->getOperand(0).getReg(); + + for (auto &Use : make_early_inc_range(MRI->use_operands(Def))) { + Use.setReg(Elts); + } LoopInitMap[PB]->getParent()->erase(LoopInitMap[PB]); - } else { - MachineOperand MO = MachineOperand::CreateReg(IndR, false); - NewPN->addOperand(MO); - NewPN->addOperand(MachineOperand::CreateMBB(PB)); } } - MachineInstrBuilder MIB = - BuildMI(*LEMBB, LEI, LEI->getDebugLoc(), TII->get(Xtensa::ADDI), IndR) - .addReg(PR) - .addImm(-1); - - MIB = BuildMI(*LEMBB, LEI, LEI->getDebugLoc(), TII->get(Xtensa::BNEZ)) - .addReg(IndR) - .addMBB(LEI->getOperand(0).getMBB()); - LEMBB->erase(LEI); + Register IndR = LDECI->getOperand(0).getReg(); + Register PR = LDECI->getOperand(1).getReg(); + + BuildMI(*LDECMBB, LDECI, LDECI->getDebugLoc(), TII->get(Xtensa::ADDI), + IndR) + .addReg(PR) + .addImm(-1); + BuildMI(*LBRMBB, LBRI, LBRI->getDebugLoc(), TII->get(Xtensa::BNEZ)) + .addReg(IndR) + .addMBB(LBRI->getOperand(1).getMBB()); + LDECMBB->erase(LDECI); + LBRMBB->erase(LBRI); return false; } - // If several LOOPINIT instructions are dicovered then create PHI - // function - if (LoopInitMap.size() > 1) { - const MCInstrDesc &PD = TII->get(TargetOpcode::PHI); - MachineInstr *NewPN = LH->getParent()->CreateMachineInstr(PD, DL); - LH->insert(LH->begin(), NewPN); - Register PR = MRI->createVirtualRegister(&Xtensa::ARRegClass); - NewPN->addOperand(MachineOperand::CreateReg(PR, true)); - - for (auto PB : LH->predecessors()) { + MachineInstr *PN = nullptr; - if (LoopInitMap.find(PB) != LoopInitMap.end()) { - MachineOperand MO = MachineOperand::CreateReg( - LoopInitMap[PB]->getOperand(0).getReg(), false); - NewPN->addOperand(MO); - NewPN->addOperand(MachineOperand::CreateMBB(PB)); - LoopInitMap[PB]->getParent()->erase(LoopInitMap[PB]); - } else { - MachineOperand MO = MachineOperand::CreateReg(PR, false); - NewPN->addOperand(MO); - NewPN->addOperand(MachineOperand::CreateMBB(PB)); - } + for (auto &Use : 
MRI->use_operands(LDECI->getOperand(0).getReg())) { + MachineInstr *UseMI = Use.getParent(); + if (UseMI->getOpcode() == TargetOpcode::PHI) { + PN = UseMI; } - LII = NewPN; } + assert(((PN != nullptr) && (PN->getParent() == LH)) && + "Expected PHI node successor of the LOOPEND instruction in loop " + "header"); + LII = PN; + + Register EltsDec = LDECI->getOperand(0).getReg(); + Register Elts = LDECI->getOperand(1).getReg(); + + for (MachineOperand &MO : PN->operands()) { + if (!MO.isReg() || MO.getReg() != EltsDec) + continue; + MO.substVirtReg(Elts, 0, *TRI); + } + LDECMBB->erase(LDECI); + + MachineBasicBlock::iterator LHI = LH->getFirstNonPHI(); + BuildMI(*LH, LHI, DL, TII->get(Xtensa::LOOPSTART)) .addReg(LII->getOperand(0).getReg()) - .addMBB(LEMBB); + .addMBB(LBRMBB); if (LII->getOpcode() == Xtensa::LOOPINIT) LII->getParent()->erase(LII); + BuildMI(*LBRMBB, LBRI, DL, TII->get(Xtensa::LOOPEND)).addMBB(LH); + LBRMBB->erase(LBRI); + return true; } @@ -386,3 +401,44 @@ bool XtensaHardwareLoops::checkLoopEndDisplacement(MachineFunction &MF, llvm_unreachable("Wrong hardware loop"); } +void XtensaHardwareLoops::revertNonLoops(MachineFunction &MF) { + for (MachineFunction::iterator I = MF.begin(); I != MF.end(); ++I) { + MachineBasicBlock &MBB = *I; + + for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end(); MII != E; + ++MII) { + MachineInstr *MI = &*MII; + if (MI->getOpcode() == Xtensa::LOOPINIT) { + MachineInstr *LI = MI; + MachineBasicBlock *LIMBB = LI->getParent(); + Register Elts = LI->getOperand(1).getReg(); + Register Def = LI->getOperand(0).getReg(); + for (auto &Use : make_early_inc_range(MRI->use_operands(Def))) { + Use.setReg(Elts); + } + --MII; + LIMBB->erase(LI); + } else if (MI->getOpcode() == Xtensa::LOOPDEC) { + MachineInstr *LEI = MI; + MachineBasicBlock *LEMBB = LEI->getParent(); + Register IndR = LEI->getOperand(0).getReg(); + Register PR = LEI->getOperand(1).getReg(); + + BuildMI(*LEMBB, LEI, LEI->getDebugLoc(), TII->get(Xtensa::ADDI), 
IndR) + .addReg(PR) + .addImm(-1); + --MII; + LEMBB->erase(LEI); + } else if (MI->getOpcode() == Xtensa::LOOPBR) { + MachineInstr *LBRI = MI; + MachineBasicBlock *LBRMBB = LBRI->getParent(); + + BuildMI(*LBRMBB, LBRI, LBRI->getDebugLoc(), TII->get(Xtensa::BNEZ)) + .addReg(LBRI->getOperand(0).getReg()) + .addMBB(LBRI->getOperand(1).getMBB()); + --MII; + LBRMBB->erase(LBRI); + } + } + } +} diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 04378ff50319a..e40b5b368a465 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -268,10 +268,14 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setTargetDAGCombine(ISD::FSUB); } - if (Subtarget.hasSingleFloat() || Subtarget.hasLoop()) { + if (Subtarget.hasSingleFloat()) { setTargetDAGCombine(ISD::BRCOND); } + if (Subtarget.hasLoop()) { + setTargetDAGCombine(ISD::BR_CC); + } + // Needed so that we don't try to implement f128 constant loads using // a load-and-extend of a f80 constant (in cases where the constant // would fit in an f80). 
@@ -558,7 +562,7 @@ static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm, } case ISD::INTRINSIC_W_CHAIN: { unsigned IntOp = cast(N.getOperand(1))->getZExtValue(); - if (IntOp != Intrinsic::loop_decrement) + if (IntOp != Intrinsic::loop_decrement_reg) return SDValue(); return N; } @@ -566,17 +570,28 @@ static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm, return SDValue(); } -static SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG, +static SDValue PerformHWLoopCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const XtensaSubtarget &Subtarget) { SDValue Chain = N->getOperand(0); SDLoc DL(N); - SDValue Cond = N->getOperand(1); - SDValue Dest = N->getOperand(2); + SDValue Cond; + SDValue Dest; ISD::CondCode CC = ISD::SETEQ; int Imm = 1; bool Negate = false; + assert(N->getOpcode() == ISD::BR_CC && "Expected BR_CC!"); + CC = cast(N->getOperand(1))->get(); + Cond = N->getOperand(2); + Dest = N->getOperand(4); + if (auto *Const = dyn_cast(N->getOperand(3))) { + if (!Const->isOne() && !Const->isZero()) + return SDValue(); + Imm = Const->getZExtValue(); + } else + return SDValue(); + SDValue Int = SearchLoopIntrinsic(Cond, CC, Imm, Negate); if (Int) { assert((N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BR) && @@ -607,16 +622,39 @@ static SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG, } else if (!IsFalseIfZero(CC, Imm)) { llvm_unreachable("unsupported condition"); } + SDLoc dl(Int); + SDValue Elements = Int.getOperand(2); + SDValue Size = DAG.getTargetConstant( + cast(Int.getOperand(3))->getZExtValue(), dl, MVT::i32); + SDValue Args[] = { + Int.getOperand(0), + Elements, + Size, + }; + SDValue LoopDec = DAG.getNode(XtensaISD::LOOPDEC, dl, + DAG.getVTList(MVT::i32, MVT::Other), Args); // We now need to make the intrinsic dead (it cannot be instruction // selected). 
- DAG.ReplaceAllUsesOfValueWith(Int.getValue(1), Int.getOperand(0)); - assert(Int.getNode()->hasOneUse() && - "Counter decrement has more than one use"); + DAG.ReplaceAllUsesWith(Int.getNode(), LoopDec.getNode()); + + Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, + SDValue(LoopDec.getNode(), 1), Chain); - return DAG.getNode(XtensaISD::LOOPEND, DL, MVT::Other, N->getOperand(0), - Dest); + SDValue EndArgs[] = {Chain, SDValue(LoopDec.getNode(), 0), Dest}; + return DAG.getNode(XtensaISD::LOOPBR, dl, MVT::Other, EndArgs); } + return SDValue(); +} + +static SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const XtensaSubtarget &Subtarget) { + SDValue Chain = N->getOperand(0); + SDLoc DL(N); + SDValue Cond = N->getOperand(1); + SDValue Dest = N->getOperand(2); + ISD::CondCode CC = ISD::SETEQ; if (Cond.getOpcode() != ISD::SETCC) return SDValue(); @@ -644,6 +682,8 @@ SDValue XtensaTargetLowering::PerformDAGCombine(SDNode *N, return performADDCombine(N, DAG, DCI, Subtarget); case ISD::FSUB: return performSUBCombine(N, DAG, DCI, Subtarget); + case ISD::BR_CC: + return PerformHWLoopCombine(N, DAG, DCI, Subtarget); case ISD::BRCOND: return PerformBRCONDCombine(N, DAG, DCI, Subtarget); } @@ -1837,6 +1877,10 @@ const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const { return "XtensaISD::CMPOLE"; case XtensaISD::CMPOLT: return "XtensaISD::CMPOLT"; + case XtensaISD::LOOPBR: + return "XtensaISD::LOOPBR"; + case XtensaISD::LOOPDEC: + return "XtensaISD::LOOPDEC"; case XtensaISD::LOOPEND: return "XtensaISD::LOOPEND"; case XtensaISD::MADD: diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index db1384dab03b1..3cc5528f10145 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -53,6 +53,10 @@ enum { CMPOLE, CMPOLT, + // Branch at the end of the loop, uses result of the LOOPDEC + LOOPBR, + // Decrement 
loop counter + LOOPDEC, LOOPEND, // FP multipy-add/sub diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp index ca68dec3f10ea..b02b03363530d 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp @@ -284,6 +284,7 @@ bool XtensaInstrInfo::reverseBranchCondition( return false; case Xtensa::LOOPEND: + case Xtensa::LOOPBR: return true; default: @@ -321,6 +322,7 @@ XtensaInstrInfo::getBranchDestBlock(const MachineInstr &MI) const { case Xtensa::BNEZ: case Xtensa::BLTZ: case Xtensa::BGEZ: + case Xtensa::LOOPBR: return MI.getOperand(1).getMBB(); case Xtensa::BT: @@ -341,6 +343,7 @@ bool XtensaInstrInfo::isBranchOffsetInRange(unsigned BranchOp, case Xtensa::JX: return true; case Xtensa::LOOPEND: + case Xtensa::LOOPBR: BrOffset += 4; assert((BrOffset <= 0) && "Wrong hardware loop"); return true; @@ -656,6 +659,7 @@ unsigned XtensaInstrInfo::InsertBranchAtInst(MachineBasicBlock &MBB, case Xtensa::BNEZ: case Xtensa::BLTZ: case Xtensa::BGEZ: + case Xtensa::LOOPBR: MI = BuildMI(MBB, I, DL, get(BR_C)).addReg(Cond[1].getReg()).addMBB(TBB); break; case Xtensa::BT: @@ -663,7 +667,7 @@ unsigned XtensaInstrInfo::InsertBranchAtInst(MachineBasicBlock &MBB, MI = BuildMI(MBB, I, DL, get(BR_C)).addReg(Cond[1].getReg()).addMBB(TBB); break; case Xtensa::LOOPEND: - MI = BuildMI(MBB, I, DL, get(BR_C)).addMBB(TBB); + MI = BuildMI(MBB, I, DL, get(Xtensa::LOOPEND)).addMBB(TBB); break; default: llvm_unreachable("Invalid branch type!"); @@ -752,6 +756,7 @@ bool XtensaInstrInfo::isBranch(const MachineBasicBlock::iterator &MI, case Xtensa::BNEZ: case Xtensa::BLTZ: case Xtensa::BGEZ: + case Xtensa::LOOPBR: Cond[0].setImm(OpCode); Target = &MI->getOperand(1); return true; diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 92ee844801b12..7ce22ce9c6142 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ 
b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1233,8 +1233,8 @@ def LOOPNEZ : RRI8_Inst<0x06, (outs), (ins AR:$s, ltarget:$target), } let isTerminator = 1, isBarrier = 1, hasSideEffects = 1, Size = 3 in { - def LOOPINIT : Pseudo<(outs), (ins AR:$elts), - "!loopinit $elts", [(int_set_loop_iterations AR:$elts)]>; + def LOOPINIT : Pseudo<(outs AR:$elts), (ins AR:$eltsin), + "!loopinit $elts, $eltsin", [(set AR:$elts, (int_start_loop_iterations AR:$eltsin))]>; } // LOOPSTART pseudo instruction reserves 9 bytes for LOOP operation and NOP operations for possible alignment. @@ -1249,6 +1249,16 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 0, Size = "!loopend $target", [(Xtensa_loopend bb:$target)]>; } +let isTerminator = 1, isBarrier = 1, hasSideEffects = 1, Size = 3 in { + def LOOPDEC : Pseudo<(outs AR:$eltsout), (ins AR:$eltsin), + "!loopdec $eltsout, $eltsin", [(set AR:$eltsout, (Xtensa_loopdec AR:$eltsin))]>; +} + +let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 0, Size = 3 in { + def LOOPBR : Pseudo<(outs), (ins AR:$elts, brtarget:$target), + "!loopbr $elts, $target", [(Xtensa_loopbr AR:$elts, bb:$target)]>; +} + //===----------------------------------------------------------------------===// // SEXT Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaOperators.td b/llvm/lib/Target/Xtensa/XtensaOperators.td index 8cf072d959b2c..f5136c8038273 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperators.td +++ b/llvm/lib/Target/Xtensa/XtensaOperators.td @@ -43,6 +43,8 @@ def SDT_XtensaEXTUI : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCi SDTCisVT<2, i32>, SDTCisVT<3, i32>]>; def SDT_XtensaLoopEnd : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>; +def SDT_XtensaLoopDec : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisVT<0, i32>]>; +def SDT_XtensaLoopBr : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisVT<1, OtherVT>]>; 
//===----------------------------------------------------------------------===// // Node definitions @@ -112,4 +114,7 @@ def Xtensa_rur: SDNode<"XtensaISD::RUR", SDT_XtensaRUR, def Xtensa_loopend: SDNode<"XtensaISD::LOOPEND", SDT_XtensaLoopEnd, [SDNPHasChain, SDNPInGlue]>; - +def Xtensa_loopdec: SDNode<"XtensaISD::LOOPDEC", SDT_XtensaLoopDec, + [SDNPHasChain, SDNPInGlue]>; +def Xtensa_loopbr: SDNode<"XtensaISD::LOOPBR", SDT_XtensaLoopBr, + [SDNPHasChain, SDNPInGlue]>; diff --git a/llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.cpp b/llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.cpp index 7bdec70504772..62ad8b6b00997 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.cpp @@ -27,7 +27,7 @@ bool XtensaTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, return false; LLVMContext &C = L->getHeader()->getContext(); - HWLoopInfo.CounterInReg = false; + HWLoopInfo.CounterInReg = true; HWLoopInfo.IsNestingLegal = false; HWLoopInfo.CountType = Type::getInt32Ty(C); HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1); From c6a6e27e55ce49cb3eaa5e4e5930e80ca6f8293c Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 13:47:17 +0300 Subject: [PATCH 071/289] [Xtensa] Initial porting compiler-rt library for Xtensa. 
--- compiler-rt/cmake/Modules/CompilerRTUtils.cmake | 2 ++ compiler-rt/cmake/base-config-ix.cmake | 2 ++ compiler-rt/cmake/builtin-config-ix.cmake | 3 ++- compiler-rt/lib/builtins/CMakeLists.txt | 2 ++ compiler-rt/lib/builtins/crtbegin.c | 14 ++++++++++++++ 5 files changed, 22 insertions(+), 1 deletion(-) diff --git a/compiler-rt/cmake/Modules/CompilerRTUtils.cmake b/compiler-rt/cmake/Modules/CompilerRTUtils.cmake index 379e2c25949cb..e5b65aecf4568 100644 --- a/compiler-rt/cmake/Modules/CompilerRTUtils.cmake +++ b/compiler-rt/cmake/Modules/CompilerRTUtils.cmake @@ -223,6 +223,8 @@ macro(detect_target_arch) add_default_target_arch(sparc) elseif(__WEBASSEMBLY32) add_default_target_arch(wasm32) + elseif(__XTENSA) + add_default_target_arch(xtensa) elseif(__WEBASSEMBLY64) add_default_target_arch(wasm64) elseif(__VE) diff --git a/compiler-rt/cmake/base-config-ix.cmake b/compiler-rt/cmake/base-config-ix.cmake index 5a97992756a9c..ae1a97f1fd6c9 100644 --- a/compiler-rt/cmake/base-config-ix.cmake +++ b/compiler-rt/cmake/base-config-ix.cmake @@ -284,6 +284,8 @@ macro(test_targets) test_target_arch(riscv64 "" "") elseif("${COMPILER_RT_DEFAULT_TARGET_ARCH}" MATCHES "wasm32") test_target_arch(wasm32 "" "--target=wasm32-unknown-unknown") + elseif("${COMPILER_RT_DEFAULT_TARGET_ARCH}" MATCHES "xtensa") + test_target_arch(xtensa "" "--target=xtensa") elseif("${COMPILER_RT_DEFAULT_TARGET_ARCH}" MATCHES "wasm64") test_target_arch(wasm64 "" "--target=wasm64-unknown-unknown") elseif("${COMPILER_RT_DEFAULT_TARGET_ARCH}" MATCHES "ve") diff --git a/compiler-rt/cmake/builtin-config-ix.cmake b/compiler-rt/cmake/builtin-config-ix.cmake index 1f63e158409ca..0e3c7f85470d7 100644 --- a/compiler-rt/cmake/builtin-config-ix.cmake +++ b/compiler-rt/cmake/builtin-config-ix.cmake @@ -76,6 +76,7 @@ set(SPARCV9 sparcv9) set(WASM32 wasm32) set(WASM64 wasm64) set(VE ve) +set(XTENSA xtensa) if(APPLE) set(ARM64 arm64 arm64e) @@ -87,7 +88,7 @@ set(ALL_BUILTIN_SUPPORTED_ARCH ${X86} ${X86_64} ${AMDGPU} 
${ARM32} ${ARM64} ${AVR} ${HEXAGON} ${MIPS32} ${MIPS64} ${NVPTX} ${PPC32} ${PPC64} ${RISCV32} ${RISCV64} ${SPARC} ${SPARCV9} - ${WASM32} ${WASM64} ${VE} ${LOONGARCH64}) + ${WASM32} ${WASM64} ${VE} ${LOONGARCH64} ${XTENSA}) include(CompilerRTUtils) include(CompilerRTDarwinUtils) diff --git a/compiler-rt/lib/builtins/CMakeLists.txt b/compiler-rt/lib/builtins/CMakeLists.txt index e0b2d08c20775..df6ccb1ad4007 100644 --- a/compiler-rt/lib/builtins/CMakeLists.txt +++ b/compiler-rt/lib/builtins/CMakeLists.txt @@ -757,6 +757,8 @@ set(riscv64_SOURCES set(sparc_SOURCES ${GENERIC_SOURCES} ${GENERIC_TF_SOURCES}) set(sparcv9_SOURCES ${GENERIC_SOURCES} ${GENERIC_TF_SOURCES}) +set(xtensa_SOURCES ${GENERIC_SOURCES} ${GENERIC_TF_SOURCES}) + set(wasm32_SOURCES ${GENERIC_TF_SOURCES} ${GENERIC_SOURCES} diff --git a/compiler-rt/lib/builtins/crtbegin.c b/compiler-rt/lib/builtins/crtbegin.c index a0860ca12ea03..fb22c0035ddc9 100644 --- a/compiler-rt/lib/builtins/crtbegin.c +++ b/compiler-rt/lib/builtins/crtbegin.c @@ -73,6 +73,13 @@ __asm__(".pushsection .init,\"ax\",@progbits\n\t" __asm__(".pushsection .init,\"ax\",@progbits\n\t" "call __do_init\n\t" ".popsection"); +#elif defined(__xtensa__) +__asm__(".pushsection .init.literal,\"ax\",@progbits\n\t" + ".popsection\n\t" + ".pushsection .init,\"ax\",@progbits\n\t" + "movi a8, __do_init\n\t" + "callx8 a8\n\t" + ".popsection"); #else #error "crtbegin without .init_fini array unimplemented for this architecture" #endif // CRT_HAS_INITFINI_ARRAY @@ -130,6 +137,13 @@ __asm__(".pushsection .fini,\"ax\",@progbits\n\t" __asm__(".pushsection .fini,\"ax\",@progbits\n\t" "call __do_fini\n\t" ".popsection"); +#elif defined(__xtensa__) +__asm__(".pushsection .fini.literal,\"ax\",@progbits\n\t" + ".popsection\n\t" + ".pushsection .fini,\"ax\",@progbits\n\t" + "movi a8, __do_fini\n\t" + "callx8 a8\n\t" + ".popsection"); #else #error "crtbegin without .init_fini array unimplemented for this architecture" #endif // CRT_HAS_INIT_FINI_ARRAY From 
9cf8481d39203009a4fecbd3ca6c4a8613fca242 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 13:56:46 +0300 Subject: [PATCH 072/289] [Xtensa] Add support of "-mcpu" option. Implement support of the "-mcpu" option. Support Xtensa features in clang. Add macros definintion in XtensaTargetInfo based on Xtensa features. --- clang/lib/Basic/Targets/Xtensa.cpp | 54 ++++++++++- clang/lib/Basic/Targets/Xtensa.h | 23 +++-- clang/lib/Driver/ToolChains/CommonArgs.cpp | 4 + clang/lib/Driver/ToolChains/Xtensa.cpp | 42 +++++++-- clang/lib/Driver/ToolChains/Xtensa.h | 7 +- clang/test/Driver/xtensa-cpus.c | 36 +++++++ clang/test/Misc/target-invalid-cpu-note.c | 4 + .../llvm/TargetParser/XtensaTargetParser.def | 83 +++++++++++++++++ .../llvm/TargetParser/XtensaTargetParser.h | 71 ++++++++++++++ llvm/include/module.modulemap | 1 + .../Xtensa/AsmParser/XtensaAsmParser.cpp | 14 +-- .../Disassembler/XtensaDisassembler.cpp | 8 +- .../MCTargetDesc/XtensaMCTargetDesc.cpp | 4 + llvm/lib/Target/Xtensa/Xtensa.td | 18 ++-- .../lib/Target/Xtensa/XtensaTargetMachine.cpp | 12 ++- llvm/lib/TargetParser/CMakeLists.txt | 1 + llvm/lib/TargetParser/XtensaTargetParser.cpp | 93 +++++++++++++++++++ .../secondary/llvm/lib/TargetParser/BUILD.gn | 1 + 18 files changed, 439 insertions(+), 37 deletions(-) create mode 100644 clang/test/Driver/xtensa-cpus.c create mode 100644 llvm/include/llvm/TargetParser/XtensaTargetParser.def create mode 100644 llvm/include/llvm/TargetParser/XtensaTargetParser.h create mode 100644 llvm/lib/TargetParser/XtensaTargetParser.cpp diff --git a/clang/lib/Basic/Targets/Xtensa.cpp b/clang/lib/Basic/Targets/Xtensa.cpp index 6ca5cba2f6aec..3bc8cc531069d 100644 --- a/clang/lib/Basic/Targets/Xtensa.cpp +++ b/clang/lib/Basic/Targets/Xtensa.cpp @@ -33,8 +33,60 @@ ArrayRef XtensaTargetInfo::getTargetBuiltins() const { void XtensaTargetInfo::getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const { - Builder.defineMacro("__Xtensa__"); + 
Builder.defineMacro("__ELF__"); Builder.defineMacro("__xtensa__"); Builder.defineMacro("__XTENSA__"); Builder.defineMacro("__XTENSA_EL__"); + if (HasWindowed) + Builder.defineMacro("__XTENSA_WINDOWED_ABI__"); + else + Builder.defineMacro("__XTENSA_CALL0_ABI__"); + if (!HasFP) + Builder.defineMacro("__XTENSA_SOFT_FLOAT__"); +} + +void XtensaTargetInfo::fillValidCPUList( + SmallVectorImpl &Values) const { + llvm::Xtensa::fillValidCPUList(Values); +} + +bool XtensaTargetInfo::initFeatureMap( + llvm::StringMap &Features, DiagnosticsEngine &Diags, StringRef CPU, + const std::vector &FeaturesVec) const { + + // Assume that by default cpu is esp32 + if (CPU.empty()) + CPU = "esp32"; + + CPU = llvm::Xtensa::getBaseName(CPU); + + SmallVector CPUFeatures; + llvm::Xtensa::getCPUFeatures(CPU, CPUFeatures); + + for (auto Feature : CPUFeatures) + if (Feature[0] == '+') + Features[Feature.drop_front(1)] = true; + + return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec); +} + +/// Return true if has this feature, need to sync with handleTargetFeatures. +bool XtensaTargetInfo::hasFeature(StringRef Feature) const { + return llvm::StringSwitch(Feature) + .Case("fp", HasFP) + .Case("windowed", HasWindowed) + .Default(false); +} + +/// Perform initialization based on the user configured set of features. 
+bool XtensaTargetInfo::handleTargetFeatures(std::vector &Features, + DiagnosticsEngine &Diags) { + for (const auto &Feature : Features) { + if (Feature == "+fp") + HasFP = true; + else if (Feature == "+windowed") + HasWindowed = true; + } + + return true; } diff --git a/clang/lib/Basic/Targets/Xtensa.h b/clang/lib/Basic/Targets/Xtensa.h index 2bf3f742d1765..26dba0225014f 100644 --- a/clang/lib/Basic/Targets/Xtensa.h +++ b/clang/lib/Basic/Targets/Xtensa.h @@ -20,6 +20,7 @@ #include "llvm/ADT/StringSwitch.h" #include "llvm/TargetParser/Triple.h" #include "llvm/Support/Compiler.h" +#include "llvm/TargetParser/XtensaTargetParser.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/MacroBuilder.h" @@ -30,6 +31,8 @@ namespace targets { class LLVM_LIBRARY_VISIBILITY XtensaTargetInfo : public TargetInfo { std::string CPU; + bool HasFP = false; + bool HasWindowed = false; public: XtensaTargetInfo(const llvm::Triple &Triple, const TargetOptions &) @@ -93,19 +96,25 @@ class LLVM_LIBRARY_VISIBILITY XtensaTargetInfo : public TargetInfo { } bool isValidCPUName(StringRef Name) const override { - return llvm::StringSwitch(Name) - .Case("esp32", true) - .Case("esp8266", true) - .Case("esp32-s2", true) - .Case("esp32-s3", true) - .Case("generic", true) - .Default(false); + return llvm::Xtensa::parseCPUKind(Name) != llvm::Xtensa::CK_INVALID; } bool setCPU(const std::string &Name) override { CPU = Name; return isValidCPUName(Name); } + + void fillValidCPUList(SmallVectorImpl &Values) const override; + + bool + initFeatureMap(llvm::StringMap &Features, DiagnosticsEngine &Diags, + StringRef CPU, + const std::vector &FeaturesVec) const override; + + bool hasFeature(StringRef Feature) const override; + + bool handleTargetFeatures(std::vector &Features, + DiagnosticsEngine &Diags) override; }; } // namespace targets } // namespace clang diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp index 4b2badddf8b47..77d29cef1eeb1 100644 --- 
a/clang/lib/Driver/ToolChains/CommonArgs.cpp +++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp @@ -23,6 +23,7 @@ #include "Hexagon.h" #include "MSP430.h" #include "Solaris.h" +#include "Xtensa.h" #include "clang/Basic/CharInfo.h" #include "clang/Basic/CodeGenOptions.h" #include "clang/Basic/LangOptions.h" @@ -761,6 +762,9 @@ void tools::getTargetFeatures(const Driver &D, const llvm::Triple &Triple, case llvm::Triple::loongarch64: loongarch::getLoongArchTargetFeatures(D, Triple, Args, Features); break; + case llvm::Triple::xtensa: + xtensa::getXtensaTargetFeatures(D, Triple, Args, Features); + break; } for (auto Feature : unifyTargetFeatures(Features)) { diff --git a/clang/lib/Driver/ToolChains/Xtensa.cpp b/clang/lib/Driver/ToolChains/Xtensa.cpp index 673adde65ba23..c65caee7d6db4 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.cpp +++ b/clang/lib/Driver/ToolChains/Xtensa.cpp @@ -22,6 +22,7 @@ #include "llvm/Option/ArgList.h" #include "llvm/Support/Path.h" #include "llvm/Support/VirtualFileSystem.h" +#include "llvm/TargetParser/XtensaTargetParser.h" #include using namespace clang::driver; @@ -44,9 +45,9 @@ XtensaGCCToolchainDetector::XtensaGCCToolchainDetector( if (CPUName == "esp32") ToolchainName = "xtensa-esp32-elf"; - else if (CPUName == "esp32-s2") + else if (CPUName == "esp32-s2" || CPUName =="esp32s2") ToolchainName = "xtensa-esp32s2-elf"; - else if (CPUName == "esp32-s3") + else if (CPUName == "esp32-s3" || CPUName == "esp32s3") ToolchainName = "xtensa-esp32s3-elf"; else if (CPUName == "esp8266") ToolchainName = "xtensa-lx106-elf"; @@ -157,11 +158,11 @@ XtensaToolChain::XtensaToolChain(const Driver &D, const llvm::Triple &Triple, } Tool *XtensaToolChain::buildLinker() const { - return new tools::Xtensa::Linker(*this); + return new tools::xtensa::Linker(*this); } Tool *XtensaToolChain::buildAssembler() const { - return new tools::Xtensa::Assembler(*this); + return new tools::xtensa::Assembler(*this); } void XtensaToolChain::AddClangSystemIncludeArgs(const 
ArgList &DriverArgs, @@ -226,7 +227,7 @@ const StringRef XtensaToolChain::GetTargetCPUVersion(const ArgList &Args) { return "esp32"; } -void tools::Xtensa::Assembler::ConstructJob(Compilation &C, const JobAction &JA, +void tools::xtensa::Assembler::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, @@ -270,7 +271,7 @@ void tools::Xtensa::Assembler::ConstructJob(Compilation &C, const JobAction &JA, JA, *this, ResponseFileSupport::AtFileCurCP(), Asm, CmdArgs, Inputs)); } -void Xtensa::Linker::ConstructJob(Compilation &C, const JobAction &JA, +void xtensa::Linker::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, const ArgList &Args, @@ -303,3 +304,32 @@ void Xtensa::Linker::ConstructJob(Compilation &C, const JobAction &JA, std::make_unique(JA, *this, ResponseFileSupport::AtFileCurCP(), Args.MakeArgString(Linker), CmdArgs, Inputs)); } + +// Get features by CPU name +static void getXtensaFeaturesFromMcpu(const Driver &D, + const llvm::opt::ArgList &Args, + const llvm::opt::Arg *A, StringRef Mcpu, + std::vector &Features) { + if (llvm::Xtensa::parseCPUKind(Mcpu) == llvm::Xtensa::CK_INVALID) { + D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args); + } else { + SmallVector CPUFeatures; + llvm::Xtensa::getCPUFeatures(Mcpu, CPUFeatures); + for (auto &F : CPUFeatures) { + Features.push_back(F); + } + } +} + +// Xtensa target features. +void xtensa::getXtensaTargetFeatures(const Driver &D, const llvm::Triple &Triple, + const llvm::opt::ArgList &Args, + std::vector &Features) { + if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) + getXtensaFeaturesFromMcpu(D, Args, A, A->getValue(), Features); + + // Now add any that the user explicitly requested on the command line, + // which may override the defaults. 
+ handleTargetFeaturesGroup(D, Triple, Args, Features, + options::OPT_m_xtensa_Features_Group); +} diff --git a/clang/lib/Driver/ToolChains/Xtensa.h b/clang/lib/Driver/ToolChains/Xtensa.h index 663dc63f6d279..f040a7d333c91 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.h +++ b/clang/lib/Driver/ToolChains/Xtensa.h @@ -61,7 +61,7 @@ class LLVM_LIBRARY_VISIBILITY XtensaToolChain : public Generic_ELF { } // end namespace toolchains namespace tools { -namespace Xtensa { +namespace xtensa { class LLVM_LIBRARY_VISIBILITY Linker : public Tool { public: Linker(const ToolChain &TC) @@ -86,7 +86,10 @@ class LLVM_LIBRARY_VISIBILITY Assembler : public Tool { const char *LinkingOutput) const override; }; -} // end namespace Xtensa +void getXtensaTargetFeatures(const Driver &D, const llvm::Triple &Triple, + const llvm::opt::ArgList &Args, + std::vector &Features); +} // end namespace xtensa } // end namespace tools } // end namespace driver } // end namespace clang diff --git a/clang/test/Driver/xtensa-cpus.c b/clang/test/Driver/xtensa-cpus.c new file mode 100644 index 0000000000000..7a93f4cba4b0c --- /dev/null +++ b/clang/test/Driver/xtensa-cpus.c @@ -0,0 +1,36 @@ +// Check target CPUs are correctly passed. 
+ +// RUN: %clang -target xtensa -### -c %s 2>&1 -mcpu=esp8266 | FileCheck -check-prefix=MCPU-ESP8266 %s +// MCPU-ESP8266: "-target-cpu" "esp8266" +// MCPU-ESP8266: "-target-feature" "+density" "-target-feature" "+nsa" "-target-feature" "+mul32" "-target-feature" "+extendedl32r" +// MCPU-ESP8266: "-target-feature" "+debug" "-target-feature" "+exception" "-target-feature" "+highpriinterrupts" +// MCPU-ESP8266: "-target-feature" "+interrupt" "-target-feature" "+rvector" "-target-feature" "+timerint" "-target-feature" "+prid" +// MCPU-ESP8266: "-target-feature" "+regprotect" + +// RUN: %clang -target xtensa -### -c %s 2>&1 -mcpu=esp32 | FileCheck -check-prefix=MCPU-ESP32 %s +// MCPU-ESP32: "-target-cpu" "esp32" +// MCPU-ESP32: "-target-feature" "+density" "-target-feature" "+fp" "-target-feature" "+windowed" "-target-feature" "+bool" +// MCPU-ESP32: "-target-feature" "+loop" "-target-feature" "+sext" "-target-feature" "+nsa" "-target-feature" "+mul32" +// MCPU-ESP32: "-target-feature" "+mul32high" "-target-feature" "+div32" "-target-feature" "+mac16" "-target-feature" "+dfpaccel" +// MCPU-ESP32: "-target-feature" "+s32c1i" "-target-feature" "+threadptr" "-target-feature" "+atomctl" "-target-feature" "+memctl" +// MCPU-ESP32: "-target-feature" "+debug" "-target-feature" "+exception" "-target-feature" "+highpriinterrupts" +// MCPU-ESP32: "-target-feature" "+coprocessor" "-target-feature" "+interrupt" "-target-feature" "+rvector" "-target-feature" "+timerint" +// MCPU-ESP32: "-target-feature" "+prid" "-target-feature" "+regprotect" "-target-feature" "+miscsr" + +// RUN: %clang -target xtensa -### -c %s 2>&1 -mcpu=esp32s2 | FileCheck -check-prefix=MCPU-ESP32S2 %s +// MCPU-ESP32S2: "-target-cpu" "esp32s2" +// MCPU-ESP32S2: "-target-feature" "+density" "-target-feature" "+windowed" "-target-feature" "+sext" "-target-feature" "+nsa" +// MCPU-ESP32S2: "-target-feature" "+mul32" "-target-feature" "+mul32high" "-target-feature" "+div32" "-target-feature" "+threadptr" +// 
MCPU-ESP32S2: "-target-feature" "+memctl" "-target-feature" "+debug" "-target-feature" "+exception" "-target-feature" "+highpriinterrupts" +// MCPU-ESP32S2: "-target-feature" "+coprocessor" "-target-feature" "+interrupt" "-target-feature" "+rvector" "-target-feature" "+timerint" +// MCPU-ESP32S2: "-target-feature" "+prid" "-target-feature" "+regprotect" "-target-feature" "+miscsr" "-target-feature" "+esp32s2" + +// RUN: %clang -target xtensa -### -c %s 2>&1 -mcpu=esp32s3 | FileCheck -check-prefix=MCPU-ESP32S3 %s +// MCPU-ESP32S3: "-target-cpu" "esp32s3" +// MCPU-ESP32S3: "-target-feature" "+density" "-target-feature" "+fp" "-target-feature" "+windowed" "-target-feature" "+bool" +// MCPU-ESP32S3: "-target-feature" "+loop" "-target-feature" "+sext" "-target-feature" "+nsa" "-target-feature" "+mul32" +// MCPU-ESP32S3: "-target-feature" "+mul32high" "-target-feature" "+div32" "-target-feature" "+mac16" "-target-feature" "+dfpaccel" +// MCPU-ESP32S3: "-target-feature" "+s32c1i" "-target-feature" "+threadptr" "-target-feature" "+atomctl" "-target-feature" "+memctl" +// MCPU-ESP32S3: "-target-feature" "+debug" "-target-feature" "+exception" "-target-feature" "+highpriinterrupts" +// MCPU-ESP32S3: "-target-feature" "+coprocessor" "-target-feature" "+interrupt" "-target-feature" "+rvector" "-target-feature" "+timerint" +// MCPU-ESP32S3: "-target-feature" "+prid" "-target-feature" "+regprotect" "-target-feature" "+miscsr" "-target-feature" "+esp32s3" diff --git a/clang/test/Misc/target-invalid-cpu-note.c b/clang/test/Misc/target-invalid-cpu-note.c index 6fd71bb82381a..0b529f0861d46 100644 --- a/clang/test/Misc/target-invalid-cpu-note.c +++ b/clang/test/Misc/target-invalid-cpu-note.c @@ -94,3 +94,7 @@ // RUN: not %clang_cc1 -triple riscv64 -tune-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix TUNE-RISCV64 // TUNE-RISCV64: error: unknown target CPU 'not-a-cpu' // TUNE-RISCV64-NEXT: note: valid target CPU values are: generic-rv64, rocket-rv64, sifive-p450, 
sifive-p670, sifive-s21, sifive-s51, sifive-s54, sifive-s76, sifive-u54, sifive-u74, sifive-x280, spacemit-x60, syntacore-scr3-rv64, veyron-v1, xiangshan-nanhu, generic, rocket, sifive-7-series{{$}} + +// RUN: not %clang_cc1 -triple xtensa -tune-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix TUNE-XTENSA +// TUNE-XTENSA: error: unknown target CPU 'not-a-cpu' +// TUNE-XTENSA: note: valid target CPU values are: generic, esp8266, esp32, esp32s2, esp32-s2, esp32s3, esp32-s3 diff --git a/llvm/include/llvm/TargetParser/XtensaTargetParser.def b/llvm/include/llvm/TargetParser/XtensaTargetParser.def new file mode 100644 index 0000000000000..e46020700f2e2 --- /dev/null +++ b/llvm/include/llvm/TargetParser/XtensaTargetParser.def @@ -0,0 +1,83 @@ +//===- XtensaTargetParser.def - Xtensa target parsing defines ---*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file provides defines to build up the Xtensa target parser's logic. 
+// +//===----------------------------------------------------------------------===// + +#ifndef XTENSA_FEATURE +#define XTENSA_FEATURE(ID, STR) +#endif + +XTENSA_FEATURE(FK_DENSITY, "density") +XTENSA_FEATURE(FK_FP, "fp") +XTENSA_FEATURE(FK_WINDOWED, "windowed") +XTENSA_FEATURE(FK_BOOLEAN, "bool") +XTENSA_FEATURE(FK_LOOP, "loop") +XTENSA_FEATURE(FK_SEXT, "sext") +XTENSA_FEATURE(FK_NSA, "nsa") +XTENSA_FEATURE(FK_MUL32, "mul32") +XTENSA_FEATURE(FK_MUL32HIGH, "mul32high") +XTENSA_FEATURE(FK_DIV32, "div32") +XTENSA_FEATURE(FK_MAC16, "mac16") +XTENSA_FEATURE(FK_DFPACCEL, "dfpaccel") +XTENSA_FEATURE(FK_S32C1I, "s32c1i") +XTENSA_FEATURE(FK_THREADPTR, "threadptr") +XTENSA_FEATURE(FK_EXTENDEDL32R, "extendedl32r") +XTENSA_FEATURE(FK_ATOMCTL, "atomctl") +XTENSA_FEATURE(FK_MEMCTL, "memctl") +XTENSA_FEATURE(FK_DEBUG, "debug") +XTENSA_FEATURE(FK_EXCEPTION, "exception") +XTENSA_FEATURE(FK_HIGHPRIINTERRUPTS, "highpriinterrupts") +XTENSA_FEATURE(FK_COPROCESSOR, "coprocessor") +XTENSA_FEATURE(FK_INTERRUPT, "interrupt") +XTENSA_FEATURE(FK_RVECTOR, "rvector") +XTENSA_FEATURE(FK_TIMERINT, "timerint") +XTENSA_FEATURE(FK_PRID, "prid") +XTENSA_FEATURE(FK_REGPROTECT, "regprotect") +XTENSA_FEATURE(FK_MISCSR, "miscsr") +XTENSA_FEATURE(FK_ESP32S2OPS, "esp32s2") +XTENSA_FEATURE(FK_ESP32S3OPS, "esp32s3") + +#undef XTENSA_FEATURE + +#ifndef XTENSA_CPU +#define XTENSA_CPU(ENUM, NAME, FEATURES) +#endif + +XTENSA_CPU(INVALID, {"invalid"}, FK_INVALID) +XTENSA_CPU(GENERIC, {"generic"}, FK_NONE) +XTENSA_CPU(ESP8266, {"esp8266"}, + (FK_DENSITY | FK_NSA | FK_MUL32 | FK_EXTENDEDL32R | FK_DEBUG | FK_EXCEPTION | FK_HIGHPRIINTERRUPTS | + FK_INTERRUPT | FK_RVECTOR | FK_TIMERINT | FK_REGPROTECT | FK_PRID)) +XTENSA_CPU(ESP32, {"esp32"}, + (FK_DENSITY | FK_FP | FK_LOOP | FK_MAC16 | FK_WINDOWED | FK_BOOLEAN | + FK_SEXT | FK_NSA | FK_MUL32 | FK_MUL32HIGH | FK_DFPACCEL | FK_S32C1I | FK_THREADPTR | FK_DIV32 | + FK_ATOMCTL | FK_MEMCTL | FK_DEBUG | FK_EXCEPTION | FK_HIGHPRIINTERRUPTS | FK_COPROCESSOR | + 
FK_INTERRUPT | FK_RVECTOR | FK_TIMERINT | FK_PRID | FK_REGPROTECT | FK_MISCSR)) +XTENSA_CPU(ESP32S2, {"esp32s2"}, + (FK_DENSITY | FK_WINDOWED | FK_SEXT | FK_NSA | FK_MUL32 | FK_MUL32HIGH | FK_THREADPTR | FK_DIV32 | + FK_MEMCTL | FK_DEBUG | FK_EXCEPTION | FK_HIGHPRIINTERRUPTS | FK_COPROCESSOR | FK_INTERRUPT | + FK_RVECTOR | FK_TIMERINT | FK_PRID | FK_REGPROTECT | FK_MISCSR | FK_ESP32S2OPS)) +XTENSA_CPU(ESP32S3, {"esp32s3"}, + (FK_DENSITY | FK_FP | FK_LOOP | FK_MAC16 | FK_WINDOWED | FK_BOOLEAN | + FK_SEXT | FK_NSA | FK_MUL32 | FK_MUL32HIGH | FK_DFPACCEL | FK_S32C1I | FK_THREADPTR | FK_DIV32 | + FK_ATOMCTL | FK_MEMCTL | FK_DEBUG | FK_EXCEPTION | FK_HIGHPRIINTERRUPTS | FK_COPROCESSOR | + FK_INTERRUPT | FK_RVECTOR | FK_TIMERINT | FK_PRID | FK_REGPROTECT | FK_MISCSR | + FK_ESP32S3OPS)) + +#undef XTENSA_CPU + +#ifndef XTENSA_CPU_ALIAS +#define XTENSA_CPU_ALIAS(NAME, ALTNMAME) +#endif + +XTENSA_CPU_ALIAS("esp32s2", "esp32-s2") +XTENSA_CPU_ALIAS("esp32s3", "esp32-s3") + +#undef XTENSA_CPU_ALIAS diff --git a/llvm/include/llvm/TargetParser/XtensaTargetParser.h b/llvm/include/llvm/TargetParser/XtensaTargetParser.h new file mode 100644 index 0000000000000..b2d642b2d63ef --- /dev/null +++ b/llvm/include/llvm/TargetParser/XtensaTargetParser.h @@ -0,0 +1,71 @@ +//==-- XtensaTargetParser - Parser for Xtensa features --*- C++ -*-=// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements a target parser to recognise Xtensa hardware features +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGETPARSER_XTENSATARGETPARSER_H +#define LLVM_TARGETPARSER_XTENSATARGETPARSER_H + +#include "llvm/TargetParser/Triple.h" +#include + +namespace llvm { +class StringRef; + +namespace Xtensa { + +enum CPUKind : unsigned { +#define XTENSA_CPU(ENUM, NAME, FEATURES) CK_##ENUM, +#include "XtensaTargetParser.def" +}; + +enum FeatureKind : uint64_t { + FK_INVALID = 0, + FK_NONE = 1, + FK_FP = 1 << 1, + FK_WINDOWED = 1 << 2, + FK_BOOLEAN = 1 << 3, + FK_DENSITY = 1 << 4, + FK_LOOP = 1 << 5, + FK_SEXT = 1 << 6, + FK_NSA = 1 << 7, + FK_MUL32 = 1 << 8, + FK_MUL32HIGH = 1 << 9, + FK_DIV32 = 1 << 10, + FK_MAC16 = 1 << 11, + FK_DFPACCEL = 1 << 12, + FK_S32C1I = 1 << 13, + FK_THREADPTR = 1 << 14, + FK_EXTENDEDL32R = 1 << 15, + FK_ATOMCTL = 1 << 16, + FK_MEMCTL = 1 << 17, + FK_DEBUG = 1 << 18, + FK_EXCEPTION = 1 << 19, + FK_HIGHPRIINTERRUPTS = 1 << 20, + FK_COPROCESSOR = 1 << 21, + FK_INTERRUPT = 1 << 22, + FK_RVECTOR = 1 << 23, + FK_TIMERINT = 1 << 24, + FK_PRID = 1 << 25, + FK_REGPROTECT = 1 << 26, + FK_MISCSR = 1 << 27, + FK_ESP32S2OPS = 1 << 28, + FK_ESP32S3OPS = 1 << 29 +}; + +CPUKind parseCPUKind(StringRef CPU); +StringRef getBaseName(StringRef CPU); +void getCPUFeatures(StringRef CPU, SmallVectorImpl &Features); +void fillValidCPUList(SmallVectorImpl &Values); + +} // namespace Xtensa +} // namespace llvm + +#endif // LLVM_SUPPORT_XTENSATARGETPARSER_H diff --git a/llvm/include/module.modulemap b/llvm/include/module.modulemap index b00da6d7cd28c..290f73a047f00 100644 --- a/llvm/include/module.modulemap +++ b/llvm/include/module.modulemap @@ -404,6 +404,7 @@ module LLVM_Utils { textual header "llvm/TargetParser/X86TargetParser.def" textual header 
"llvm/TargetParser/LoongArchTargetParser.def" textual header "llvm/TargetParser/PPCTargetParser.def" + textual header "TargetParser/XtensaTargetParser.def" } // This part of the module is usable from both C and C++ code. diff --git a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp index 97be4b5d20596..648d58e8c18a4 100644 --- a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp +++ b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp @@ -982,8 +982,8 @@ bool XtensaAsmParser::checkRegister(unsigned RegNo) { unsigned NumTimers = 0; unsigned NumMiscSR = 0; bool IsESP32 = false; - bool IsESP32_S2 = false; - bool IsESP32_S3 = false; + bool IsESP32S2 = false; + bool IsESP32S3 = false; bool Res = true; // Assume that CPU is esp32 by default @@ -992,16 +992,16 @@ bool XtensaAsmParser::checkRegister(unsigned RegNo) { NumTimers = 3; NumMiscSR = 4; IsESP32 = true; - } else if (CPU == "esp32-s2") { + } else if (CPU == "esp32s2") { NumIntLevels = 6; NumTimers = 3; NumMiscSR = 4; - IsESP32_S2 = true; - } else if (CPU == "esp32-s3") { + IsESP32S2 = true; + } else if (CPU == "esp32s3") { NumIntLevels = 6; NumTimers = 3; NumMiscSR = 4; - IsESP32_S3 = true; + IsESP32S3 = true; } else if (CPU == "esp8266") { NumIntLevels = 2; NumTimers = 1; @@ -1125,7 +1125,7 @@ bool XtensaAsmParser::checkRegister(unsigned RegNo) { Res = hasTHREADPTR(); break; case Xtensa::GPIO_OUT: - Res = IsESP32_S2 || IsESP32_S3; + Res = IsESP32S2 || IsESP32S3; break; case Xtensa::EXPSTATE: Res = IsESP32; diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index 11e71fe8b0922..a55a13340075b 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -154,7 +154,7 @@ bool CheckRegister(unsigned RegNo, MCSubtargetInfo STI) { unsigned NumTimers = 0; unsigned NumMiscSR = 0; bool IsESP32 = false; - 
bool IsESP32_S2 = false; + bool IsESP32S2 = false; bool Res = true; // Assume that CPU is esp32 by default @@ -163,11 +163,11 @@ bool CheckRegister(unsigned RegNo, MCSubtargetInfo STI) { NumTimers = 3; NumMiscSR = 4; IsESP32 = true; - } else if (CPU == "esp32-s2") { + } else if (CPU == "esp32s2") { NumIntLevels = 6; NumTimers = 3; NumMiscSR = 4; - IsESP32_S2 = true; + IsESP32S2 = true; } else if (CPU == "esp8266") { NumIntLevels = 2; NumTimers = 1; @@ -291,7 +291,7 @@ bool CheckRegister(unsigned RegNo, MCSubtargetInfo STI) { Res = STI.getFeatureBits()[Xtensa::FeatureTHREADPTR]; break; case Xtensa::GPIO_OUT: - Res = IsESP32_S2; + Res = IsESP32S2; break; case Xtensa::EXPSTATE: Res = IsESP32; diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp index 799db9483c7fd..09ebd1850906e 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp @@ -67,6 +67,10 @@ static MCSubtargetInfo * createXtensaMCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) { if (CPU.empty()) CPU = "esp32"; + else if (CPU == "esp32-s2") + CPU = "esp32s2"; + else if (CPU == "esp32-s3") + CPU = "esp32s3"; return createXtensaMCSubtargetInfoImpl(TT, CPU, CPU, FS); } diff --git a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td index a953bf031a6e5..abf189cf43e97 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -184,15 +184,15 @@ def : Proc<"esp32", [FeatureDensity, FeatureSingleFloat, FeatureLoop, FeatureMAC def : Proc<"esp8266", [FeatureDensity, FeatureNSA, FeatureMul16, FeatureMul32, FeatureExtendedL32R, FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeatureRegionProtection, FeaturePRID]>; -def : Proc<"esp32-s2", [FeatureDensity, FeatureWindowed, FeatureSEXT, FeatureNSA, FeatureMul16, FeatureMul32, 
FeatureMul32High, FeatureTHREADPTR, - FeatureDiv32, FeatureMEMCTL, FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, FeatureInterrupt, - FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR, FeatureESP32S2Ops]>; - -def : Proc<"esp32-s3", [FeatureDensity, FeatureSingleFloat, FeatureLoop, FeatureMAC16, FeatureWindowed, FeatureBoolean, FeatureSEXT, - FeatureNSA, FeatureMul16, FeatureMul32, FeatureMul32High, FeatureDFPAccel, FeatureS32C1I, FeatureTHREADPTR, FeatureDiv32, - FeatureATOMCTL, FeatureMEMCTL, FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, - FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR, - FeatureESP32S3Ops]>; +def : Proc<"esp32s2", [FeatureDensity, FeatureWindowed, FeatureSEXT, FeatureNSA, FeatureMul16, FeatureMul32, FeatureMul32High, FeatureTHREADPTR, + FeatureDiv32, FeatureMEMCTL, FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, FeatureInterrupt, + FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR, FeatureESP32S2Ops]>; + +def : Proc<"esp32s3", [FeatureDensity, FeatureSingleFloat, FeatureLoop, FeatureMAC16, FeatureWindowed, FeatureBoolean, FeatureSEXT, + FeatureNSA, FeatureMul16, FeatureMul32, FeatureMul32High, FeatureDFPAccel, FeatureS32C1I, FeatureTHREADPTR, FeatureDiv32, + FeatureATOMCTL, FeatureMEMCTL, FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, + FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR, + FeatureESP32S3Ops]>; //===----------------------------------------------------------------------===// // Register File Description diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp index 88940535d74be..2a2569e633770 100644 --- 
a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp +++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp @@ -49,6 +49,16 @@ static std::unique_ptr createTLOF() { return std::make_unique(); } +static StringRef getCPUName(StringRef CPU) { + if (CPU.empty()) + CPU = "esp32"; + else if (CPU == "esp32-s2") + CPU = "esp32s2"; + else if (CPU == "esp32-s3") + CPU = "esp32s3"; + return CPU; +} + XtensaTargetMachine::XtensaTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, @@ -69,7 +79,7 @@ XtensaTargetMachine::XtensaTargetMachine(const Target &T, const Triple &TT, std::optional RM, std::optional CM, CodeGenOptLevel OL, bool JIT) - : XtensaTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, true) {} + : XtensaTargetMachine(T, TT, getCPUName(CPU), FS, Options, RM, CM, OL, JIT, true) {} const XtensaSubtarget * XtensaTargetMachine::getSubtargetImpl(const Function &F) const { diff --git a/llvm/lib/TargetParser/CMakeLists.txt b/llvm/lib/TargetParser/CMakeLists.txt index 4b5d582d57a42..87308c93d2150 100644 --- a/llvm/lib/TargetParser/CMakeLists.txt +++ b/llvm/lib/TargetParser/CMakeLists.txt @@ -26,6 +26,7 @@ add_llvm_component_library(LLVMTargetParser TargetParser.cpp Triple.cpp X86TargetParser.cpp + XtensaTargetParser.cpp ADDITIONAL_HEADER_DIRS Unix diff --git a/llvm/lib/TargetParser/XtensaTargetParser.cpp b/llvm/lib/TargetParser/XtensaTargetParser.cpp new file mode 100644 index 0000000000000..c3cc59ed84bc0 --- /dev/null +++ b/llvm/lib/TargetParser/XtensaTargetParser.cpp @@ -0,0 +1,93 @@ +//==-- XtensaTargetParser - Parser for Xtensa features ------------*- C++ -*-=// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements a target parser to recognise Xtensa hardware features +// +//===----------------------------------------------------------------------===// + +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/StringSwitch.h" +#include "llvm/TargetParser/XtensaTargetParser.h" + +namespace llvm { + +namespace Xtensa { +struct CPUInfo { + StringLiteral Name; + CPUKind Kind; + uint64_t Features; +}; + +struct FeatureName { + uint64_t ID; + const char *NameCStr; + size_t NameLength; + + StringRef getName() const { return StringRef(NameCStr, NameLength); } +}; + +const FeatureName XtensaFeatureNames[] = { +#define XTENSA_FEATURE(ID, NAME) {ID, "+" NAME, sizeof(NAME)}, +#include "llvm/TargetParser/XtensaTargetParser.def" +}; + +constexpr CPUInfo XtensaCPUInfo[] = { +#define XTENSA_CPU(ENUM, NAME, FEATURES) {NAME, CK_##ENUM, FEATURES}, +#include "llvm/TargetParser/XtensaTargetParser.def" +}; + +StringRef getBaseName(StringRef CPU){ + return llvm::StringSwitch(CPU) +#define XTENSA_CPU_ALIAS(NAME, ANAME) .Case(ANAME, NAME) +#include "llvm/TargetParser/XtensaTargetParser.def" + .Default(CPU); +} + +StringRef getAliasName(StringRef CPU){ + return llvm::StringSwitch(CPU) +#define XTENSA_CPU_ALIAS(NAME, ANAME) .Case(NAME, ANAME) +#include "llvm/TargetParser/XtensaTargetParser.def" + .Default(CPU); +} + +CPUKind parseCPUKind(StringRef CPU) { + CPU = getBaseName(CPU); + return llvm::StringSwitch(CPU) +#define XTENSA_CPU(ENUM, NAME, FEATURES) .Case(NAME, CK_##ENUM) +#include "llvm/TargetParser/XtensaTargetParser.def" + .Default(CK_INVALID); +} + +//Get all features for the CPU +void getCPUFeatures(StringRef CPU, SmallVectorImpl &Features) { + CPU = getBaseName(CPU); + auto I = llvm::find_if(XtensaCPUInfo, + [&](const CPUInfo &CI) { return CI.Name == CPU; }); + assert(I != std::end(XtensaCPUInfo) && "CPU not found!"); + 
uint64_t Bits = I->Features; + + for (const auto &F : XtensaFeatureNames) { + if ((Bits & F.ID) == F.ID) + Features.push_back(F.getName()); + } +} + +//Find all valid CPUs +void fillValidCPUList(SmallVectorImpl &Values) { + for (const auto &C : XtensaCPUInfo) { + if (C.Kind != CK_INVALID) { + Values.emplace_back(C.Name); + StringRef Name = getAliasName(C.Name); + if (Name != C.Name) + Values.emplace_back(Name); + } + } +} + +} // namespace Xtensa +} // namespace llvm diff --git a/llvm/utils/gn/secondary/llvm/lib/TargetParser/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/TargetParser/BUILD.gn index 31919badac7be..06408383f2253 100644 --- a/llvm/utils/gn/secondary/llvm/lib/TargetParser/BUILD.gn +++ b/llvm/utils/gn/secondary/llvm/lib/TargetParser/BUILD.gn @@ -19,5 +19,6 @@ static_library("TargetParser") { "TargetParser.cpp", "Triple.cpp", "X86TargetParser.cpp", + "XtensaTargetParser.cpp", ] } From 03d4d99fcba1325fd8a0fca1850d78bc7fbc9f12 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 14:03:44 +0300 Subject: [PATCH 073/289] [Xtensa] Improve Xtensa multilib support in clang. Use GCCInstallationDetector in Xtensa toolchain instead of XtensaGCCToolchainDetector for initialization of the gcc environment. Add xtensa toolchain test tree with multilib subdirectories. 
--- clang/lib/Driver/ToolChains/Gnu.cpp | 46 ++++- clang/lib/Driver/ToolChains/Xtensa.cpp | 179 ++++++------------ clang/lib/Driver/ToolChains/Xtensa.h | 23 +-- .../bin/xtensa-esp32-elf-ld | 1 + .../lib/gcc/xtensa-esp32-elf/8.4.0/crtbegin.o | 0 .../lib/gcc/xtensa-esp32-elf/8.4.0/crtend.o | 0 .../8.4.0/esp32-psram/crtbegin.o | 0 .../8.4.0/esp32-psram/crtend.o | 0 .../8.4.0/esp32-psram/no-rtti/crtbegin.o | 0 .../8.4.0/esp32-psram/no-rtti/crtend.o | 0 .../xtensa-esp32-elf/8.4.0/no-rtti/crtbegin.o | 0 .../xtensa-esp32-elf/8.4.0/no-rtti/crtend.o | 0 .../xtensa-esp32-elf/lib/crt0.o | 0 .../xtensa-esp32-elf/lib/esp32-psram/crt0.o | 0 .../lib/esp32-psram/no-rtti/crt0.o | 0 .../xtensa-esp32-elf/lib/no-rtti/crt0.o | 0 clang/test/Driver/xtensa-toolchain.c | 42 ++++ 17 files changed, 150 insertions(+), 141 deletions(-) create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/bin/xtensa-esp32-elf-ld create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/no-rtti/crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/no-rtti/crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/no-rtti/crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/no-rtti/crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/crt0.o create mode 100644 
clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/esp32-psram/crt0.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/esp32-psram/no-rtti/crt0.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/no-rtti/crt0.o create mode 100644 clang/test/Driver/xtensa-toolchain.c diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp index 3961c8e7e35de..8530dc95ce9f1 100644 --- a/clang/lib/Driver/ToolChains/Gnu.cpp +++ b/clang/lib/Driver/ToolChains/Gnu.cpp @@ -1941,6 +1941,46 @@ static void findRISCVMultilibs(const Driver &D, Result.Multilibs = RISCVMultilibs; } +static void findXtensaMultilibs(const Driver &D, + const llvm::Triple &TargetTriple, StringRef Path, + const ArgList &Args, DetectedMultilibs &Result) { + + MultilibSet XtensaMultilibs = MultilibSet(); + bool IsESP32 = Args.getLastArgValue(options::OPT_mcpu_EQ, "esp32") == "esp32"; + + XtensaMultilibs.push_back(Multilib()); + if (IsESP32) + XtensaMultilibs.push_back(MultilibBuilder("esp32-psram", {}, {}) + .flag("-mfix-esp32-psram-cache-issue") + .makeMultilib()); + + XtensaMultilibs.push_back(MultilibBuilder("no-rtti", {}, {}) + .flag("-frtti", /*Disallow=*/true) + .flag("-fno-rtti") + .makeMultilib()); + + if (IsESP32) + XtensaMultilibs.push_back(MultilibBuilder("esp32-psram/no-rtti", {}, {}) + .flag("-fno-rtti") + .flag("-frtti", /*Disallow=*/true) + .flag("-mfix-esp32-psram-cache-issue") + .makeMultilib()); + + Multilib::flags_list Flags; + addMultilibFlag( + Args.hasFlag(options::OPT_frtti, options::OPT_fno_rtti, false), "frtti", + Flags); + + if (IsESP32) + addMultilibFlag(Args.hasFlag(options::OPT_mfix_esp32_psram_cache_issue, + options::OPT_mfix_esp32_psram_cache_issue, + false), + "mfix-esp32-psram-cache-issue", Flags); + + if (XtensaMultilibs.select(Flags, Result.SelectedMultilibs)) + Result.Multilibs = XtensaMultilibs; +} + static bool findBiarchMultilibs(const Driver &D, const 
llvm::Triple &TargetTriple, StringRef Path, const ArgList &Args, @@ -2577,7 +2617,9 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes( "s390x-redhat-linux"}; static const char *const XtensaLibDirs[] = {"/lib"}; - static const char *const XtensaTriples[] = {"xtensa-unknown-elf"}; + static const char *const XtensaTriples[] = { + "xtensa-unknown-elf", "xtensa-esp32-elf", "xtensa-esp32s2-elf", + "xtensa-esp32s3-elf"}; using std::begin; using std::end; @@ -2888,6 +2930,8 @@ bool Generic_GCC::GCCInstallationDetector::ScanGCCForMultilibs( findMSP430Multilibs(D, TargetTriple, Path, Args, Detected); } else if (TargetArch == llvm::Triple::avr) { // AVR has no multilibs. + } else if (TargetArch == llvm::Triple::xtensa) { + findXtensaMultilibs(D, TargetTriple, Path, Args, Detected); } else if (!findBiarchMultilibs(D, TargetTriple, Path, Args, NeedsBiarchSuffix, Detected)) { return false; diff --git a/clang/lib/Driver/ToolChains/Xtensa.cpp b/clang/lib/Driver/ToolChains/Xtensa.cpp index c65caee7d6db4..6f076d061d645 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.cpp +++ b/clang/lib/Driver/ToolChains/Xtensa.cpp @@ -10,7 +10,6 @@ #include "Xtensa.h" #include "CommonArgs.h" -#include "clang/Driver/InputInfo.h" #include "clang/Basic/Cuda.h" #include "clang/Config/config.h" #include "clang/Driver/Compilation.h" @@ -33,58 +32,26 @@ using namespace llvm::opt; using tools::addMultilibFlag; -XtensaGCCToolchainDetector::XtensaGCCToolchainDetector( - const Driver &D, const llvm::Triple &HostTriple, - const llvm::opt::ArgList &Args) { - std::string InstalledDir; - InstalledDir = D.Dir; - StringRef CPUName = XtensaToolChain::GetTargetCPUVersion(Args); - std::string Dir; - std::string ToolchainName; - std::string ToolchainDir; - - if (CPUName == "esp32") - ToolchainName = "xtensa-esp32-elf"; - else if (CPUName == "esp32-s2" || CPUName =="esp32s2") - ToolchainName = "xtensa-esp32s2-elf"; - else if (CPUName == "esp32-s3" || CPUName == "esp32s3") - ToolchainName = 
"xtensa-esp32s3-elf"; - else if (CPUName == "esp8266") - ToolchainName = "xtensa-lx106-elf"; - - Slash = llvm::sys::path::get_separator().str(); - - ToolchainDir = InstalledDir + Slash + ".."; - Dir = ToolchainDir + Slash + "lib" + Slash + "gcc" + Slash + ToolchainName + - Slash; - GCCLibAndIncVersion = ""; - - if (D.getVFS().exists(Dir)) { - std::error_code EC; - for (llvm::vfs::directory_iterator LI = D.getVFS().dir_begin(Dir, EC), LE; - !EC && LI != LE; LI = LI.increment(EC)) { - StringRef VersionText = llvm::sys::path::filename(LI->path()); - auto GCCVersion = Generic_GCC::GCCVersion::Parse(VersionText); - if (GCCVersion.Major == -1) - continue; - GCCLibAndIncVersion = GCCVersion.Text; - } - if (GCCLibAndIncVersion == "") - llvm_unreachable("Unexpected Xtensa GCC toolchain version"); - - } else { - // Unable to find Xtensa GCC toolchain; - GCCToolchainName = ""; - return; - } - GCCToolchainDir = ToolchainDir; - GCCToolchainName = ToolchainName; -} - /// Xtensa Toolchain XtensaToolChain::XtensaToolChain(const Driver &D, const llvm::Triple &Triple, const ArgList &Args) - : Generic_ELF(D, Triple, Args), XtensaGCCToolchain(D, getTriple(), Args) { + : Generic_ELF(D, Triple, Args) { + + GCCInstallation.init(Triple, Args); + + if (!GCCInstallation.isValid()) { + llvm_unreachable("Unexpected Xtensa GCC toolchain version"); + } + + Multilibs = GCCInstallation.getMultilibs(); + SelectedMultilibs.assign({GCCInstallation.getMultilib()}); + + GCCLibAndIncVersion = GCCInstallation.getVersion().Text; + GCCToolchainName = GCCInstallation.getTriple().str(); + SmallString<128> Path(GCCInstallation.getParentLibPath()); + llvm::sys::path::append(Path, ".."); + GCCToolchainDir = Path.c_str(); + for (auto *A : Args) { std::string Str = A->getAsString(Args); if (!Str.compare("-mlongcalls")) @@ -111,50 +78,18 @@ XtensaToolChain::XtensaToolChain(const Driver &D, const llvm::Triple &Triple, IsIntegratedAsm = false; } - bool IsESP32 = XtensaToolChain::GetTargetCPUVersion(Args) == 
"esp32"; - Multilibs.push_back(Multilib()); - - if (IsESP32) - Multilibs.push_back(MultilibBuilder("esp32-psram", {}, {}) - .flag("-mfix-esp32-psram-cache-issue") - .makeMultilib()); - - Multilibs.push_back(MultilibBuilder("no-rtti", {}, {}) - .flag("-frtti", /*Disallow=*/true) - .flag("-fno-rtti") - .makeMultilib()); - - if (IsESP32) - Multilibs.push_back(MultilibBuilder("esp32-psram/no-rtti", {}, {}) - .flag("-fno-rtti") - .flag("-frtti", /*Disallow=*/true) - .flag("-mfix-esp32-psram-cache-issue") - .makeMultilib()); - - Multilib::flags_list Flags; - addMultilibFlag( - Args.hasFlag(options::OPT_frtti, options::OPT_fno_rtti, false), "frtti", - Flags); - - if (IsESP32) - addMultilibFlag(Args.hasFlag(options::OPT_mfix_esp32_psram_cache_issue, - options::OPT_mfix_esp32_psram_cache_issue, - false), - "mfix-esp32-psram-cache-issue", Flags); - - Multilibs.select(Flags, SelectedMultilibs); - - const std::string Slash = XtensaGCCToolchain.Slash; - std::string Libs = - XtensaGCCToolchain.GCCToolchainDir + Slash + "lib" + Slash + "gcc" + - Slash + XtensaGCCToolchain.GCCToolchainName + Slash + - XtensaGCCToolchain.GCCLibAndIncVersion + SelectedMultilibs.back().gccSuffix(); - getFilePaths().push_back(Libs); - - Libs = XtensaGCCToolchain.GCCToolchainDir + Slash + - XtensaGCCToolchain.GCCToolchainName + Slash + "lib" + - SelectedMultilibs.back().gccSuffix(); - getFilePaths().push_back(Libs); + SmallString<128> Libs1(GCCToolchainDir); + llvm::sys::path::append(Libs1, "lib", "gcc", GCCToolchainName, + GCCLibAndIncVersion); + if (!SelectedMultilibs.back().gccSuffix().empty()) + llvm::sys::path::append(Libs1, SelectedMultilibs.back().gccSuffix()); + getFilePaths().push_back(Libs1.c_str()); + + SmallString<128> Libs2(GCCToolchainDir); + llvm::sys::path::append(Libs2, GCCToolchainName, "lib"); + if (!SelectedMultilibs.back().gccSuffix().empty()) + llvm::sys::path::append(Libs2, SelectedMultilibs.back().gccSuffix()); + getFilePaths().push_back(Libs2.c_str()); } Tool 
*XtensaToolChain::buildLinker() const { @@ -171,17 +106,15 @@ void XtensaToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs, DriverArgs.hasArg(options::OPT_nostdlibinc)) return; - if (!XtensaGCCToolchain.IsValid()) + if (!GCCInstallation.isValid()) return; - std::string Slash = XtensaGCCToolchain.Slash; - - std::string Path1 = getDriver().ResourceDir.c_str() + Slash + "include"; - std::string Path2 = XtensaGCCToolchain.GCCToolchainDir + Slash + - XtensaGCCToolchain.GCCToolchainName + Slash + - "sys-include"; - std::string Path3 = XtensaGCCToolchain.GCCToolchainDir + Slash + - XtensaGCCToolchain.GCCToolchainName + Slash + "include"; + SmallString<128> Path1(getDriver().ResourceDir); + llvm::sys::path::append(Path1, "include"); + SmallString<128> Path2(GCCToolchainDir); + llvm::sys::path::append(Path2, GCCToolchainName, "sys-include"); + SmallString<128> Path3(GCCToolchainDir); + llvm::sys::path::append(Path3, GCCToolchainName, "include"); const StringRef Paths[] = {Path1, Path2, Path3}; addSystemIncludes(DriverArgs, CC1Args, Paths); @@ -190,20 +123,20 @@ void XtensaToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs, void XtensaToolChain::addLibStdCxxIncludePaths( const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args) const { - if (!XtensaGCCToolchain.IsValid()) + if (!GCCInstallation.isValid()) return; - std::string Slash = XtensaGCCToolchain.Slash; + SmallString<128> BaseDir(GCCToolchainDir); + llvm::sys::path::append(BaseDir, GCCToolchainName, "include", "c++", + GCCLibAndIncVersion); + SmallString<128> TargetDir(BaseDir); + llvm::sys::path::append(TargetDir, GCCToolchainName); + SmallString<128> TargetDirBackward(BaseDir); + llvm::sys::path::append(TargetDirBackward, "backward"); - std::string BaseDir = XtensaGCCToolchain.GCCToolchainDir + Slash + - XtensaGCCToolchain.GCCToolchainName + Slash + - "include" + Slash + "c++" + Slash + - XtensaGCCToolchain.GCCLibAndIncVersion; - std::string TargetDir = BaseDir + Slash + 
XtensaGCCToolchain.GCCToolchainName; addLibStdCXXIncludePaths(BaseDir, "", "", DriverArgs, CC1Args); addLibStdCXXIncludePaths(TargetDir, "", "", DriverArgs, CC1Args); - TargetDir = BaseDir + Slash + "backward"; - addLibStdCXXIncludePaths(TargetDir, "", "", DriverArgs, CC1Args); + addLibStdCXXIncludePaths(TargetDirBackward, "", "", DriverArgs, CC1Args); } ToolChain::CXXStdlibType @@ -235,7 +168,7 @@ void tools::xtensa::Assembler::ConstructJob(Compilation &C, const JobAction &JA, const auto &TC = static_cast(getToolChain()); - if (!TC.XtensaGCCToolchain.IsValid()) + if (TC.GCCToolchainName == "") llvm_unreachable("Unable to find Xtensa GCC assembler"); claimNoWarnArgs(Args); @@ -262,13 +195,13 @@ void tools::xtensa::Assembler::ConstructJob(Compilation &C, const JobAction &JA, for (const auto &II : Inputs) CmdArgs.push_back(II.getFilename()); - std::string Slash = TC.XtensaGCCToolchain.Slash; + SmallString<128> Asm(TC.GCCToolchainDir); + llvm::sys::path::append(Asm, "bin", + TC.GCCToolchainName + "-" + getShortName()); - const char *Asm = - Args.MakeArgString(getToolChain().getDriver().Dir + Slash + - TC.XtensaGCCToolchain.GCCToolchainName + "-as"); - C.addCommand(std::make_unique( - JA, *this, ResponseFileSupport::AtFileCurCP(), Asm, CmdArgs, Inputs)); + C.addCommand( + std::make_unique(JA, *this, ResponseFileSupport::AtFileCurCP(), + Args.MakeArgString(Asm), CmdArgs, Inputs)); } void xtensa::Linker::ConstructJob(Compilation &C, const JobAction &JA, @@ -278,13 +211,13 @@ void xtensa::Linker::ConstructJob(Compilation &C, const JobAction &JA, const char *LinkingOutput) const { const auto &TC = static_cast(getToolChain()); - std::string Slash = TC.XtensaGCCToolchain.Slash; - if (!TC.XtensaGCCToolchain.IsValid()) + if (TC.GCCToolchainName == "") llvm_unreachable("Unable to find Xtensa GCC linker"); - std::string Linker = getToolChain().getDriver().Dir + Slash + - TC.XtensaGCCToolchain.GCCToolchainName + "-ld"; + SmallString<128> Linker(TC.GCCToolchainDir); + 
llvm::sys::path::append(Linker, "bin", + TC.GCCToolchainName + "-" + getShortName()); ArgStringList CmdArgs; Args.AddAllArgs(CmdArgs, options::OPT_L); diff --git a/clang/lib/Driver/ToolChains/Xtensa.h b/clang/lib/Driver/ToolChains/Xtensa.h index f040a7d333c91..10fc87c90bc6a 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.h +++ b/clang/lib/Driver/ToolChains/Xtensa.h @@ -20,19 +20,6 @@ namespace clang { namespace driver { namespace toolchains { -class XtensaGCCToolchainDetector { -public: - std::string GCCLibAndIncVersion; - std::string GCCToolchainName; - std::string GCCToolchainDir; - std::string Slash; - - XtensaGCCToolchainDetector(const Driver &D, const llvm::Triple &HostTriple, - const llvm::opt::ArgList &Args); - - bool IsValid() const { return GCCToolchainName != ""; } -}; - class LLVM_LIBRARY_VISIBILITY XtensaToolChain : public Generic_ELF { protected: Tool *buildLinker() const override; @@ -49,13 +36,15 @@ class LLVM_LIBRARY_VISIBILITY XtensaToolChain : public Generic_ELF { llvm::opt::ArgStringList &CC1Args) const override; CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override; bool IsIntegratedAssemblerDefault() const override { - return (IsIntegratedAsm || (XtensaGCCToolchain.GCCToolchainName == "")); + return (IsIntegratedAsm || (GCCToolchainName == "")); } static const StringRef GetTargetCPUVersion(const llvm::opt::ArgList &Args); - XtensaGCCToolchainDetector XtensaGCCToolchain; bool IsIntegratedAsm = true; + std::string GCCLibAndIncVersion = ""; + std::string GCCToolchainName = ""; + std::string GCCToolchainDir = ""; }; } // end namespace toolchains @@ -65,7 +54,7 @@ namespace xtensa { class LLVM_LIBRARY_VISIBILITY Linker : public Tool { public: Linker(const ToolChain &TC) - : Tool("Xtensa::Linker", "xtensa-esp32-elf-ld", TC) {} + : Tool("Xtensa::Linker", "ld", TC) {} bool hasIntegratedCPP() const override { return false; } bool isLinkJob() const override { return true; } void ConstructJob(Compilation &C, const JobAction &JA, @@ 
-77,7 +66,7 @@ class LLVM_LIBRARY_VISIBILITY Linker : public Tool { class LLVM_LIBRARY_VISIBILITY Assembler : public Tool { public: Assembler(const ToolChain &TC) - : Tool("Xtensa::Assembler", "xtensa-esp32-elf-as", TC) {} + : Tool("Xtensa::Assembler", "as", TC) {} bool hasIntegratedCPP() const override { return false; } void ConstructJob(Compilation &C, const JobAction &JA, diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/bin/xtensa-esp32-elf-ld b/clang/test/Driver/Inputs/multilib_xtensa_tree/bin/xtensa-esp32-elf-ld new file mode 100644 index 0000000000000..b23e55619b2ff --- /dev/null +++ b/clang/test/Driver/Inputs/multilib_xtensa_tree/bin/xtensa-esp32-elf-ld @@ -0,0 +1 @@ +#!/bin/true diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/no-rtti/crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/no-rtti/crtbegin.o new file mode 100644 index 
0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/no-rtti/crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/no-rtti/crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/no-rtti/crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/no-rtti/crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/no-rtti/crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/no-rtti/crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/crt0.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/crt0.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/esp32-psram/crt0.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/esp32-psram/crt0.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/esp32-psram/no-rtti/crt0.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/esp32-psram/no-rtti/crt0.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/no-rtti/crt0.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/no-rtti/crt0.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/xtensa-toolchain.c b/clang/test/Driver/xtensa-toolchain.c new file mode 100644 index 0000000000000..245e09902f0f8 --- /dev/null +++ b/clang/test/Driver/xtensa-toolchain.c @@ -0,0 
+1,42 @@ +// A basic clang -cc1 command-line, and simple environment check. + +// RUN: %clang %s -### -no-canonical-prefixes -target xtensa \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ +// RUN: | FileCheck -check-prefix=CC1 %s +// CC1: clang{{.*}} "-cc1" "-triple" "xtensa" + +// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ +// RUN: -target xtensa --rtlib=platform \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ +// RUN: | FileCheck -check-prefix=C-XTENSA-BAREMETAL %s + +// C-XTENSA-BAREMETAL: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" +// C-XTENSA-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0/no-rtti" +// C-XTENSA-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib/no-rtti" + +// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ +// RUN: -target xtensa --rtlib=platform \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -frtti 2>&1 \ +// RUN: | FileCheck -check-prefix=C-XTENSA-BAREMETAL-RTTI %s + +// C-XTENSA-BAREMETAL-RTTI: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" +// C-XTENSA-BAREMETAL-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0" +// C-XTENSA-BAREMETAL-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib" + +// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ +// RUN: -target xtensa --rtlib=platform \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -mfix-esp32-psram-cache-issue 2>&1 \ +// RUN: | FileCheck 
-check-prefix=C-XTENSA-BAREMETAL-PSRAM %s + +// C-XTENSA-BAREMETAL-PSRAM: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" +// C-XTENSA-BAREMETAL-PSRAM: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0/esp32-psram/no-rtti" +// C-XTENSA-BAREMETAL-PSRAM: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib/esp32-psram/no-rtti" + +// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ +// RUN: -target xtensa --rtlib=platform \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -mfix-esp32-psram-cache-issue -frtti 2>&1 \ +// RUN: | FileCheck -check-prefix=C-XTENSA-BAREMETAL-PSRAM-RTTI %s + +// C-XTENSA-BAREMETAL-PSRAM-RTTI: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" +// C-XTENSA-BAREMETAL-PSRAM-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0/esp32-psram" +// C-XTENSA-BAREMETAL-PSRAM-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib/esp32-psram" From 33749f2a02eeb44b75dc69ad21dc83c0ccccf9c8 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 14:04:36 +0300 Subject: [PATCH 074/289] [Xtensa] Add '--rtlib' option support for ESP Xtensa toolchain --- clang/lib/Driver/ToolChains/Xtensa.cpp | 62 ++++++++++++++++++++------ clang/lib/Driver/ToolChains/Xtensa.h | 2 + 2 files changed, 50 insertions(+), 14 deletions(-) diff --git a/clang/lib/Driver/ToolChains/Xtensa.cpp b/clang/lib/Driver/ToolChains/Xtensa.cpp index 6f076d061d645..5a0cf5492fcb7 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.cpp +++ 
b/clang/lib/Driver/ToolChains/Xtensa.cpp @@ -152,6 +152,11 @@ XtensaToolChain::GetCXXStdlibType(const ArgList &Args) const { return ToolChain::CST_Libstdcxx; } +ToolChain::UnwindLibType +XtensaToolChain::GetUnwindLibType(const llvm::opt::ArgList &Args) const { + return ToolChain::UNW_None; +} + const StringRef XtensaToolChain::GetTargetCPUVersion(const ArgList &Args) { if (Arg *A = Args.getLastArg(clang::driver::options::OPT_mcpu_EQ)) { StringRef CPUName = A->getValue(); @@ -209,33 +214,62 @@ void xtensa::Linker::ConstructJob(Compilation &C, const JobAction &JA, const InputInfoList &Inputs, const ArgList &Args, const char *LinkingOutput) const { - const auto &TC = - static_cast(getToolChain()); + ArgStringList CmdArgs; + bool WantCRTs = + !Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles); + const auto &ToolChain = + static_cast(getToolChain()); - if (TC.GCCToolchainName == "") + if (ToolChain.GCCToolchainName == "") llvm_unreachable("Unable to find Xtensa GCC linker"); - SmallString<128> Linker(TC.GCCToolchainDir); + SmallString<128> Linker(ToolChain.GCCToolchainDir); llvm::sys::path::append(Linker, "bin", - TC.GCCToolchainName + "-" + getShortName()); - ArgStringList CmdArgs; + ToolChain.GCCToolchainName + "-" + getShortName()); - Args.AddAllArgs(CmdArgs, options::OPT_L); - TC.AddFilePathLibArgs(Args, CmdArgs); + const char *crtbegin, *crtend; + auto RuntimeLib = ToolChain.GetRuntimeLibType(Args); + if (RuntimeLib == ToolChain::RLT_Libgcc) { + crtbegin = "crtbegin.o"; + crtend = "crtend.o"; + } else { + assert (RuntimeLib == ToolChain::RLT_CompilerRT); + crtbegin = ToolChain.getCompilerRTArgString(Args, "crtbegin", + ToolChain::FT_Object); + crtend = ToolChain.getCompilerRTArgString(Args, "crtend", + ToolChain::FT_Object); + } + if (WantCRTs) { + // TODO: The crt0.o is not used for esp targets, but maybe used in + // future for other vendors + //CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o"))); + 
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin))); + } + + AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA); + + Args.AddAllArgs(CmdArgs, options::OPT_L); + ToolChain.AddFilePathLibArgs(Args, CmdArgs); Args.addAllArgs(CmdArgs, {options::OPT_T_Group, options::OPT_e, options::OPT_s, options::OPT_t, options::OPT_u_Group}); + + if (!Args.hasArg(options::OPT_nostdlib) && + !Args.hasArg(options::OPT_nodefaultlibs)) { + if (ToolChain.ShouldLinkCXXStdlib(Args)) + ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs); + AddRunTimeLibs(ToolChain, ToolChain.getDriver(), CmdArgs, Args); + } - AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA); - - CmdArgs.push_back("-lgcc"); + if (WantCRTs) + CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend))); CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); - C.addCommand( - std::make_unique(JA, *this, ResponseFileSupport::AtFileCurCP(), - Args.MakeArgString(Linker), CmdArgs, Inputs)); + C.addCommand(std::make_unique( + JA, *this, ResponseFileSupport::AtFileCurCP(), Args.MakeArgString(Linker), + CmdArgs, Inputs)); } // Get features by CPU name diff --git a/clang/lib/Driver/ToolChains/Xtensa.h b/clang/lib/Driver/ToolChains/Xtensa.h index 10fc87c90bc6a..38f8f1b2c8d5f 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.h +++ b/clang/lib/Driver/ToolChains/Xtensa.h @@ -35,6 +35,8 @@ class LLVM_LIBRARY_VISIBILITY XtensaToolChain : public Generic_ELF { addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args) const override; CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override; + UnwindLibType GetUnwindLibType(const llvm::opt::ArgList &Args) const override; + bool IsIntegratedAssemblerDefault() const override { return (IsIntegratedAsm || (GCCToolchainName == "")); } From 558537b75185cc516e670def8de85bb25325d08c Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 14:05:41 +0300 Subject: [PATCH 075/289] 
[Xtensa]: Add '-fuse-ld' option support to ESP Xtensa toolchain --- clang/lib/Driver/ToolChains/Xtensa.cpp | 11 ++++++++--- clang/test/Driver/xtensa-toolchain.c | 8 ++++---- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/clang/lib/Driver/ToolChains/Xtensa.cpp b/clang/lib/Driver/ToolChains/Xtensa.cpp index 5a0cf5492fcb7..407e79b74e58c 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.cpp +++ b/clang/lib/Driver/ToolChains/Xtensa.cpp @@ -215,6 +215,7 @@ void xtensa::Linker::ConstructJob(Compilation &C, const JobAction &JA, const ArgList &Args, const char *LinkingOutput) const { ArgStringList CmdArgs; + SmallString<128> Linker; bool WantCRTs = !Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles); const auto &ToolChain = @@ -223,9 +224,13 @@ void xtensa::Linker::ConstructJob(Compilation &C, const JobAction &JA, if (ToolChain.GCCToolchainName == "") llvm_unreachable("Unable to find Xtensa GCC linker"); - SmallString<128> Linker(ToolChain.GCCToolchainDir); - llvm::sys::path::append(Linker, "bin", - ToolChain.GCCToolchainName + "-" + getShortName()); + if (Args.hasArg(options::OPT_fuse_ld_EQ)) { + Linker.assign(ToolChain.GetLinkerPath()); + } else { + Linker.assign(ToolChain.GCCToolchainDir); + llvm::sys::path::append(Linker, "bin", + ToolChain.GCCToolchainName + "-" + getShortName()); + } const char *crtbegin, *crtend; auto RuntimeLib = ToolChain.GetRuntimeLibType(Args); diff --git a/clang/test/Driver/xtensa-toolchain.c b/clang/test/Driver/xtensa-toolchain.c index 245e09902f0f8..0570c1d0f0b57 100644 --- a/clang/test/Driver/xtensa-toolchain.c +++ b/clang/test/Driver/xtensa-toolchain.c @@ -5,7 +5,7 @@ // RUN: | FileCheck -check-prefix=CC1 %s // CC1: clang{{.*}} "-cc1" "-triple" "xtensa" -// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ +// RUN: %clang %s -### -no-canonical-prefixes \ // RUN: -target xtensa --rtlib=platform \ // RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ // RUN: | FileCheck -check-prefix=C-XTENSA-BAREMETAL %s 
@@ -14,7 +14,7 @@ // C-XTENSA-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0/no-rtti" // C-XTENSA-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib/no-rtti" -// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ +// RUN: %clang %s -### -no-canonical-prefixes \ // RUN: -target xtensa --rtlib=platform \ // RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -frtti 2>&1 \ // RUN: | FileCheck -check-prefix=C-XTENSA-BAREMETAL-RTTI %s @@ -23,7 +23,7 @@ // C-XTENSA-BAREMETAL-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0" // C-XTENSA-BAREMETAL-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib" -// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ +// RUN: %clang %s -### -no-canonical-prefixes \ // RUN: -target xtensa --rtlib=platform \ // RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -mfix-esp32-psram-cache-issue 2>&1 \ // RUN: | FileCheck -check-prefix=C-XTENSA-BAREMETAL-PSRAM %s @@ -32,7 +32,7 @@ // C-XTENSA-BAREMETAL-PSRAM: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0/esp32-psram/no-rtti" // C-XTENSA-BAREMETAL-PSRAM: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib/esp32-psram/no-rtti" -// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ +// RUN: %clang %s -### -no-canonical-prefixes \ // RUN: -target xtensa --rtlib=platform \ // RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -mfix-esp32-psram-cache-issue -frtti 2>&1 \ // RUN: | FileCheck 
-check-prefix=C-XTENSA-BAREMETAL-PSRAM-RTTI %s From 199fb2ab29549c66c306ac31823eab3fe7c6c446 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 14:22:13 +0300 Subject: [PATCH 076/289] [Xtensa] Use B0 register for FP cmp operations. The virtual bool registers allocation from BR class may cause situation when we need to spill such 1-bit registers, this would cause performance degradation due to load/store operations of the 32-bit BR register. The performance improvement from using virtual bool registers is not significant. So, just use only B0 register for FP compare operations. --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index e40b5b368a465..c3f71b524c3f2 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -2004,14 +2004,15 @@ XtensaTargetLowering::emitSelectCC(MachineInstr &MI, (MI.getOpcode() == Xtensa::SELECT_CC_FP_INT)) { int BrKind = 0; int CmpKind = 0; - MachineRegisterInfo &RegInfo = F->getRegInfo(); - const TargetRegisterClass *RC = &Xtensa::BRRegClass; - unsigned b = RegInfo.createVirtualRegister(RC); + unsigned b = Xtensa::B0; + GetFPBranchKind(Cond, BrKind, CmpKind); BuildMI(MBB, DL, TII.get(CmpKind), b) .addReg(LHS.getReg()) .addReg(RHS.getReg()); - BuildMI(MBB, DL, TII.get(BrKind)).addReg(b, RegState::Kill).addMBB(SinkMBB); + BuildMI(MBB, DL, TII.get(BrKind)) + .addReg(b, RegState::Kill) + .addMBB(SinkMBB); } else { BuildMI(MBB, DL, TII.get(Cond)) .addReg(LHS.getReg()) @@ -3051,16 +3052,15 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( MachineBasicBlock *TargetBB = MI.getOperand(3).getMBB(); int BrKind = 0; int CmpKind = 0; - MachineFunction *MF = MBB->getParent(); - MachineRegisterInfo &RegInfo = MF->getRegInfo(); - const TargetRegisterClass *RC = 
&Xtensa::BRRegClass; + unsigned RegB = Xtensa::B0; - unsigned RegB = RegInfo.createVirtualRegister(RC); GetFPBranchKind(Cond.getImm(), BrKind, CmpKind); BuildMI(*MBB, MI, DL, TII.get(CmpKind), RegB) .addReg(LHS.getReg()) .addReg(RHS.getReg()); - BuildMI(*MBB, MI, DL, TII.get(BrKind)).addReg(RegB).addMBB(TargetBB); + BuildMI(*MBB, MI, DL, TII.get(BrKind)) + .addReg(RegB, RegState::Kill) + .addMBB(TargetBB); MI.eraseFromParent(); return MBB; From 0ef0258e2bedbe058c60c392e16a50f528b2edf2 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 14:23:01 +0300 Subject: [PATCH 077/289] ci: add .gitlab-ci.yml to support CI/CD --- .gitlab-ci.yml | 195 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 195 insertions(+) create mode 100644 .gitlab-ci.yml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 0000000000000..94f59533db5ad --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,195 @@ +stages: + - build + - private_deploy + - test + - public_deploy + +image: ${CI_DOCKER_REGISTRY}/llvm-build:3 + +variables: + + CONF_TARGET: "xtensa-esp32-elf" + XTENSA_CLANG_TOOLCHAIN: "${CONF_TARGET}-clang" + PLATFORM_NAME_LINUX: "linux-amd64" + PLATFORM_NAME_WIN: "win64" + PLATFORM_NAME_MACOS: "macos" + + XTENSA_CLANG_TOOLCHAIN_BRANCH: "esp-20220415-r14.0.0" + GCC_REL_NAME: "gcc8_4_0-esp-2021r2-patch3" + + ARCHIVE_TOOL_LINUX: "tar -cJf" + UNARCHIVE_TOOL_LINUX: "tar -xf" + ARCHIVE_EXT_LINUX: "tar.xz" + + ARCHIVE_TOOL_WIN: "zip -9 -r" + UNARCHIVE_TOOL_WIN: "unzip" + ARCHIVE_EXT_WIN: "zip" + + ARCHIVE_TOOL_MACOS: "tar -cJf" + UNARCHIVE_TOOL_MACOS: "tar -xf" + ARCHIVE_EXT_MACOS: "tar.xz" + + DIST_DIR: "dist" + +.use_ci_tools: &use_ci_tools | + curl -sSL ${CIT_LOADER_URL} -o cit_loader.sh && sh cit_loader.sh + source citools/import_functions + +.add_gitlab_key: &add_gitlab_key | + cit_add_ssh_key "${GITLAB_KEY}" + +before_script: + - *use_ci_tools + - *add_gitlab_key + +# Prepare release name/number +.get_release_name: &get_release_name | + # using 
annotated tags + REL_NUM=$(git describe --abbrev=7) + REL_SFX="llvm14_0_0" + REL_NAME=${CONF_TARGET}-${REL_SFX}-${REL_NUM}-${PLATFORM_NAME} + ARCHIVE_NAME=${REL_NAME}.${ARCHIVE_EXT} + echo "CONF_TARGET: $CONF_TARGET" + echo "PLATFORM_NAME: $PLATFORM_NAME" + echo "REL_NUM: $REL_NUM" + echo "REL_NAME: $REL_NAME" + echo "ARCHIVE_NAME: $ARCHIVE_NAME" + +# Get an existing crosstool-ng build for esp32 +.get_gcc_toolchain: &get_gcc_toolchain | + wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/esp-2021r2-patch3/${XTENSA_GCC_TOOLCHAIN} + ${UNARCHIVE_TOOL} ${XTENSA_GCC_TOOLCHAIN} + mv xtensa-esp32-elf ${XTENSA_CLANG_TOOLCHAIN} + +.get_clang_toolchain_build_scripts: &get_clang_toolchain_build_scripts | + git clone -b ${XTENSA_CLANG_TOOLCHAIN_BRANCH} ${GITLAB_SSH_SERVER}/${XTENSA_CLANG_TOOLCHAIN_REPO} + cp -r xtensa-clang-toolchain/* . + +# LLVM Build System used the remote address to show detailed version info, we'll change it to the public repository +.fix_origin_remote_for_public: &fix_origin_remote_for_public | + git remote set-url origin "${GH_REPO_HTTPS}" + + # Pack the toolchain +.package_toolchain: &package_toolchain | + ${ARCHIVE_TOOL} ${ARCHIVE_NAME} ${XTENSA_CLANG_TOOLCHAIN}/ + mkdir -p ${DIST_DIR} + mv ${ARCHIVE_NAME} ${DIST_DIR}/ + echo "${ARCHIVE_NAME}" > ${DIST_DIR}/file_${PLATFORM_NAME}_${CONF_TARGET} + +.build_template: + stage: build + tags: [ "amd64", "build" ] + artifacts: + paths: + - ${DIST_DIR}/ + when: always + expire_in: 10 day + script: + - *get_release_name + - *get_gcc_toolchain + - *fix_origin_remote_for_public + - *get_clang_toolchain_build_scripts + - ${BUILD_TOOLCHAIN_CMD} "${XTENSA_CLANG_TOOLCHAIN}" + - *package_toolchain + +linux_amd64_build: + extends: .build_template + variables: + PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" + ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + # a filename was moved here from the global 
'variables:' because of GCC_REL_NAME value couldn't be expanded and substituted there + XTENSA_GCC_TOOLCHAIN: "xtensa-esp32-elf-${GCC_REL_NAME}-linux-amd64.tar.gz" + BUILD_TOOLCHAIN_CMD: "./build-toolchain-linux.sh" + +win64_build: + extends: .build_template + variables: + PLATFORM_NAME: "${PLATFORM_NAME_WIN}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_WIN}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_WIN}" + ARCHIVE_EXT: "${ARCHIVE_EXT_WIN}" + XTENSA_GCC_TOOLCHAIN: "xtensa-esp32-elf-${GCC_REL_NAME}-win64.zip" + BUILD_TOOLCHAIN_CMD: "./build-toolchain-win.sh" + +macos_amd64_build: + extends: .build_template + variables: + PLATFORM_NAME: "${PLATFORM_NAME_MACOS}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_MACOS}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_MACOS}" + ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" + XTENSA_GCC_TOOLCHAIN: "xtensa-esp32-elf-${GCC_REL_NAME}-macos.tar.gz" + BUILD_TOOLCHAIN_CMD: "./build-toolchain-macos.sh" + +linux_amd64_testsuite: + stage: test + tags: [ "amd64", "build" ] + dependencies: + - linux_amd64_build + variables: + PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" + ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + script: + - *get_release_name + - ${UNARCHIVE_TOOL} ${DIST_DIR}/${ARCHIVE_NAME} + + # getting testsuit + - git clone -b feature/ci_llvm_multitarget --depth 1 $GITLAB_SSH_SERVER/idf/llvm-xtensa-testsuite.git + + # preparing testsuit + - export PATH=$PATH:${PWD}/${XTENSA_CLANG_TOOLCHAIN}/bin/ + - cd llvm-xtensa-testsuite + + # qemu + - ./qemu_esp32_install.sh + + # run testsuite for esp32 + - ./run_esp32_tests.sh + +upload_to_http: + stage: private_deploy + when: manual + allow_failure: true + tags: [ "deploy", "shiny" ] + variables: + # force the fetch strategy to clean old archives up in dist/ dir + GIT_STRATEGY: fetch + before_script: + - *use_ci_tools + script: + - cit_add_ssh_key "${HTTP_UPLOAD_KEY}" + # List of archives + - FILES=$(find ${DIST_DIR} -name file_\* -exec cat {} \+) + - cd ${DIST_DIR} + - 
scp ${FILES} ${HTTP_UPLOAD_DIR}/ct-ng/llvm-builds + # Show info + - echo -e "\nArchives were published there:\n\n$(for n in ${FILES}; do echo "${HTTP_PUBLIC_DIR}/ct-ng/llvm-builds/${n}"; done)\n" + +upload_to_github: + stage: public_deploy + when: manual + allow_failure: true + only: + - tags + tags: [ "amd64", "internet" ] + image: espressif/github-hub:2 + variables: + GIT_STRATEGY: fetch + GITHUB_TOKEN: "${GH_TOKEN}" + GITHUB_REPO: "${GH_REPO_HTTPS}" + TAG: "${CI_COMMIT_TAG}" + before_script: [] + script: + - ls -l dist*/ + - git remote add github ${GH_REPO_HTTPS} + - hub release show ${TAG} || { echo "Please create a release on GitHub with ${TAG} tag at first"; exit 1; } + # List of archives + - FILES=$(find ${DIST_DIR} -name file_\* -exec cat {} \+) + - cd ${DIST_DIR} + - ls -l $FILES + # Upload archives + - for n in ${FILES}; do hub release edit -m "" -a "${n}" "${TAG}"; done From 774e48348aa80f3a147bd164630b9dd659b51fcc Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 14:28:36 +0300 Subject: [PATCH 078/289] [Xtensa] Fix inline asm Fix inline asm printing of the memory operands. 
--- .../Xtensa/MCTargetDesc/XtensaInstPrinter.cpp | 9 ------- .../Xtensa/MCTargetDesc/XtensaInstPrinter.h | 3 --- llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp | 27 +++++++++++-------- llvm/lib/Target/Xtensa/XtensaAsmPrinter.h | 2 -- llvm/test/CodeGen/Xtensa/inline-asm.ll | 20 ++++++++++++++ 5 files changed, 36 insertions(+), 25 deletions(-) create mode 100644 llvm/test/CodeGen/Xtensa/inline-asm.ll diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp index 0960c73dba937..0a0d298ad267a 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp @@ -27,15 +27,6 @@ using namespace llvm; #include "XtensaGenAsmWriter.inc" -void XtensaInstPrinter::printAddress(unsigned Base, int64_t Disp, - raw_ostream &O) { - O << Disp; - if (Base) { - O << '('; - O << getRegisterName(Base) << ')'; - } -} - static void printExpr(const MCExpr *Expr, raw_ostream &OS) { int Offset = 0; const MCSymbolRefExpr *SRE; diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h index b103fb5dc9e01..3e8c752bc4426 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h @@ -35,9 +35,6 @@ class XtensaInstPrinter : public MCInstPrinter { // Print the given operand. static void printOperand(const MCOperand &MO, raw_ostream &O); - // Print an address - static void printAddress(unsigned Base, int64_t Disp, raw_ostream &O); - // Override MCInstPrinter. 
void printRegName(raw_ostream &O, MCRegister Reg) const override; void printInst(const MCInst *MI, uint64_t Address, StringRef Annot, diff --git a/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp index 57bb8dd43317c..a3dbb40924d70 100644 --- a/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp +++ b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp @@ -199,18 +199,23 @@ bool XtensaAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo, const char *ExtraCode, raw_ostream &OS) { - XtensaInstPrinter::printAddress(MI->getOperand(OpNo).getReg(), - MI->getOperand(OpNo + 1).getImm(), OS); - return false; -} + if (ExtraCode && ExtraCode[0]) + return true; // Unknown modifier. + + assert(OpNo + 1 < MI->getNumOperands() && "Insufficient operands"); + + const MachineOperand &Base = MI->getOperand(OpNo); + const MachineOperand &Offset = MI->getOperand(OpNo + 1); -void XtensaAsmPrinter::printMemOperand(const MachineInstr *MI, int opNum, - raw_ostream &OS) { - OS << '%' - << XtensaInstPrinter::getRegisterName(MI->getOperand(opNum).getReg()); - OS << "("; - OS << MI->getOperand(opNum + 1).getImm(); - OS << ")"; + assert(Base.isReg() && + "Unexpected base pointer for inline asm memory operand."); + assert(Offset.isImm() && "Unexpected offset for inline asm memory operand."); + + OS << XtensaInstPrinter::getRegisterName(Base.getReg()); + OS << ", "; + OS << Offset.getImm(); + + return false; } MCSymbol * diff --git a/llvm/lib/Target/Xtensa/XtensaAsmPrinter.h b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.h index f7236a39fe6da..1137309cd9a45 100644 --- a/llvm/lib/Target/Xtensa/XtensaAsmPrinter.h +++ b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.h @@ -50,8 +50,6 @@ class LLVM_LIBRARY_VISIBILITY XtensaAsmPrinter : public AsmPrinter { bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo, const char *ExtraCode, raw_ostream &OS) override; - void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &OS); - MCSymbol 
*GetConstantPoolIndexSymbol(const MachineOperand &MO) const; MCSymbol *GetJumpTableSymbol(const MachineOperand &MO) const; diff --git a/llvm/test/CodeGen/Xtensa/inline-asm.ll b/llvm/test/CodeGen/Xtensa/inline-asm.ll new file mode 100644 index 0000000000000..7a267d5145981 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/inline-asm.ll @@ -0,0 +1,20 @@ +; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=Xtensa %s + + +define dso_local i32 @test_memptr(i32 noundef %0) local_unnamed_addr #0 { +; Xtensa-LABEL: test_memptr: +; Xtensa: # %bb.0: +; Xtensa-NEXT: entry a1, 32 +; Xtensa-NEXT: #APP +; Xtensa-NEXT: l32i a2, a2, 0 +; Xtensa-NEXT: #NO_APP +; Xtensa-NEXT: retw + %2 = inttoptr i32 %0 to i32* + %3 = call i32 asm sideeffect "l32i $0, $1", "=r,*m"(i32* elementtype(i32) %2) + ret i32 %3 +} + +attributes #0 = { nounwind } +attributes #1 = { nounwind } + From 9487fa278e7b0ce6b7cfb3bb3fb1a7b4a757e4fc Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 14:29:47 +0300 Subject: [PATCH 079/289] [Xtensa]: Fix handling of empty '-fuse-ld' option for ESP toolchain --- clang/lib/Driver/ToolChains/Xtensa.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/Driver/ToolChains/Xtensa.cpp b/clang/lib/Driver/ToolChains/Xtensa.cpp index 407e79b74e58c..5a0ef9ba707bc 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.cpp +++ b/clang/lib/Driver/ToolChains/Xtensa.cpp @@ -224,7 +224,7 @@ void xtensa::Linker::ConstructJob(Compilation &C, const JobAction &JA, if (ToolChain.GCCToolchainName == "") llvm_unreachable("Unable to find Xtensa GCC linker"); - if (Args.hasArg(options::OPT_fuse_ld_EQ)) { + if (!Args.getLastArgValue(options::OPT_fuse_ld_EQ).empty()) { Linker.assign(ToolChain.GetLinkerPath()); } else { Linker.assign(ToolChain.GCCToolchainDir); From b092ffc46e5b9a6f11d61c3c8662b7c77c04f244 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 14:30:15 +0300 Subject: [PATCH 080/289] esp: 
Adds support for vendor 'Espressif' to target triple --- clang/lib/Driver/ToolChains/Gnu.cpp | 3 +- clang/lib/Driver/ToolChains/Xtensa.cpp | 14 ++- .../bin/xtensa-esp32s2-elf-ld | 1 + .../bin/xtensa-esp32s3-elf-ld | 1 + .../gcc/xtensa-esp32s2-elf/8.4.0/crtbegin.o | 0 .../lib/gcc/xtensa-esp32s2-elf/8.4.0/crtend.o | 0 .../8.4.0/no-rtti/crtbegin.o | 0 .../xtensa-esp32s2-elf/8.4.0/no-rtti/crtend.o | 0 .../gcc/xtensa-esp32s3-elf/8.4.0/crtbegin.o | 0 .../lib/gcc/xtensa-esp32s3-elf/8.4.0/crtend.o | 0 .../8.4.0/no-rtti/crtbegin.o | 0 .../xtensa-esp32s3-elf/8.4.0/no-rtti/crtend.o | 0 .../xtensa-esp32s2-elf/lib/crt0.o | 0 .../xtensa-esp32s2-elf/lib/no-rtti/crt0.o | 0 .../xtensa-esp32s3-elf/lib/crt0.o | 0 .../xtensa-esp32s3-elf/lib/no-rtti/crt0.o | 0 clang/test/Driver/xtensa-toolchain.c | 105 +++++++++++++----- llvm/include/llvm/TargetParser/Triple.h | 1 + llvm/lib/TargetParser/Triple.cpp | 2 + 19 files changed, 97 insertions(+), 30 deletions(-) create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/bin/xtensa-esp32s2-elf-ld create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/bin/xtensa-esp32s3-elf-ld create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/no-rtti/crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/no-rtti/crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/no-rtti/crtbegin.o create mode 100644 
clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/no-rtti/crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s2-elf/lib/crt0.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s2-elf/lib/no-rtti/crt0.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s3-elf/lib/crt0.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s3-elf/lib/no-rtti/crt0.o diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp index 8530dc95ce9f1..f0b5d6c7eee6e 100644 --- a/clang/lib/Driver/ToolChains/Gnu.cpp +++ b/clang/lib/Driver/ToolChains/Gnu.cpp @@ -2618,8 +2618,7 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes( static const char *const XtensaLibDirs[] = {"/lib"}; static const char *const XtensaTriples[] = { - "xtensa-unknown-elf", "xtensa-esp32-elf", "xtensa-esp32s2-elf", - "xtensa-esp32s3-elf"}; + "xtensa-esp-elf", "xtensa-esp-unknown-elf"}; using std::begin; using std::end; diff --git a/clang/lib/Driver/ToolChains/Xtensa.cpp b/clang/lib/Driver/ToolChains/Xtensa.cpp index 5a0ef9ba707bc..1c41ce568a40c 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.cpp +++ b/clang/lib/Driver/ToolChains/Xtensa.cpp @@ -37,8 +37,20 @@ XtensaToolChain::XtensaToolChain(const Driver &D, const llvm::Triple &Triple, const ArgList &Args) : Generic_ELF(D, Triple, Args) { - GCCInstallation.init(Triple, Args); + std::vector ExtraAliases; + if (Triple.getVendor() == llvm::Triple::Espressif) { + std::string ESPCpuName = "esp32"; + if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) { + ESPCpuName = A->getValue(); + } + ExtraAliases = {std::string("xtensa-") + ESPCpuName + "-elf"}; + if (Args.hasArg(options::OPT_v)) { + llvm::errs() << "Use GCC target extra alias: " << ExtraAliases[0] << "\n"; + } + } + + GCCInstallation.init(Triple, Args, ExtraAliases); if (!GCCInstallation.isValid()) { llvm_unreachable("Unexpected Xtensa 
GCC toolchain version"); } diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/bin/xtensa-esp32s2-elf-ld b/clang/test/Driver/Inputs/multilib_xtensa_tree/bin/xtensa-esp32s2-elf-ld new file mode 100644 index 0000000000000..b23e55619b2ff --- /dev/null +++ b/clang/test/Driver/Inputs/multilib_xtensa_tree/bin/xtensa-esp32s2-elf-ld @@ -0,0 +1 @@ +#!/bin/true diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/bin/xtensa-esp32s3-elf-ld b/clang/test/Driver/Inputs/multilib_xtensa_tree/bin/xtensa-esp32s3-elf-ld new file mode 100644 index 0000000000000..b23e55619b2ff --- /dev/null +++ b/clang/test/Driver/Inputs/multilib_xtensa_tree/bin/xtensa-esp32s3-elf-ld @@ -0,0 +1 @@ +#!/bin/true diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/no-rtti/crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/no-rtti/crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/no-rtti/crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/no-rtti/crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git 
a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/no-rtti/crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/no-rtti/crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/no-rtti/crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/no-rtti/crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s2-elf/lib/crt0.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s2-elf/lib/crt0.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s2-elf/lib/no-rtti/crt0.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s2-elf/lib/no-rtti/crt0.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s3-elf/lib/crt0.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s3-elf/lib/crt0.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s3-elf/lib/no-rtti/crt0.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s3-elf/lib/no-rtti/crt0.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/xtensa-toolchain.c b/clang/test/Driver/xtensa-toolchain.c index 0570c1d0f0b57..aa753abcf6366 100644 --- a/clang/test/Driver/xtensa-toolchain.c +++ b/clang/test/Driver/xtensa-toolchain.c @@ -1,42 +1,93 @@ // A basic clang -cc1 command-line, and simple environment check. 
-// RUN: %clang %s -### -no-canonical-prefixes -target xtensa \ +// RUN: %clang %s -### -no-canonical-prefixes -target xtensa-esp-elf \ // RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ -// RUN: | FileCheck -check-prefix=CC1 %s -// CC1: clang{{.*}} "-cc1" "-triple" "xtensa" +// RUN: | FileCheck -check-prefix=CC1-ESP-DEFAULT %s +// CC1-ESP-DEFAULT: clang{{.*}} "-cc1" "-triple" "xtensa-esp-unknown-elf" -// RUN: %clang %s -### -no-canonical-prefixes \ -// RUN: -target xtensa --rtlib=platform \ +// RUN: %clang %s -### -no-canonical-prefixes -target xtensa-esp-elf -mcpu=esp32\ // RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ -// RUN: | FileCheck -check-prefix=C-XTENSA-BAREMETAL %s +// RUN: | FileCheck -check-prefix=CC1-ESP32 %s +// CC1-ESP32: clang{{.*}} "-cc1" "-triple" "xtensa-esp-unknown-elf" {{.*}}"-target-cpu" "esp32" -// C-XTENSA-BAREMETAL: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" -// C-XTENSA-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0/no-rtti" -// C-XTENSA-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib/no-rtti" +// RUN: %clang %s -### -no-canonical-prefixes -target xtensa-esp-elf -mcpu=esp32s2\ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ +// RUN: | FileCheck -check-prefix=CC1-ESP32S2 %s +// CC1-ESP32S2: clang{{.*}} "-cc1" "-triple" "xtensa-esp-unknown-elf" {{.*}}"-target-cpu" "esp32s2" + +// RUN: %clang %s -### -no-canonical-prefixes -target xtensa-esp-elf -mcpu=esp32s3\ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ +// RUN: | FileCheck -check-prefix=CC1-ESP32S3 %s +// CC1-ESP32S3: clang{{.*}} "-cc1" "-triple" "xtensa-esp-unknown-elf" {{.*}}"-target-cpu" "esp32s3" + +// RUN: %clang %s -### 
-no-canonical-prefixes -fuse-ld= \ +// RUN: -target xtensa-esp-elf --rtlib=platform \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ +// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-BAREMETAL %s -// RUN: %clang %s -### -no-canonical-prefixes \ -// RUN: -target xtensa --rtlib=platform \ +// C-XTENSA-ESP32-BAREMETAL: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" +// C-XTENSA-ESP32-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0/no-rtti" +// C-XTENSA-ESP32-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib/no-rtti" + +// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ +// RUN: -target xtensa-esp-elf --rtlib=platform \ // RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -frtti 2>&1 \ -// RUN: | FileCheck -check-prefix=C-XTENSA-BAREMETAL-RTTI %s +// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-BAREMETAL-RTTI %s -// C-XTENSA-BAREMETAL-RTTI: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" -// C-XTENSA-BAREMETAL-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0" -// C-XTENSA-BAREMETAL-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib" +// C-XTENSA-ESP32-BAREMETAL-RTTI: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" +// C-XTENSA-ESP32-BAREMETAL-RTTI: 
"-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0" +// C-XTENSA-ESP32-BAREMETAL-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib" -// RUN: %clang %s -### -no-canonical-prefixes \ -// RUN: -target xtensa --rtlib=platform \ +// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ +// RUN: -target xtensa-esp-elf --rtlib=platform \ // RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -mfix-esp32-psram-cache-issue 2>&1 \ -// RUN: | FileCheck -check-prefix=C-XTENSA-BAREMETAL-PSRAM %s +// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-BAREMETAL-PSRAM %s -// C-XTENSA-BAREMETAL-PSRAM: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" -// C-XTENSA-BAREMETAL-PSRAM: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0/esp32-psram/no-rtti" -// C-XTENSA-BAREMETAL-PSRAM: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib/esp32-psram/no-rtti" +// C-XTENSA-ESP32-BAREMETAL-PSRAM: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" +// C-XTENSA-ESP32-BAREMETAL-PSRAM: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0/esp32-psram/no-rtti" +// C-XTENSA-ESP32-BAREMETAL-PSRAM: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib/esp32-psram/no-rtti" -// RUN: %clang %s -### -no-canonical-prefixes \ -// RUN: -target xtensa --rtlib=platform \ +// RUN: %clang %s -### 
-no-canonical-prefixes -fuse-ld= \ +// RUN: -target xtensa-esp-elf --rtlib=platform \ // RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -mfix-esp32-psram-cache-issue -frtti 2>&1 \ -// RUN: | FileCheck -check-prefix=C-XTENSA-BAREMETAL-PSRAM-RTTI %s +// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-BAREMETAL-PSRAM-RTTI %s + +// C-XTENSA-ESP32-BAREMETAL-PSRAM-RTTI: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" +// C-XTENSA-ESP32-BAREMETAL-PSRAM-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0/esp32-psram" +// C-XTENSA-ESP32-BAREMETAL-PSRAM-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib/esp32-psram" + +// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ +// RUN: -target xtensa-esp-elf -mcpu=esp32s2 --rtlib=platform \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ +// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32S2-BAREMETAL %s + +// C-XTENSA-ESP32S2-BAREMETAL: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s2-elf-ld" +// C-XTENSA-ESP32S2-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32s2-elf{{/|\\\\}}8.4.0/no-rtti" +// C-XTENSA-ESP32S2-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32s2-elf{{/|\\\\}}lib/no-rtti" + +// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ +// RUN: -target xtensa-esp-elf -mcpu=esp32s2 --rtlib=platform \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -frtti 2>&1 \ +// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32S2-BAREMETAL-RTTI %s + +// C-XTENSA-ESP32S2-BAREMETAL-RTTI: 
"{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s2-elf-ld" +// C-XTENSA-ESP32S2-BAREMETAL-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32s2-elf{{/|\\\\}}8.4.0" +// C-XTENSA-ESP32S2-BAREMETAL-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32s2-elf{{/|\\\\}}lib" + +// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ +// RUN: -target xtensa-esp-elf -mcpu=esp32s3 --rtlib=platform \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ +// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32S3-BAREMETAL %s + +// C-XTENSA-ESP32S3-BAREMETAL: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s3-elf-ld" +// C-XTENSA-ESP32S3-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32s3-elf{{/|\\\\}}8.4.0/no-rtti" +// C-XTENSA-ESP32S3-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32s3-elf{{/|\\\\}}lib/no-rtti" + +// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ +// RUN: -target xtensa-esp-elf -mcpu=esp32s3 --rtlib=platform \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -frtti 2>&1 \ +// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32S3-BAREMETAL-RTTI %s -// C-XTENSA-BAREMETAL-PSRAM-RTTI: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" -// C-XTENSA-BAREMETAL-PSRAM-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0/esp32-psram" -// C-XTENSA-BAREMETAL-PSRAM-RTTI: 
"-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib/esp32-psram" +// C-XTENSA-ESP32S3-BAREMETAL-RTTI: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s3-elf-ld" +// C-XTENSA-ESP32S3-BAREMETAL-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32s3-elf{{/|\\\\}}8.4.0" +// C-XTENSA-ESP32S3-BAREMETAL-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32s3-elf{{/|\\\\}}lib" diff --git a/llvm/include/llvm/TargetParser/Triple.h b/llvm/include/llvm/TargetParser/Triple.h index ebd92f264d904..f8d16ba2be90b 100644 --- a/llvm/include/llvm/TargetParser/Triple.h +++ b/llvm/include/llvm/TargetParser/Triple.h @@ -194,6 +194,7 @@ class Triple { Mesa, SUSE, OpenEmbedded, + Espressif, LastVendorType = OpenEmbedded }; enum OSType { diff --git a/llvm/lib/TargetParser/Triple.cpp b/llvm/lib/TargetParser/Triple.cpp index bf89aace65e58..c915c6c6cea2e 100644 --- a/llvm/lib/TargetParser/Triple.cpp +++ b/llvm/lib/TargetParser/Triple.cpp @@ -253,6 +253,7 @@ StringRef Triple::getVendorTypeName(VendorType Kind) { case PC: return "pc"; case SCEI: return "scei"; case SUSE: return "suse"; + case Espressif: return "esp"; } llvm_unreachable("Invalid VendorType!"); @@ -637,6 +638,7 @@ static Triple::VendorType parseVendor(StringRef VendorName) { .Case("mesa", Triple::Mesa) .Case("suse", Triple::SUSE) .Case("oe", Triple::OpenEmbedded) + .Case("esp", Triple::Espressif) .Default(Triple::UnknownVendor); } From f10b5e2eea784f353c34122b17c40ba941102fcd Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 14:52:35 +0300 Subject: [PATCH 081/289] [Xtensa] Fix multilib support. This is temporary fix, according to new api changes in release 18. 
--- clang/lib/Driver/ToolChains/Gnu.cpp | 7 +++++-- clang/lib/Driver/ToolChains/Xtensa.cpp | 15 +-------------- 2 files changed, 6 insertions(+), 16 deletions(-) diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp index f0b5d6c7eee6e..97beccc61b872 100644 --- a/clang/lib/Driver/ToolChains/Gnu.cpp +++ b/clang/lib/Driver/ToolChains/Gnu.cpp @@ -2617,8 +2617,11 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes( "s390x-redhat-linux"}; static const char *const XtensaLibDirs[] = {"/lib"}; - static const char *const XtensaTriples[] = { - "xtensa-esp-elf", "xtensa-esp-unknown-elf"}; + static const char *const XtensaTriples[] = {"xtensa-esp-elf", + "xtensa-esp-unknown-elf", + "xtensa-esp32-elf", + "xtensa-esp32s2-elf", + "xtensa-esp32s3-elf"}; using std::begin; using std::end; diff --git a/clang/lib/Driver/ToolChains/Xtensa.cpp b/clang/lib/Driver/ToolChains/Xtensa.cpp index 1c41ce568a40c..26a787f591722 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.cpp +++ b/clang/lib/Driver/ToolChains/Xtensa.cpp @@ -37,20 +37,7 @@ XtensaToolChain::XtensaToolChain(const Driver &D, const llvm::Triple &Triple, const ArgList &Args) : Generic_ELF(D, Triple, Args) { - std::vector ExtraAliases; - - if (Triple.getVendor() == llvm::Triple::Espressif) { - std::string ESPCpuName = "esp32"; - if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) { - ESPCpuName = A->getValue(); - } - ExtraAliases = {std::string("xtensa-") + ESPCpuName + "-elf"}; - if (Args.hasArg(options::OPT_v)) { - llvm::errs() << "Use GCC target extra alias: " << ExtraAliases[0] << "\n"; - } - } - - GCCInstallation.init(Triple, Args, ExtraAliases); + GCCInstallation.init(Triple, Args); if (!GCCInstallation.isValid()) { llvm_unreachable("Unexpected Xtensa GCC toolchain version"); } From dffbf298f6b4bcf69f080a4f4ef4adfc34cf4456 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 14:56:01 +0300 Subject: [PATCH 082/289] esp/riscv: Use GCC assembler for ESP RISCV chips 
--- clang/lib/Driver/ToolChains/RISCVToolchain.cpp | 1 + clang/lib/Driver/ToolChains/RISCVToolchain.h | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/clang/lib/Driver/ToolChains/RISCVToolchain.cpp b/clang/lib/Driver/ToolChains/RISCVToolchain.cpp index 624099d21ae12..29a031c640191 100644 --- a/clang/lib/Driver/ToolChains/RISCVToolchain.cpp +++ b/clang/lib/Driver/ToolChains/RISCVToolchain.cpp @@ -8,6 +8,7 @@ #include "RISCVToolchain.h" #include "CommonArgs.h" +#include "Arch/RISCV.h" #include "clang/Driver/Compilation.h" #include "clang/Driver/InputInfo.h" #include "clang/Driver/Options.h" diff --git a/clang/lib/Driver/ToolChains/RISCVToolchain.h b/clang/lib/Driver/ToolChains/RISCVToolchain.h index fa0aa265d842b..42a35ed287925 100644 --- a/clang/lib/Driver/ToolChains/RISCVToolchain.h +++ b/clang/lib/Driver/ToolChains/RISCVToolchain.h @@ -37,6 +37,12 @@ class LLVM_LIBRARY_VISIBILITY RISCVToolChain : public Generic_ELF { addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args) const override; + bool IsIntegratedAssemblerDefault() const override { + if (GCCInstallation.getTriple().getVendor() == llvm::Triple::Espressif) + return false; + return Generic_ELF::IsIntegratedAssemblerDefault(); + } + protected: Tool *buildLinker() const override; From 18a41ad1207edd271e1424e37d75408b1f6c9b18 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 14:57:44 +0300 Subject: [PATCH 083/289] esp/riscv: Adds support for 'riscv32-esp-elf' target triple --- clang/lib/Driver/ToolChains/Gnu.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp index 97beccc61b872..eba6efd0ae3c3 100644 --- a/clang/lib/Driver/ToolChains/Gnu.cpp +++ b/clang/lib/Driver/ToolChains/Gnu.cpp @@ -2599,7 +2599,9 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes( static const char *const RISCV32LibDirs[] = {"/lib32", "/lib"}; static const 
char *const RISCV32Triples[] = {"riscv32-unknown-linux-gnu", - "riscv32-unknown-elf"}; + "riscv32-unknown-elf", + "riscv32-esp-elf", + "riscv32-esp-unknown-elf"}; static const char *const RISCV64LibDirs[] = {"/lib64", "/lib"}; static const char *const RISCV64Triples[] = {"riscv64-unknown-linux-gnu", "riscv64-unknown-elf"}; From 0ef365a053ab8accff594d28a546f6c790d7b794 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 14:58:22 +0300 Subject: [PATCH 084/289] riscv: Add default multilib. Make '-print-multi-lib' output compatible with GCC --- clang/lib/Driver/ToolChains/Gnu.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp index eba6efd0ae3c3..720da928e4007 100644 --- a/clang/lib/Driver/ToolChains/Gnu.cpp +++ b/clang/lib/Driver/ToolChains/Gnu.cpp @@ -1860,6 +1860,10 @@ static void findRISCVBareMetalMultilibs(const Driver &D, {"rv64imafdc", "lp64d"}}; std::vector Ms; + + if (TargetTriple.getVendor() == llvm::Triple::Espressif) + Ms.emplace_back(MultilibBuilder()); + for (auto Element : RISCVMultilibSet) { // multilib path rule is ${march}/${mabi} Ms.emplace_back( From 1342578ec27d7a2597ad663f6bac6e9cdcae8c70 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 14:59:25 +0300 Subject: [PATCH 085/289] esp/riscv: Add multilib support for 'riscv32-esp-elf' GCC toolchain --- clang/lib/Driver/ToolChains/Gnu.cpp | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp index 720da928e4007..d36dff64ad28a 100644 --- a/clang/lib/Driver/ToolChains/Gnu.cpp +++ b/clang/lib/Driver/ToolChains/Gnu.cpp @@ -1856,6 +1856,7 @@ static void findRISCVBareMetalMultilibs(const Driver &D, // TODO: support MULTILIB_REUSE constexpr RiscvMultilib RISCVMultilibSet[] = { {"rv32i", "ilp32"}, {"rv32im", "ilp32"}, {"rv32iac", "ilp32"}, + {"rv32imc", "ilp32"}, {"rv32imac", "ilp32"}, 
{"rv32imafc", "ilp32f"}, {"rv64imac", "lp64"}, {"rv64imafdc", "lp64d"}}; @@ -1876,13 +1877,21 @@ static void findRISCVBareMetalMultilibs(const Driver &D, MultilibSetBuilder() .Either(Ms) .makeMultilibSet() - .FilterOut(NonExistent) - .setFilePathsCallback([](const Multilib &M) { - return std::vector( - {M.gccSuffix(), - "/../../../../riscv64-unknown-elf/lib" + M.gccSuffix(), - "/../../../../riscv32-unknown-elf/lib" + M.gccSuffix()}); - }); + .FilterOut(NonExistent); + + if (TargetTriple.getVendor() == llvm::Triple::Espressif) { + RISCVMultilibs.setFilePathsCallback([](const Multilib &M) { + return std::vector( + {M.gccSuffix(), "/../../../../riscv32-esp-elf/lib" + M.gccSuffix()}); + }); + } else { + RISCVMultilibs.setFilePathsCallback([](const Multilib &M) { + return std::vector( + {M.gccSuffix(), + "/../../../../riscv64-unknown-elf/lib" + M.gccSuffix(), + "/../../../../riscv32-unknown-elf/lib" + M.gccSuffix()}); + }); + } Multilib::flags_list Flags; llvm::StringSet<> Added_ABIs; From 16265ff6e0c4afd9705ea8668afae815d07c4d97 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 14:59:58 +0300 Subject: [PATCH 086/289] esp/riscv: Add 'libnosys' to linker command line by default Necessary to avoid build failures when build system (e.g. cmake) tries to make simple compiler checks at configuration stage. 
--- clang/lib/Driver/ToolChains/RISCVToolchain.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/clang/lib/Driver/ToolChains/RISCVToolchain.cpp b/clang/lib/Driver/ToolChains/RISCVToolchain.cpp index 29a031c640191..3afcbd452683b 100644 --- a/clang/lib/Driver/ToolChains/RISCVToolchain.cpp +++ b/clang/lib/Driver/ToolChains/RISCVToolchain.cpp @@ -8,7 +8,6 @@ #include "RISCVToolchain.h" #include "CommonArgs.h" -#include "Arch/RISCV.h" #include "clang/Driver/Compilation.h" #include "clang/Driver/InputInfo.h" #include "clang/Driver/Options.h" @@ -217,6 +216,9 @@ void RISCV::Linker::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back("--start-group"); CmdArgs.push_back("-lc"); CmdArgs.push_back("-lgloss"); + if (ToolChain.getTriple().getVendor() == llvm::Triple::Espressif) { + CmdArgs.push_back("-lnosys"); + } CmdArgs.push_back("--end-group"); AddRunTimeLibs(ToolChain, ToolChain.getDriver(), CmdArgs, Args); } From 1f71f41af7b970cc8b4c9f70369b009d5b95e533 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:02 +0300 Subject: [PATCH 087/289] esp/riscv: Exclude 'crt0.o' from linking in 'freestanding' mode --- clang/lib/Driver/ToolChains/RISCVToolchain.cpp | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/clang/lib/Driver/ToolChains/RISCVToolchain.cpp b/clang/lib/Driver/ToolChains/RISCVToolchain.cpp index 3afcbd452683b..0ea8e591a5e34 100644 --- a/clang/lib/Driver/ToolChains/RISCVToolchain.cpp +++ b/clang/lib/Driver/ToolChains/RISCVToolchain.cpp @@ -192,7 +192,17 @@ void RISCV::Linker::ConstructJob(Compilation &C, const JobAction &JA, } if (WantCRTs) { - CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o"))); + /* Espressif toolchain uses newlib. crt0.o from it refers to 'main' symbol. 
+ In 'freestanding' mode 'main' is not marked as a special symbol by clang, + so when compiling a C++ program with 'clang++' 'main' gets mangled + (if not declared as 'extern "C"') and the linker cannot resolve it. + The problem can happen, for example, when cmake checks the C++ compiler by building simple C++ code, + unfortunately the 'main' function in that code is not declared as 'extern "C"'. */ + bool Freestanding = + Args.hasFlag(options::OPT_ffreestanding, options::OPT_fhosted, false); + if (!Freestanding || ToolChain.getTriple().getVendor() != llvm::Triple::Espressif) { + CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o"))); + } CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin))); } From 776ea18da3c756a54f5e72300065c9de8d46a441 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:03 +0300 Subject: [PATCH 088/289] riscv: Add ESP toolchain tests --- .../bin/riscv32-esp-elf-as | 1 + .../bin/riscv32-esp-elf-ld | 1 + .../lib/gcc/riscv32-esp-elf/8.4.0/crtbegin.o | 0 .../lib/gcc/riscv32-esp-elf/8.4.0/crtend.o | 0 .../8.4.0/rv32i/ilp32/crtbegin.o | 0 .../8.4.0/rv32i/ilp32/crtend.o | 0 .../8.4.0/rv32imac/ilp32/crtbegin.o | 0 .../8.4.0/rv32imac/ilp32/crtend.o | 0 .../8.4.0/rv32imafc/ilp32f/crtbegin.o | 0 .../8.4.0/rv32imafc/ilp32f/crtend.o | 0 .../8.4.0/rv32imc/ilp32/crtbegin.o | 0 .../8.4.0/rv32imc/ilp32/crtend.o | 0 .../riscv32-esp-elf/include/c++/8.4.0/.keep | 0 .../riscv32-esp-elf/lib/crt0.o | 0 .../riscv32-esp-elf/lib/rv32i/ilp32/crt0.o | 0 .../riscv32-esp-elf/lib/rv32imac/ilp32/crt0.o | 0 .../lib/rv32imafc/ilp32f/crt0.o | 0 .../riscv32-esp-elf/lib/rv32imc/ilp32/crt0.o | 0 clang/test/Driver/riscv32-esp-toolchain.c | 306 ++++++++++++++++++ 19 files changed, 308 insertions(+) create mode 100755 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/bin/riscv32-esp-elf-as create mode 100755 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/bin/riscv32-esp-elf-ld create mode 100644 
clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/include/c++/8.4.0/.keep create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/crt0.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32i/ilp32/crt0.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imac/ilp32/crt0.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imafc/ilp32f/crt0.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imc/ilp32/crt0.o create mode 100644 clang/test/Driver/riscv32-esp-toolchain.c diff --git 
a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/bin/riscv32-esp-elf-as b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/bin/riscv32-esp-elf-as new file mode 100755 index 0000000000000..b23e55619b2ff --- /dev/null +++ b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/bin/riscv32-esp-elf-as @@ -0,0 +1 @@ +#!/bin/true diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/bin/riscv32-esp-elf-ld b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/bin/riscv32-esp-elf-ld new file mode 100755 index 0000000000000..b23e55619b2ff --- /dev/null +++ b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/bin/riscv32-esp-elf-ld @@ -0,0 +1 @@ +#!/bin/true diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/crtbegin.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/crtend.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/crtbegin.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/crtend.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/crtbegin.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/crtbegin.o new file mode 100644 
index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/crtend.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/crtbegin.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/crtend.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/crtbegin.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/crtend.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/include/c++/8.4.0/.keep b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/include/c++/8.4.0/.keep new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/crt0.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/crt0.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git 
a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32i/ilp32/crt0.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32i/ilp32/crt0.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imac/ilp32/crt0.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imac/ilp32/crt0.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imafc/ilp32f/crt0.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imafc/ilp32f/crt0.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imc/ilp32/crt0.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imc/ilp32/crt0.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/riscv32-esp-toolchain.c b/clang/test/Driver/riscv32-esp-toolchain.c new file mode 100644 index 0000000000000..5c34c0a3bf7a1 --- /dev/null +++ b/clang/test/Driver/riscv32-esp-toolchain.c @@ -0,0 +1,306 @@ +// A basic clang -cc1 command-line, and simple environment check. + +// RUN: %clang %s -### -no-canonical-prefixes -target riscv32-esp-elf \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_esp_elf_sdk 2>&1 \ +// RUN: | FileCheck -check-prefix=CC1 %s +// CC1: clang{{.*}} "-cc1" "-triple" "riscv32-esp-unknown-elf" + +// Test interaction with -fuse-ld=lld, if lld is available. 
+// RUN: %clang %s -### -no-canonical-prefixes -target riscv32-esp-elf \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_esp_elf_sdk -fuse-ld=lld 2>&1 \ +// RUN: | FileCheck -check-prefix=LLD %s +// LLD: {{(error: invalid linker name in argument '-fuse-ld=lld')|(ld.lld)}} + +// rv32imac is the default + +// RUN: %clang %s -### -no-canonical-prefixes -target riscv32-esp-elf \ +// RUN: -ffreestanding --rtlib=libgcc --ld-path=riscv32-esp-elf-ld \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_esp_elf_sdk \ +// RUN: --sysroot=%S/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf 2>&1 \ +// RUN: | FileCheck -check-prefix=C-RV32IMAC-BAREMETAL-MULTI-ILP32 %s + +// C-RV32IMAC-BAREMETAL-MULTI-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-as" +// C-RV32IMAC-BAREMETAL-MULTI-ILP32: "-mabi" "ilp32" "-march" "rv32imac" +// C-RV32IMAC-BAREMETAL-MULTI-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" +// C-RV32IMAC-BAREMETAL-MULTI-ILP32: "--sysroot={{.*}}/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf" +// C-RV32IMAC-BAREMETAL-MULTI-ILP32: "-m" "elf32lriscv" +// C-RV32IMAC-BAREMETAL-MULTI-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32{{/|\\\\}}crtbegin.o" +// C-RV32IMAC-BAREMETAL-MULTI-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0" +// C-RV32IMAC-BAREMETAL-MULTI-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib" +// C-RV32IMAC-BAREMETAL-MULTI-ILP32: "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "-lgcc" +// C-RV32IMAC-BAREMETAL-MULTI-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32{{/|\\\\}}crtend.o" + +// RUN: %clang %s -### -no-canonical-prefixes -target riscv32-esp-elf \ +// RUN: -ffreestanding --rtlib=libgcc --ld-path=riscv32-esp-elf-ld 
\ +// RUN: --sysroot= \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_esp_elf_sdk 2>&1 \ +// RUN: | FileCheck -check-prefix=C-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32 %s + +// C-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-as" +// C-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-mabi" "ilp32" "-march" "rv32imac" +// C-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" +// C-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-m" "elf32lriscv" +// C-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32{{/|\\\\}}crtbegin.o" +// C-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0" +// C-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib" +// C-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "-lgcc" +// C-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32{{/|\\\\}}crtend.o" + +// RUN: %clangxx %s -### -no-canonical-prefixes -target riscv32-esp-elf \ +// RUN: -ffreestanding -stdlib=libstdc++ --rtlib=libgcc --ld-path=riscv32-esp-elf-ld \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_esp_elf_sdk \ +// RUN: --sysroot=%S/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf 2>&1 \ +// RUN: | FileCheck -check-prefix=CXX-RV32IMAC-BAREMETAL-MULTI-ILP32 %s + +// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "-internal-isystem" "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/include/c++{{/|\\\\}}8.4.0" +// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: 
"{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-as" +// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "-mabi" "ilp32" "-march" "rv32imac" +// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" +// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "--sysroot={{.*}}/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf" +// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "-m" "elf32lriscv" +// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32{{/|\\\\}}crtbegin.o" +// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0" +// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib" +// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "-lstdc++" "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "-lgcc" +// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32{{/|\\\\}}crtend.o" + +// RUN: %clangxx %s -### -no-canonical-prefixes -target riscv32-esp-elf \ +// RUN: -ffreestanding -stdlib=libstdc++ --rtlib=libgcc --ld-path=riscv32-esp-elf-ld \ +// RUN: --sysroot= \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_esp_elf_sdk 2>&1 \ +// RUN: | FileCheck -check-prefix=CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32 %s + +// CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-internal-isystem" "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/include/c++{{/|\\\\}}8.4.0" +// CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-as" +// CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-mabi" "ilp32" 
"-march" "rv32imac" +// CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" +// CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-m" "elf32lriscv" +// CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32{{/|\\\\}}crtbegin.o" +// CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0" +// CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib" +// CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-lstdc++" "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "-lgcc" +// CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32{{/|\\\\}}crtend.o" + +// RUN: %clang %s -### -no-canonical-prefixes -target riscv32-esp-elf \ +// RUN: -march=rv32i -mabi=ilp32 \ +// RUN: -ffreestanding --rtlib=libgcc --ld-path=riscv32-esp-elf-ld --sysroot= \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_esp_elf_sdk 2>&1 \ +// RUN: | FileCheck -check-prefix=C-RV32I-BAREMETAL-MULTI-ILP32 %s + +// C-RV32I-BAREMETAL-MULTI-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-as" +// C-RV32I-BAREMETAL-MULTI-ILP32: "-mabi" "ilp32" "-march" "rv32i" +// C-RV32I-BAREMETAL-MULTI-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" +// C-RV32I-BAREMETAL-MULTI-ILP32: "-m" "elf32lriscv" +// C-RV32I-BAREMETAL-MULTI-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32{{/|\\\\}}crtbegin.o" +// C-RV32I-BAREMETAL-MULTI-ILP32: 
"-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0" +// C-RV32I-BAREMETAL-MULTI-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib" +// C-RV32I-BAREMETAL-MULTI-ILP32: "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "-lgcc" +// C-RV32I-BAREMETAL-MULTI-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32{{/|\\\\}}crtend.o" + +// RUN: %clang %s -### -no-canonical-prefixes -target riscv32-esp-elf \ +// RUN: -march=rv32imc -mabi=ilp32 \ +// RUN: -ffreestanding --rtlib=libgcc --ld-path=riscv32-esp-elf-ld --sysroot= \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_esp_elf_sdk 2>&1 \ +// RUN: | FileCheck -check-prefix=C-RV32IMC-BAREMETAL-MULTI-ILP32 %s + +// C-RV32IMC-BAREMETAL-MULTI-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-as" +// C-RV32IMC-BAREMETAL-MULTI-ILP32: "-mabi" "ilp32" "-march" "rv32imc" +// C-RV32IMC-BAREMETAL-MULTI-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" +// C-RV32IMC-BAREMETAL-MULTI-ILP32: "-m" "elf32lriscv" +// C-RV32IMC-BAREMETAL-MULTI-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32{{/|\\\\}}crtbegin.o" +// C-RV32IMC-BAREMETAL-MULTI-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0" +// C-RV32IMC-BAREMETAL-MULTI-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib" +// C-RV32IMC-BAREMETAL-MULTI-ILP32: "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "-lgcc" +// C-RV32IMC-BAREMETAL-MULTI-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32{{/|\\\\}}crtend.o" + +// RUN: %clang %s -### -no-canonical-prefixes -target 
riscv32-esp-elf \ +// RUN: -march=rv32imafc -mabi=ilp32f \ +// RUN: -ffreestanding --rtlib=libgcc --ld-path=riscv32-esp-elf-ld --sysroot= \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_esp_elf_sdk 2>&1 \ +// RUN: | FileCheck -check-prefix=C-RV32IMAFC-BAREMETAL-MULTI-ILP32F %s + +// C-RV32IMAFC-BAREMETAL-MULTI-ILP32F: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-as" +// C-RV32IMAFC-BAREMETAL-MULTI-ILP32F: "-mabi" "ilp32f" "-march" "rv32imafc" +// C-RV32IMAFC-BAREMETAL-MULTI-ILP32F: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" +// C-RV32IMAFC-BAREMETAL-MULTI-ILP32F: "-m" "elf32lriscv" +// C-RV32IMAFC-BAREMETAL-MULTI-ILP32F: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f{{/|\\\\}}crtbegin.o" +// C-RV32IMAFC-BAREMETAL-MULTI-ILP32F: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0" +// C-RV32IMAFC-BAREMETAL-MULTI-ILP32F: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib" +// C-RV32IMAFC-BAREMETAL-MULTI-ILP32F: "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "-lgcc" +// C-RV32IMAFC-BAREMETAL-MULTI-ILP32F: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f{{/|\\\\}}crtend.o" + +// RUN: %clang -target riscv32-esp-elf %s -emit-llvm -S -o - | FileCheck %s + +typedef __builtin_va_list va_list; +typedef __SIZE_TYPE__ size_t; +typedef __PTRDIFF_TYPE__ ptrdiff_t; +typedef __WCHAR_TYPE__ wchar_t; +typedef __WINT_TYPE__ wint_t; + + +// Check Alignments + +// CHECK: @align_c = dso_local global i32 1 +int align_c = __alignof(char); + +// CHECK: @align_s = dso_local global i32 2 +int align_s = __alignof(short); + +// CHECK: @align_i = dso_local global i32 4 +int align_i = __alignof(int); + +// CHECK: @align_wc = dso_local 
global i32 4 +int align_wc = __alignof(wchar_t); + +// CHECK: @align_wi = dso_local global i32 4 +int align_wi = __alignof(wint_t); + +// CHECK: @align_l = dso_local global i32 4 +int align_l = __alignof(long); + +// CHECK: @align_ll = dso_local global i32 8 +int align_ll = __alignof(long long); + +// CHECK: @align_p = dso_local global i32 4 +int align_p = __alignof(void*); + +// CHECK: @align_f = dso_local global i32 4 +int align_f = __alignof(float); + +// CHECK: @align_d = dso_local global i32 8 +int align_d = __alignof(double); + +// CHECK: @align_ld = dso_local global i32 16 +int align_ld = __alignof(long double); + +// CHECK: @align_vl = dso_local global i32 4 +int align_vl = __alignof(va_list); + +// CHECK: @align_a_c = dso_local global i32 1 +int align_a_c = __alignof(_Atomic(char)); + +// CHECK: @align_a_s = dso_local global i32 2 +int align_a_s = __alignof(_Atomic(short)); + +// CHECK: @align_a_i = dso_local global i32 4 +int align_a_i = __alignof(_Atomic(int)); + +// CHECK: @align_a_wc = dso_local global i32 4 +int align_a_wc = __alignof(_Atomic(wchar_t)); + +// CHECK: @align_a_wi = dso_local global i32 4 +int align_a_wi = __alignof(_Atomic(wint_t)); + +// CHECK: @align_a_l = dso_local global i32 4 +int align_a_l = __alignof(_Atomic(long)); + +// CHECK: @align_a_ll = dso_local global i32 8 +int align_a_ll = __alignof(_Atomic(long long)); + +// CHECK: @align_a_p = dso_local global i32 4 +int align_a_p = __alignof(_Atomic(void*)); + +// CHECK: @align_a_f = dso_local global i32 4 +int align_a_f = __alignof(_Atomic(float)); + +// CHECK: @align_a_d = dso_local global i32 8 +int align_a_d = __alignof(_Atomic(double)); + +// CHECK: @align_a_ld = dso_local global i32 16 +int align_a_ld = __alignof(_Atomic(long double)); + +// CHECK: @align_a_s4 = dso_local global i32 4 +int align_a_s4 = __alignof(_Atomic(struct { char s[4]; })); + +// CHECK: @align_a_s8 = dso_local global i32 8 +int align_a_s8 = __alignof(_Atomic(struct { char s[8]; })); + +// CHECK: 
@align_a_s16 = dso_local global i32 16 +int align_a_s16 = __alignof(_Atomic(struct { char s[16]; })); + +// CHECK: @align_a_s32 = dso_local global i32 1 +int align_a_s32 = __alignof(_Atomic(struct { char s[32]; })); + + +// Check Sizes + +// CHECK: @size_a_c = dso_local global i32 1 +int size_a_c = sizeof(_Atomic(char)); + +// CHECK: @size_a_s = dso_local global i32 2 +int size_a_s = sizeof(_Atomic(short)); + +// CHECK: @size_a_i = dso_local global i32 4 +int size_a_i = sizeof(_Atomic(int)); + +// CHECK: @size_a_wc = dso_local global i32 4 +int size_a_wc = sizeof(_Atomic(wchar_t)); + +// CHECK: @size_a_wi = dso_local global i32 4 +int size_a_wi = sizeof(_Atomic(wint_t)); + +// CHECK: @size_a_l = dso_local global i32 4 +int size_a_l = sizeof(_Atomic(long)); + +// CHECK: @size_a_ll = dso_local global i32 8 +int size_a_ll = sizeof(_Atomic(long long)); + +// CHECK: @size_a_p = dso_local global i32 4 +int size_a_p = sizeof(_Atomic(void*)); + +// CHECK: @size_a_f = dso_local global i32 4 +int size_a_f = sizeof(_Atomic(float)); + +// CHECK: @size_a_d = dso_local global i32 8 +int size_a_d = sizeof(_Atomic(double)); + +// CHECK: @size_a_ld = dso_local global i32 16 +int size_a_ld = sizeof(_Atomic(long double)); + + +// Check types + +// CHECK: zeroext i8 @check_char() +char check_char() { return 0; } + +// CHECK: define dso_local signext i16 @check_short() +short check_short() { return 0; } + +// CHECK: define dso_local i32 @check_int() +int check_int() { return 0; } + +// CHECK: define dso_local i32 @check_wchar_t() +int check_wchar_t() { return 0; } + +// CHECK: define dso_local i32 @check_long() +long check_long() { return 0; } + +// CHECK: define dso_local i64 @check_longlong() +long long check_longlong() { return 0; } + +// CHECK: define dso_local zeroext i8 @check_uchar() +unsigned char check_uchar() { return 0; } + +// CHECK: define dso_local zeroext i16 @check_ushort() +unsigned short check_ushort() { return 0; } + +// CHECK: define dso_local i32 @check_uint() 
+unsigned int check_uint() { return 0; } + +// CHECK: define dso_local i32 @check_ulong() +unsigned long check_ulong() { return 0; } + +// CHECK: define dso_local i64 @check_ulonglong() +unsigned long long check_ulonglong() { return 0; } + +// CHECK: define dso_local i32 @check_size_t() +size_t check_size_t() { return 0; } + +// CHECK: define dso_local float @check_float() +float check_float() { return 0; } + +// CHECK: define dso_local double @check_double() +double check_double() { return 0; } + +// CHECK: define dso_local fp128 @check_longdouble() +long double check_longdouble() { return 0; } From d52877434372ab9648c8e32460bab417efb05df7 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 1 Oct 2024 23:53:05 +0300 Subject: [PATCH 089/289] [Xtensa] Remove redundant target features --- clang/lib/Driver/ToolChains/Xtensa.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/clang/lib/Driver/ToolChains/Xtensa.cpp b/clang/lib/Driver/ToolChains/Xtensa.cpp index 26a787f591722..271abd98f18ec 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.cpp +++ b/clang/lib/Driver/ToolChains/Xtensa.cpp @@ -298,9 +298,4 @@ void xtensa::getXtensaTargetFeatures(const Driver &D, const llvm::Triple &Triple std::vector &Features) { if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) getXtensaFeaturesFromMcpu(D, Args, A, A->getValue(), Features); - - // Now add any that the user explicitly requested on the command line, - // which may override the defaults. 
- handleTargetFeaturesGroup(D, Triple, Args, Features, - options::OPT_m_xtensa_Features_Group); } From c124b896eada41b2bfe1021c158eabd7712f7624 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 26 Sep 2023 21:47:29 +0300 Subject: [PATCH 090/289] [Xtensa] Implement support of the sysroot --- .../xtensa-esp32-elf/include/c++/8.4.0/.keep | 0 clang/test/Driver/xtensa-toolchain.c | 32 +++++++++++++++++++ 2 files changed, 32 insertions(+) create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/include/c++/8.4.0/.keep diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/include/c++/8.4.0/.keep b/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/include/c++/8.4.0/.keep new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/xtensa-toolchain.c b/clang/test/Driver/xtensa-toolchain.c index aa753abcf6366..7cf4f151de2fc 100644 --- a/clang/test/Driver/xtensa-toolchain.c +++ b/clang/test/Driver/xtensa-toolchain.c @@ -91,3 +91,35 @@ // C-XTENSA-ESP32S3-BAREMETAL-RTTI: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s3-elf-ld" // C-XTENSA-ESP32S3-BAREMETAL-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32s3-elf{{/|\\\\}}8.4.0" // C-XTENSA-ESP32S3-BAREMETAL-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32s3-elf{{/|\\\\}}lib" + +// RUN: %clang %s -### -no-canonical-prefixes \ +// RUN: -target xtensa-esp-elf -mcpu=esp32 --rtlib=platform \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree \ +// RUN: --sysroot=%S/Inputs/multilib_xtensa_tree/xtensa-esp32-elf 2>&1 \ +// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-SYSROOT-BAREMETAL %s + +// C-XTENSA-ESP32-SYSROOT-BAREMETAL: 
"{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" +// C-XTENSA-ESP32-SYSROOT-BAREMETAL: "--sysroot={{.*}}/Inputs/multilib_xtensa_tree/xtensa-esp32-elf" +// C-XTENSA-ESP32-SYSROOT-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0/no-rtti" +// C-XTENSA-ESP32-SYSROOT-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib/no-rtti" + +// RUN: %clang++ %s -### -no-canonical-prefixes \ +// RUN: -target xtensa-esp-elf -mcpu=esp32 -stdlib=libstdc++ --rtlib=platform \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ +// RUN: | FileCheck -check-prefix=CXX-XTENSA-ESP32-BAREMETAL %s + +// CXX-XTENSA-ESP32-BAREMETAL: "-internal-isystem" "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf/include/c++{{/|\\\\}}8.4.0" +// CXX-XTENSA-ESP32-BAREMETAL: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" +// CXX-XTENSA-ESP32-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0/no-rtti" +// CXX-XTENSA-ESP32-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib/no-rtti" + +// RUN: %clang++ %s -### -no-canonical-prefixes \ +// RUN: -target xtensa-esp-elf -mcpu=esp32 -stdlib=libstdc++ --rtlib=platform \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree \ +// RUN: --sysroot=%S/Inputs/multilib_xtensa_tree/xtensa-esp32-elf 2>&1 \ +// RUN: | FileCheck -check-prefix=CXX-XTENSA-ESP32-SYSROOT-BAREMETAL %s + +// CXX-XTENSA-ESP32-SYSROOT-BAREMETAL: "-internal-isystem" 
"{{.*}}Inputs/multilib_xtensa_tree/xtensa-esp32-elf/include/c++/8.4.0" +// CXX-XTENSA-ESP32-SYSROOT-BAREMETAL: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" +// CXX-XTENSA-ESP32-SYSROOT-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0/no-rtti" +// CXX-XTENSA-ESP32-SYSROOT-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib/no-rtti" From 0654dc10fd68c50002702fb62399769ce4e5b84c Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:06 +0300 Subject: [PATCH 091/289] [Xtensa] Fix crtbegin/crtend implementation. Add Xtensa to the list of arcthitectures with crt support in compiler_rt. --- compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake b/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake index 37ad48bef818a..3952d19051c1a 100644 --- a/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake +++ b/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake @@ -16,6 +16,7 @@ set(SPARCV9 sparcv9) set(WASM32 wasm32) set(WASM64 wasm64) set(VE ve) +set(XTENSA xtensa) if(APPLE) set(ARM64 arm64) @@ -30,7 +31,7 @@ endif() set(ALL_SANITIZER_COMMON_SUPPORTED_ARCH ${X86} ${X86_64} ${PPC64} ${RISCV64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} ${S390X} ${SPARC} ${SPARCV9} - ${HEXAGON} ${LOONGARCH64}) + ${HEXAGON} ${XTENSA} ${LOONGARCH64}) set(ALL_ASAN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${RISCV64} ${MIPS32} ${MIPS64} ${PPC64} ${S390X} ${SPARC} ${SPARCV9} ${HEXAGON} ${LOONGARCH64}) From 15d5dd981685f46df2165cc3cea0164cb05bbf9d Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:07 +0300 Subject: [PATCH 092/289] [Xtensa] Fix ill.n 
instruction econding --- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 2 +- llvm/test/MC/Xtensa/xtensa-valid-density.s | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 7ce22ce9c6142..a0df1d0259778 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1441,7 +1441,7 @@ let isBarrier = 1, isTerminator = 1 in { let s = 0; } - def ILL_N : RRRN_Inst<0x0C, (outs), (ins), + def ILL_N : RRRN_Inst<0x0D, (outs), (ins), "ill.n", []>, Requires<[HasDensity]> { let r = 0xf; let s = 0x0; diff --git a/llvm/test/MC/Xtensa/xtensa-valid-density.s b/llvm/test/MC/Xtensa/xtensa-valid-density.s index fc5457ce82ddc..f4315c61e8efd 100644 --- a/llvm/test/MC/Xtensa/xtensa-valid-density.s +++ b/llvm/test/MC/Xtensa/xtensa-valid-density.s @@ -5,5 +5,5 @@ LBL0: # CHECK-INST: ill.n -# CHECK: encoding: [0x6c,0xf0] +# CHECK: encoding: [0x6d,0xf0] ill.n From 7948e4feb3c33822271c036336de9a94c60a5515 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 18 Sep 2024 01:21:57 +0300 Subject: [PATCH 093/289] ci/cd: fix clang version in gitlab-ci.yml --- .gitlab-ci.yml | 164 ++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 162 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 94f59533db5ad..ef0811d9a8ded 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -46,8 +46,8 @@ before_script: .get_release_name: &get_release_name | # using annotated tags REL_NUM=$(git describe --abbrev=7) - REL_SFX="llvm14_0_0" - REL_NAME=${CONF_TARGET}-${REL_SFX}-${REL_NUM}-${PLATFORM_NAME} + REL_SFX="llvm15_0_0" + REL_NAME=${REL_SFX}-${REL_NUM}-${PLATFORM_NAME} ARCHIVE_NAME=${REL_NAME}.${ARCHIVE_EXT} echo "CONF_TARGET: $CONF_TARGET" echo "PLATFORM_NAME: $PLATFORM_NAME" @@ -71,6 +71,163 @@ before_script: # Pack the toolchain .package_toolchain: &package_toolchain | + ${ARCHIVE_TOOL} ${ARCHIVE_NAME} esp-clang/ + mkdir -p 
${DISTRO_DIR} + mv ${ARCHIVE_NAME} ${DISTRO_DIR}/ + echo "${ARCHIVE_NAME}" > ${DISTRO_DIR}/file_${PLATFORM_NAME} + +.build_template: + stage: build + tags: [ "amd64", "build" ] + allow_failure: true + artifacts: + paths: + - ${DIST_DIR}/ + - ${BUILD_DIR}/clang_tests.log + - ${BUILD_DIR}/clang_build.log + when: always + expire_in: 1 day + variables: + BUILD_TOOLCHAIN_CMD_ARGS: "" + # use separate dist dir for universal toolchain + # TODO: remove this var after switching to universal toolchain builds + DIST_DIR: "dist_new" + script: + - *get_release_name + - mkdir ${DOWNLOADS_DIR} + - pushd ${DOWNLOADS_DIR} + - export ESP_GCC_TOOLCHAIN_DIST_BASE=$PWD + - *get_gcc_toolchain + - git clone -b ${NEWLIB_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${NEWLIB_REPO}.git + - export NEWLIB_PATH=$PWD/${NEWLIB_REPO} + - git clone -b ${BINUTILS_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${BINUTILS_REPO}.git + - export BINUTILS_PATH=$PWD/${BINUTILS_REPO} + - git clone -b ${XTENSA_OVERLAYS_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${XTENSA_OVERLAYS_REPO}.git + - export XTENSA_OVERLAYS_PATH=$PWD/${XTENSA_OVERLAYS_REPO} + - popd + - *get_clang_toolchain_build_scripts + - *fix_origin_remote_for_public + - export ESP_GCC_TOOLCHAIN_REL_VER=${GCC_REL_NAME} + - export LLVM_PROJECT_PATH=$PWD + - export BUILD_PATH=$PWD/${BUILD_DIR} + - mkdir -p ${BUILD_PATH} + - export USE_PARALLEL_LINK_JOBS=2 + - ${BUILD_TOOLCHAIN_CMD} --llvm-path=${LLVM_PROJECT_PATH} --newlib-path=${NEWLIB_PATH} + --gcc-toolchains-path=${ESP_GCC_TOOLCHAIN_DIST_BASE} --binutils-path=${BINUTILS_PATH} + --xtensa-overlays-path=${XTENSA_OVERLAYS_PATH} ${BUILD_TOOLCHAIN_CMD_ARGS} ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/clang_build.log + # Run unit tests for native build only. + # Run as non-root user because permission tests fail when run by root. 
+ - export BUILD_HOST=$(gcc -dumpmachine) + - export LLVM_BUILD_PATH=${LLVM_PROJECT_PATH}/llvm/build-Release-${CONF_HOST} + - if [ "${CONF_HOST}" == "${BUILD_HOST}" ]; then + echo "Run unit tests for native build"; + useradd -m test_runner; + chown -R test_runner ${LLVM_BUILD_PATH}; + touch ${BUILD_PATH}/clang_tests.log; + chmod o+w ${BUILD_PATH}/clang_tests.log; + runuser -l test_runner -c 'cmake --build '${LLVM_BUILD_PATH}' --target check-all 2>&1 > '${BUILD_PATH}'/clang_tests.log'; + fi + - export DISTRO_DIR=$PWD/$DIST_DIR + - pushd ${BUILD_PATH} + - *package_toolchain + - popd + +build_x86_64-linux-gnu: + extends: .build_template + variables: + CONF_HOST: "x86_64-linux-gnu" + PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" + ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + BUILD_TOOLCHAIN_CMD: "./build-toolchain.sh" + +build_x86_64-w64-mingw32: + extends: .build_template + needs: + - job: build_x86_64-linux-gnu + before_script: + - *use_ci_tools + - *add_gitlab_key + # get ARCHIVE_NAME for Linux release. 
Modify vars to make get_release_name working properly + - export PLATFORM_NAME_ORIG=${PLATFORM_NAME} + - export ARCHIVE_EXT_ORIG=${ARCHIVE_EXT} + - export PLATFORM_NAME=${PLATFORM_NAME_LINUX} + - export ARCHIVE_EXT=${ARCHIVE_EXT_LINUX} + - *get_release_name + # restore modified vars + - export PLATFORM_NAME=${PLATFORM_NAME_ORIG} + - export ARCHIVE_EXT=${ARCHIVE_EXT_ORIG} + # unpack Linux release to re-use it as native Clang for Windows build + - mkdir -p esp-clang-${PLATFORM_NAME_LINUX} + - ${UNARCHIVE_TOOL_LINUX} ${DIST_DIR}/${ARCHIVE_NAME} -C esp-clang-${PLATFORM_NAME_LINUX} + # we do not want to keep artifacts from 'x86_64-linux-gnu' job + - rm -rf ${DIST_DIR} + - rm -rf ${BUILD_DIR} + # add build command args speciifc for Windows build + - export BUILD_TOOLCHAIN_CMD_ARGS="--host=${CONF_HOST} --native-esp-clang-path=$PWD/esp-clang-${PLATFORM_NAME_LINUX}" + variables: + CONF_HOST: "x86_64-w64-mingw32" + PLATFORM_NAME: "${PLATFORM_NAME_WIN}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_WIN}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_WIN}" + ARCHIVE_EXT: "${ARCHIVE_EXT_WIN}" + BUILD_TOOLCHAIN_CMD: "./build-toolchain-win.sh" + +test_x86_64-linux-gnu: + stage: test + tags: [ "amd64", "build" ] + allow_failure: true + needs: + - job: build_x86_64-linux-gnu + variables: + PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" + ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + # use separate dist dir for universal toolchain + # TODO: remove this var after switching to universal toolchain builds + DIST_DIR: "dist_new" + script: + - *get_release_name + - ${UNARCHIVE_TOOL} ${DIST_DIR}/${ARCHIVE_NAME} + # getting testsuite + - git clone -b ${LLVM_GCC_TESTSUITE_REF} --depth 1 $GITLAB_SSH_SERVER/idf/llvm-xtensa-testsuite.git + # preparing testsuite + - export PATH=${PWD}/esp-clang/bin:$PATH + - cd llvm-xtensa-testsuite + # qemu + - ./qemu_esp32_install.sh + # run testsuite for esp32 + - ./run_esp32_tests.sh + 
+########################################################################### +#################### START OF TEMPORARY LEGACY CODE ####################### +# TODO: the code below is to be removed after migration to new build script +.get_release_name_old: &get_release_name_old | + # using annotated tags + REL_NUM=$(git describe --abbrev=7) + REL_SFX="llvm15_0_0" + REL_NAME=${CONF_TARGET}-${REL_SFX}-${REL_NUM}-${PLATFORM_NAME} + ARCHIVE_NAME=${REL_NAME}.${ARCHIVE_EXT} + echo "PLATFORM_NAME: $PLATFORM_NAME" + echo "REL_NUM: $REL_NUM" + echo "REL_NAME: $REL_NAME" + echo "ARCHIVE_NAME: $ARCHIVE_NAME" + +.get_gcc_toolchain_old: &get_gcc_toolchain_old | + wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/esp-2021r2-patch3/${XTENSA_GCC_TOOLCHAIN} + ${UNARCHIVE_TOOL} ${XTENSA_GCC_TOOLCHAIN} + if [[ "$XTENSA_GCC_TOOLCHAIN" == *"linux-amd64"* ]]; then + cp -r xtensa-esp32-elf ${XTENSA_CLANG_TOOLCHAIN} + else + mv xtensa-esp32-elf ${XTENSA_CLANG_TOOLCHAIN} + wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/esp-2021r2-patch3/xtensa-esp32-elf-${GCC_REL_NAME}-linux-amd64.tar.gz + tar -xf xtensa-esp32-elf-${GCC_REL_NAME}-linux-amd64.tar.gz + fi + export GCC_ESP32_LINUX_TOOLCHAIN="xtensa-esp32-elf" + +.package_toolchain_old: &package_toolchain_old | ${ARCHIVE_TOOL} ${ARCHIVE_NAME} ${XTENSA_CLANG_TOOLCHAIN}/ mkdir -p ${DIST_DIR} mv ${ARCHIVE_NAME} ${DIST_DIR}/ @@ -84,6 +241,9 @@ before_script: - ${DIST_DIR}/ when: always expire_in: 10 day + variables: + XTENSA_CLANG_TOOLCHAIN_REF: "release_esp32_clang_15.0.0_gcc_8.4.0" + GCC_REL_NAME: "gcc8_4_0-esp-2021r2-patch3" script: - *get_release_name - *get_gcc_toolchain From 1c85ac1dc806da4cc0f05b9fee3bccd226269e40 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 26 Sep 2023 22:12:15 +0300 Subject: [PATCH 094/289] [Xtensa] fix compiler-rt crt build script --- compiler-rt/cmake/crt-config-ix.cmake | 3 ++- 1 file changed, 2 insertions(+), 
1 deletion(-) diff --git a/compiler-rt/cmake/crt-config-ix.cmake b/compiler-rt/cmake/crt-config-ix.cmake index ebc7d671e74ee..0897caabdef2e 100644 --- a/compiler-rt/cmake/crt-config-ix.cmake +++ b/compiler-rt/cmake/crt-config-ix.cmake @@ -31,10 +31,11 @@ set(PPC64 powerpc64 powerpc64le) set(RISCV32 riscv32) set(RISCV64 riscv64) set(VE ve) +set(XTENSA xtensa) set(ALL_CRT_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${PPC32} ${PPC64} ${RISCV32} ${RISCV64} ${VE} ${HEXAGON} ${LOONGARCH64} - ${MIPS32} ${MIPS64} ${SPARC} ${SPARCV9}) + ${MIPS32} ${MIPS64} ${SPARC} ${SPARCV9} ${XTENSA}) include(CompilerRTUtils) From ba244a57b1a82dcd8ed9df2847aa8935b1724f32 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 15:11:36 +0300 Subject: [PATCH 095/289] [Xtensa] Implement asm macro for bbci/bbsi. Add bbci.l macro for bbci instructon and bbsi.l for bbsi. --- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 4 ++++ llvm/test/MC/Xtensa/Core/branch.s | 8 ++++++++ 2 files changed, 12 insertions(+) diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index a0df1d0259778..e44f126d69a15 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -412,6 +412,8 @@ def BBCI : RRI8_Inst<0x07, (outs), let imm8 = target; } +def : InstAlias<"bbci.l\t$s, $imm, $target", (BBCI AR:$s, uimm5:$imm, brtarget:$target)>; + def BBSI : RRI8_Inst<0x07, (outs), (ins AR:$s, uimm5:$imm, brtarget:$target), "bbsi\t$s, $imm, $target", []> { @@ -424,6 +426,8 @@ def BBSI : RRI8_Inst<0x07, (outs), let imm8 = target; } +def : InstAlias<"bbsi.l\t$s, $imm, $target", (BBSI AR:$s, uimm5:$imm, brtarget:$target)>; + //===----------------------------------------------------------------------===// // Call and jump instructions //===----------------------------------------------------------------------===// diff --git a/llvm/test/MC/Xtensa/Core/branch.s b/llvm/test/MC/Xtensa/Core/branch.s index 
7e9ba5f713345..66b68a610c1d2 100644 --- a/llvm/test/MC/Xtensa/Core/branch.s +++ b/llvm/test/MC/Xtensa/Core/branch.s @@ -29,6 +29,10 @@ bbci a3, 16, LBL0 # CHECK: encoding: [0x07,0x73,A] bbci a3, (16), LBL0 +# CHECK-INST: bbci a3, 16, LBL0 +# CHECK: encoding: [0x07,0x73,A] +bbci.l a3, 16, LBL0 + # Instruction format RRI8 # CHECK-INST: bbs a12, a5, LBL0 # CHECK: encoding: [0x57,0xdc,A] @@ -39,6 +43,10 @@ bbs a12, a5, LBL0 # CHECK: encoding: [0x07,0xf3,A] bbsi a3, 16, LBL0 +# CHECK-INST: bbsi a3, 16, LBL0 +# CHECK: encoding: [0x07,0xf3,A] +bbsi.l a3, 16, LBL0 + # Instruction format RRI8 # CHECK-INST: bnall a7, a3, LBL0 # CHECK: encoding: [0x37,0xc7,A] From b2c2d254c4fc1fd0649e24a5c1de3ea9a6927dfa Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 17:26:56 +0300 Subject: [PATCH 096/289] [Xtensa] Implement support of region asm directives in asm parser. Implement support of the regions using ".begin" and ".end" directives, currently only "literal_prefix" region is supported. --- .../Xtensa/AsmParser/XtensaAsmParser.cpp | 105 ++++++++++++++++++ .../MCTargetDesc/XtensaTargetStreamer.cpp | 18 ++- .../MCTargetDesc/XtensaTargetStreamer.h | 5 + llvm/test/MC/Xtensa/directive-region.s | 25 +++++ 4 files changed, 151 insertions(+), 2 deletions(-) create mode 100644 llvm/test/MC/Xtensa/directive-region.s diff --git a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp index 648d58e8c18a4..776529b3c43d8 100644 --- a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp +++ b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp @@ -35,6 +35,21 @@ using namespace llvm; struct XtensaOperand; class XtensaAsmParser : public MCTargetAsmParser { + // Xtensa GNU assembler supports region definitions using + // .begin and .end directives. Currently only .literal_prefix regions are + // supported. 
+ struct RegionInfo { + public: + SMLoc Loc; + StringRef RegionDirectiveName; + StringRef LiteralPrefixName; + RegionInfo() = default; + RegionInfo( SMLoc L, StringRef DirectiveName, StringRef Name = "") + : Loc(L), RegionDirectiveName(DirectiveName), LiteralPrefixName(Name) {} + }; + + // Stack of active region definitions. + SmallVector RegionInProgress; SMLoc getLoc() const { return getParser().getTok().getLoc(); } @@ -76,6 +91,8 @@ class XtensaAsmParser : public MCTargetAsmParser { } ParseStatus parsePCRelTarget(OperandVector &Operands); bool parseLiteralDirective(SMLoc L); + bool parseBeginDirective(SMLoc L); + bool parseEndDirective(SMLoc L); bool checkRegister(unsigned RegNo); @@ -958,6 +975,79 @@ bool XtensaAsmParser::parseLiteralDirective(SMLoc L) { return false; } +bool XtensaAsmParser::parseBeginDirective(SMLoc L) { + MCAsmParser &Parser = getParser(); + const MCExpr *Value; + SMLoc BeginLoc = getLexer().getLoc(); + XtensaTargetStreamer &TS = this->getTargetStreamer(); + + if (Parser.parseExpression(Value)) + return true; + + const MCSymbolRefExpr *SE = dyn_cast(Value); + if (!SE) + return Error(BeginLoc, "region option must be a symbol"); + + StringRef RegionDirectiveName = SE->getSymbol().getName(); + + if (RegionDirectiveName == "literal_prefix") { + + SMLoc OpcodeLoc = getLexer().getLoc(); + if (parseOptionalToken(AsmToken::EndOfStatement)) + return Error(OpcodeLoc, "expected literal section name"); + + if (Parser.parseExpression(Value)) + return true; + + OpcodeLoc = getLexer().getLoc(); + SE = dyn_cast(Value); + if (!SE) + return Error(OpcodeLoc, "literal_prefix name must be a symbol"); + + StringRef LiteralPrefixName = SE->getSymbol().getName(); + TS.setLiteralSectionPrefix(LiteralPrefixName); + RegionInProgress.emplace_back(BeginLoc, RegionDirectiveName, LiteralPrefixName); + } else { + return Error(BeginLoc, "unsupported region directive"); + } + + return false; +} + +bool XtensaAsmParser::parseEndDirective(SMLoc L) { + MCAsmParser &Parser = 
getParser(); + const MCExpr *Value; + SMLoc EndLoc = getLexer().getLoc(); + XtensaTargetStreamer &TS = this->getTargetStreamer(); + + if (Parser.parseExpression(Value)) + return true; + + const MCSymbolRefExpr *SE = dyn_cast(Value); + if (!SE) + return Error(EndLoc, "region option must be a symbol"); + + StringRef RegionDirectiveName = SE->getSymbol().getName(); + + if (RegionInProgress.empty()) + return Error(EndLoc, ".end of the region without .begin"); + else { + RegionInfo Region = RegionInProgress.pop_back_val(); + + if (RegionInProgress.empty()) + TS.setLiteralSectionPrefix(""); + else + TS.setLiteralSectionPrefix(Region.LiteralPrefixName); + + if (RegionDirectiveName != Region.RegionDirectiveName) { + return Error(EndLoc, ".end directive differs from .begin directive"); + } + } + + // Error: does not match begin literal_prefix + return false; +} + ParseStatus XtensaAsmParser::parseDirective(AsmToken DirectiveID) { StringRef IDVal = DirectiveID.getString(); SMLoc Loc = getLexer().getLoc(); @@ -972,6 +1062,21 @@ ParseStatus XtensaAsmParser::parseDirective(AsmToken DirectiveID) { return parseLiteralDirective(Loc); } + if (IDVal == ".literal") { + parseLiteralDirective(Loc); + return false; + } + + if (IDVal == ".begin") { + parseBeginDirective(Loc); + return false; + } + + if (IDVal == ".end") { + parseEndDirective(Loc); + return false; + } + return ParseStatus::NoMatch; } diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.cpp index 0ea70cff4d404..43ca314f9d0c1 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.cpp @@ -85,8 +85,15 @@ void XtensaTargetELFStreamer::emitLiteral(MCSymbol *LblSym, const MCExpr *Value, MCStreamer &OutStreamer = getStreamer(); if (SwitchLiteralSection) { MCContext &Context = OutStreamer.getContext(); + StringRef LiteralSectionPrefix = getLiteralSectionPrefix(); + 
std::string SectionName; + auto *CS = static_cast(OutStreamer.getCurrentSectionOnly()); - std::string SectionName = getLiteralSectionName(CS->getName()); + if (LiteralSectionPrefix != "") { + SectionName = LiteralSectionPrefix.str() + ".literal"; + } else { + SectionName = getLiteralSectionName(CS->getName()); + } MCSection *ConstSection = Context.getELFSection( SectionName, ELF::SHT_PROGBITS, ELF::SHF_EXECINSTR | ELF::SHF_ALLOC); @@ -106,7 +113,14 @@ void XtensaTargetELFStreamer::emitLiteral(MCSymbol *LblSym, const MCExpr *Value, void XtensaTargetELFStreamer::startLiteralSection(MCSection *BaseSection) { MCContext &Context = getStreamer().getContext(); - std::string SectionName = getLiteralSectionName(BaseSection->getName()); + StringRef LiteralSectionPrefix = getLiteralSectionPrefix(); + std::string SectionName; + + if (LiteralSectionPrefix != "") { + SectionName = LiteralSectionPrefix.str() + ".literal"; + } else { + SectionName = getLiteralSectionName(BaseSection->getName()); + } MCSection *ConstSection = Context.getELFSection( SectionName, ELF::SHT_PROGBITS, ELF::SHF_EXECINSTR | ELF::SHF_ALLOC); diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.h index 817940e880b3c..73df9c25d28e8 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.h +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.h @@ -17,6 +17,7 @@ namespace llvm { class formatted_raw_ostream; class XtensaTargetStreamer : public MCTargetStreamer { + StringRef LiteralSectionPrefix = ""; public: XtensaTargetStreamer(MCStreamer &S); @@ -31,6 +32,10 @@ class XtensaTargetStreamer : public MCTargetStreamer { // Switch to the literal section. The BaseSection name is used to construct // literal section name. 
virtual void startLiteralSection(MCSection *BaseSection) = 0; + + void setLiteralSectionPrefix(StringRef Name) { LiteralSectionPrefix = Name; } + + StringRef getLiteralSectionPrefix() { return LiteralSectionPrefix; } }; class XtensaTargetAsmStreamer : public XtensaTargetStreamer { diff --git a/llvm/test/MC/Xtensa/directive-region.s b/llvm/test/MC/Xtensa/directive-region.s new file mode 100644 index 0000000000000..515e5040e130b --- /dev/null +++ b/llvm/test/MC/Xtensa/directive-region.s @@ -0,0 +1,25 @@ +# RUN: llvm-mc -triple xtensa-esp-elf -filetype obj -o - %s \ +# RUN: | llvm-readobj -S --sd - \ +# RUN: | FileCheck %s + + .text + .begin literal_prefix .ExceptionVector + .literal_position + .literal .LCPI0_0, 305419896 + .global test_literal + .p2align 2 + .type test_literal,@function +test_literal: +# %bb.0: + entry a1, 32 + mov.n a7, a1 + l32r a2, .LCPI0_0 + retw.n + .end literal_prefix + +# CHECK: Section { +# CHECK: Name: .ExceptionVector.literal +# CHECK: SectionData ( +# CHECK: 0000: 78563412 +# CHECK: ) +# CHECK: } From 6beb5f2e0b5cf576d9b4c8918de92c3356ae0537 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 17:33:01 +0300 Subject: [PATCH 097/289] [Xtensa] Corrected asm parser. 
--- .../Xtensa/AsmParser/XtensaAsmParser.cpp | 67 ++++++++++++++++--- .../Xtensa/MCTargetDesc/XtensaMCAsmInfo.cpp | 4 +- llvm/test/MC/Xtensa/Core/arith.s | 10 +++ llvm/test/MC/Xtensa/Core/invalid.s | 10 +-- llvm/test/MC/Xtensa/Core/processor-control.s | 4 ++ 5 files changed, 80 insertions(+), 15 deletions(-) diff --git a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp index 776529b3c43d8..080dd0521c060 100644 --- a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp +++ b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp @@ -107,6 +107,10 @@ class XtensaAsmParser : public MCTargetAsmParser { XtensaAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser, const MCInstrInfo &MII, const MCTargetOptions &Options) : MCTargetAsmParser(Options, STI, MII) { + Parser.addAliasForDirective(".half", ".2byte"); + Parser.addAliasForDirective(".hword", ".2byte"); + Parser.addAliasForDirective(".word", ".4byte"); + Parser.addAliasForDirective(".dword", ".8byte"); setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); } @@ -252,7 +256,10 @@ struct XtensaOperand : public MCParsedAsmOperand { return Kind == Immediate && inRange(getImm(), MinValue, MaxValue); } - bool isImm8() const { return isImm(-128, 127); } + bool isImm8() const { + //The addi instruction maybe expaned to addmi and addi. 
+ return isImm((-32768 - 128), (32512 + 127)); + } bool isImm8_sh8() const { return isImm(-32768, 32512) && @@ -483,6 +490,40 @@ bool XtensaAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc, Inst.setLoc(IDLoc); const unsigned Opcode = Inst.getOpcode(); switch (Opcode) { + case Xtensa::ADDI: { + int64_t Imm = Inst.getOperand(2).getImm(); + // Expand 16-bit immediate in ADDI instruction: + // ADDI rd, rs, imm - > ADMI rd, rs, (imm & 0xff00); ADDI rd, rd, (imm & 0xff) + if ((Imm < -128) || (Imm > 127)) { + unsigned DReg = Inst.getOperand(0).getReg(); + unsigned SReg = Inst.getOperand(1).getReg(); + MCInst ADDMIInst; + MCInst ADDIInst; + int64_t ImmHi = Imm & (~((uint64_t)0xff)); + int64_t ImmLo = Imm & 0xff; + + if (ImmLo > 127) { + ImmHi += 0x100; + ImmLo = ImmLo - 0x100; + } + + ADDMIInst.setOpcode(Xtensa::ADDMI); + ADDMIInst.addOperand(MCOperand::createReg(DReg)); + ADDMIInst.addOperand(MCOperand::createReg(SReg)); + ADDMIInst.addOperand(MCOperand::createImm(ImmHi)); + ADDMIInst.setLoc(IDLoc); + + Out.emitInstruction(ADDMIInst, *STI); + + ADDIInst.setOpcode(Xtensa::ADDI); + ADDIInst.addOperand(MCOperand::createReg(DReg)); + ADDIInst.addOperand(MCOperand::createReg(DReg)); + ADDIInst.addOperand(MCOperand::createImm(ImmLo)); + ADDIInst.setLoc(IDLoc); + + Inst = ADDIInst; + } + } break; case Xtensa::L32R: { const MCSymbolRefExpr *OpExpr = static_cast(Inst.getOperand(1).getExpr()); @@ -575,7 +616,7 @@ bool XtensaAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, } case Match_InvalidImm8: return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), - "expected immediate in range [-128, 127]"); + "expected immediate in range [-32896, 32639]"); case Match_InvalidImm8_sh8: return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), "expected immediate in range [-32768, 32512], first 8 bits " @@ -690,6 +731,7 @@ ParseStatus XtensaAsmParser::parseRegister(OperandVector &Operands, AsmToken Buf[2]; std::string RegName = ""; int64_t Num; + bool IsIdentifier = 
false; // If this a parenthesised register name is allowed, parse it atomically if (AllowParens && getLexer().is(AsmToken::LParen)) { @@ -708,10 +750,17 @@ ParseStatus XtensaAsmParser::parseRegister(OperandVector &Operands, default: return ParseStatus::NoMatch; case AsmToken::Integer: + case AsmToken::LParen: if ((!SR) && (!UR)) return ParseStatus::NoMatch; + const MCExpr *Res; + + if (getParser().parseExpression(Res)) + return ParseStatus::Failure; + + if (!Res->evaluateAsAbsolute(Num)) + return ParseStatus::NoMatch; - Num = getLexer().getTok().getIntVal(); // Parse case when we expect UR operand as special case, // because SR and UR registers may have the same number // and such situation may lead to confilct @@ -739,6 +788,7 @@ ParseStatus XtensaAsmParser::parseRegister(OperandVector &Operands, RegNo = MatchRegisterAltName(RegName); break; case AsmToken::Identifier: + IsIdentifier = true; RegName = getLexer().getTok().getIdentifier().str(); RegNo = MatchRegisterName(RegName); if (RegNo == 0) @@ -760,7 +810,10 @@ ParseStatus XtensaAsmParser::parseRegister(OperandVector &Operands, Operands.push_back(XtensaOperand::createToken("(", FirstS)); SMLoc S = getLoc(); SMLoc E = getParser().getTok().getEndLoc(); - getLexer().Lex(); + + if (IsIdentifier) + getLexer().Lex(); + Operands.push_back(XtensaOperand::createReg(RegNo, S, E)); if (HadParens) { @@ -789,12 +842,8 @@ ParseStatus XtensaAsmParser::parseImmediate(OperandVector &Operands) { return ParseStatus::Failure; break; case AsmToken::Identifier: { - StringRef Identifier; - if (getParser().parseIdentifier(Identifier)) + if (getParser().parseExpression(Res)) return ParseStatus::Failure; - - MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier); - Res = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext()); break; } case AsmToken::Percent: diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCAsmInfo.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCAsmInfo.cpp index 28764d369247a..4537369b017d0 
100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCAsmInfo.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCAsmInfo.cpp @@ -23,10 +23,12 @@ XtensaMCAsmInfo::XtensaMCAsmInfo(const Triple &TT) { PrivateGlobalPrefix = ".L"; CommentString = "#"; ZeroDirective = "\t.space\t"; + Data16bitsDirective = "\t.half\t"; + Data32bitsDirective = "\t.word\t"; Data64bitsDirective = "\t.quad\t"; GlobalDirective = "\t.global\t"; UsesELFSectionDirectiveForBSS = true; SupportsDebugInformation = true; ExceptionsType = ExceptionHandling::DwarfCFI; - AlignmentIsInBytes = false; + AlignmentIsInBytes = true; } diff --git a/llvm/test/MC/Xtensa/Core/arith.s b/llvm/test/MC/Xtensa/Core/arith.s index 2f3146b9533c9..fb6ac2608b0a9 100644 --- a/llvm/test/MC/Xtensa/Core/arith.s +++ b/llvm/test/MC/Xtensa/Core/arith.s @@ -27,6 +27,16 @@ addi a8, sp, -128 # CHECK-INST: addi a8, a1, -12 # CHECK: encoding: [0x82,0xc1,0xf4] addi a8, a1, -12 +# CHECK-INST: addmi a8, a1, 256 +# CHECK: encoding: [0x82,0xd1,0x01] +# CHECK-INST: addi a8, a8, 0 +# CHECK: encoding: [0x82,0xc8,0x00] +addi a8, a1, 256 +# CHECK-INST: addmi a8, a1, -9984 +# CHECK: encoding: [0x82,0xd1,0xd9] +# CHECK-INST: addi a8, a8, -16 +# CHECK: encoding: [0x82,0xc8,0xf0] +addi a8, a1, -10000 # Instruction format RRI8 # CHECK-INST: addmi a1, a2, 32512 diff --git a/llvm/test/MC/Xtensa/Core/invalid.s b/llvm/test/MC/Xtensa/Core/invalid.s index c7473e90c10ba..7fc7b47db1337 100644 --- a/llvm/test/MC/Xtensa/Core/invalid.s +++ b/llvm/test/MC/Xtensa/Core/invalid.s @@ -5,12 +5,12 @@ LBL0: # Out of range immediates # imm8 -addi a1, a2, 300 -# CHECK: :[[#@LINE-1]]:14: error: expected immediate in range [-128, 127] +addi a1, a2, -33000 +# CHECK: :[[#@LINE-1]]:14: error: expected immediate in range [-32896, 32639] # imm8 -addi a1, a2, -129 -# CHECK: :[[#@LINE-1]]:14: error: expected immediate in range [-128, 127] +addi a1, a2, 34000 +# CHECK: :[[#@LINE-1]]:14: error: expected immediate in range [-32896, 32639] # imm8_sh8 addmi a1, a2, 33 @@ -66,7 
+66,7 @@ aaa a10, a12 and sp, a2, 10 # CHECK: :[[#@LINE-1]]:13: error: invalid operand for instruction addi sp, a1, a2 -# CHECK: :[[#@LINE-1]]:14: error: expected immediate in range [-128, 127] +# CHECK: :[[#@LINE-1]]:14: error: expected immediate in range [-32896, 32639] # Check invalid register names for different formats # Instruction format RRR diff --git a/llvm/test/MC/Xtensa/Core/processor-control.s b/llvm/test/MC/Xtensa/Core/processor-control.s index ebbc577db7722..6295786dfb61a 100644 --- a/llvm/test/MC/Xtensa/Core/processor-control.s +++ b/llvm/test/MC/Xtensa/Core/processor-control.s @@ -68,6 +68,10 @@ wsr.sar a8 # CHECK: encoding: [0x80,0x03,0x13] wsr a8, 3 +# CHECK-INST: wsr a8, sar +# CHECK: encoding: [0x80,0x03,0x13] +wsr a8, (2 + 1) + # Instruction format RRR # CHECK-INST: xsr a8, sar # CHECK: encoding: [0x80,0x03,0x61] From e6e04f0b0f1e5db73399596369c0447a70a88fb1 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 27 Sep 2023 01:19:34 +0300 Subject: [PATCH 098/289] riscv/gnu: Adds `no-rtti` multilib support --- clang/lib/Driver/ToolChains/Gnu.cpp | 41 ++++++++++++++++--- .../8.4.0/rv32i/ilp32/no-rtti/crtbegin.o | 0 .../8.4.0/rv32i/ilp32/no-rtti/crtend.o | 0 .../8.4.0/rv32imac/ilp32/no-rtti/crtbegin.o | 0 .../8.4.0/rv32imac/ilp32/no-rtti/crtend.o | 0 .../8.4.0/rv32imafc/ilp32f/no-rtti/crtbegin.o | 0 .../8.4.0/rv32imafc/ilp32f/no-rtti/crtend.o | 0 .../8.4.0/rv32imc/ilp32/no-rtti/crtbegin.o | 0 .../8.4.0/rv32imc/ilp32/no-rtti/crtend.o | 0 .../lib/rv32i/ilp32/no-rtti/crt0.o | 0 .../lib/rv32imac/ilp32/no-rtti/crt0.o | 0 .../lib/rv32imafc/ilp32f/no-rtti/crt0.o | 0 .../lib/rv32imc/ilp32/no-rtti/crt0.o | 0 clang/test/Driver/riscv32-esp-toolchain.c | 19 +++++++++ 14 files changed, 54 insertions(+), 6 deletions(-) create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/no-rtti/crtbegin.o create mode 100644 
clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/no-rtti/crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/no-rtti/crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/no-rtti/crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/no-rtti/crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/no-rtti/crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/no-rtti/crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/no-rtti/crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32i/ilp32/no-rtti/crt0.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imac/ilp32/no-rtti/crt0.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imafc/ilp32f/no-rtti/crt0.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imc/ilp32/no-rtti/crt0.o diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp index d36dff64ad28a..2f52884ccbfcb 100644 --- a/clang/lib/Driver/ToolChains/Gnu.cpp +++ b/clang/lib/Driver/ToolChains/Gnu.cpp @@ -1864,14 +1864,37 @@ static void findRISCVBareMetalMultilibs(const Driver &D, if (TargetTriple.getVendor() == llvm::Triple::Espressif) Ms.emplace_back(MultilibBuilder()); + if (TargetTriple.getVendor() == llvm::Triple::Espressif) { + Ms.emplace_back(MultilibBuilder()); + Ms.emplace_back(MultilibBuilder("no-rtti") + .flag("-fno-rtti") + .flag("-frtti", /*Disallow=*/true)); + } for 
(auto Element : RISCVMultilibSet) { - // multilib path rule is ${march}/${mabi} - Ms.emplace_back( - MultilibBuilder( - (Twine(Element.march) + "/" + Twine(Element.mabi)).str()) - .flag(Twine("-march=", Element.march).str()) - .flag(Twine("-mabi=", Element.mabi).str())); + if (TargetTriple.getVendor() == llvm::Triple::Espressif) { + // multilib path rule is ${march}/${mabi} + Ms.emplace_back( + MultilibBuilder( + (Twine(Element.march) + "/" + Twine(Element.mabi)).str()) + .flag(Twine("-march=", Element.march).str()) + .flag(Twine("-mabi=", Element.mabi).str())); + /* no-rtti version for every ${march}/${mabi} */ + Ms.emplace_back( + MultilibBuilder( + (Twine(Element.march) + "/" + Twine(Element.mabi) + "/no-rtti").str()) + .flag(Twine("-march=", Element.march).str()) + .flag(Twine("-mabi=", Element.mabi).str()) + .flag("-fno-rtti") + .flag("-frtti", /*Disallow=*/true)); + } else { + // multilib path rule is ${march}/${mabi} + Ms.emplace_back( + MultilibBuilder( + (Twine(Element.march) + "/" + Twine(Element.mabi)).str()) + .flag(Twine("-march=", Element.march).str()) + .flag(Twine("-mabi=", Element.mabi).str())); + } } MultilibSet RISCVMultilibs = MultilibSetBuilder() @@ -1907,6 +1930,12 @@ static void findRISCVBareMetalMultilibs(const Driver &D, } } + if (TargetTriple.getVendor() == llvm::Triple::Espressif) { + addMultilibFlag( + Args.hasFlag(options::OPT_frtti, options::OPT_fno_rtti, true), "frtti", + Flags); + } + if (selectRISCVMultilib(RISCVMultilibs, MArch, Flags, Result.SelectedMultilibs)) Result.Multilibs = RISCVMultilibs; diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/no-rtti/crtbegin.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/no-rtti/crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/no-rtti/crtend.o 
b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/no-rtti/crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/no-rtti/crtbegin.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/no-rtti/crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/no-rtti/crtend.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/no-rtti/crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/no-rtti/crtbegin.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/no-rtti/crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/no-rtti/crtend.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/no-rtti/crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/no-rtti/crtbegin.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/no-rtti/crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/no-rtti/crtend.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/no-rtti/crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git 
a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32i/ilp32/no-rtti/crt0.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32i/ilp32/no-rtti/crt0.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imac/ilp32/no-rtti/crt0.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imac/ilp32/no-rtti/crt0.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imafc/ilp32f/no-rtti/crt0.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imafc/ilp32f/no-rtti/crt0.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imc/ilp32/no-rtti/crt0.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imc/ilp32/no-rtti/crt0.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/riscv32-esp-toolchain.c b/clang/test/Driver/riscv32-esp-toolchain.c index 5c34c0a3bf7a1..34ef6871f30f0 100644 --- a/clang/test/Driver/riscv32-esp-toolchain.c +++ b/clang/test/Driver/riscv32-esp-toolchain.c @@ -81,6 +81,25 @@ // CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-lstdc++" "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "-lgcc" // CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32{{/|\\\\}}crtend.o" +// RUN: %clangxx %s -### -no-canonical-prefixes -target riscv32-esp-elf \ +// RUN: -ffreestanding -stdlib=libstdc++ --rtlib=libgcc --ld-path=riscv32-esp-elf-ld \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_esp_elf_sdk \ +// RUN: --sysroot=%S/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf \ +// RUN: -fno-rtti 2>&1 \ +// RUN: | FileCheck 
-check-prefix=CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32 %s + +// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "-internal-isystem" "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/include/c++{{/|\\\\}}8.4.0" +// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-as" +// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "-mabi" "ilp32" "-march" "rv32imac" +// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" +// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "--sysroot={{.*}}/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf" +// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "-m" "elf32lriscv" +// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/no-rtti{{/|\\\\}}crtbegin.o" +// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0" +// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib" +// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "-lstdc++" "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "-lgcc" +// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/no-rtti{{/|\\\\}}crtend.o" + // RUN: %clang %s -### -no-canonical-prefixes -target riscv32-esp-elf \ // RUN: -march=rv32i -mabi=ilp32 \ // RUN: -ffreestanding --rtlib=libgcc --ld-path=riscv32-esp-elf-ld --sysroot= \ From 0824de3e8e5f6e294227f9b3598914714b6bbc7c Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 18 Sep 2024 18:32:10 +0300 Subject: [PATCH 099/289] esp/ci: Adds MacOS x86_64/ARM64 --- .gitlab-ci.yml | 165 +++++++++++++++++++++++++++++++++++++------------ 1 file changed, 127 
insertions(+), 38 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index ef0811d9a8ded..5ce8137132c93 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -4,19 +4,28 @@ stages: - test - public_deploy -image: ${CI_DOCKER_REGISTRY}/llvm-build:3 +image: ${CI_DOCKER_REGISTRY}/llvm-build:4 variables: - CONF_TARGET: "xtensa-esp32-elf" - XTENSA_CLANG_TOOLCHAIN: "${CONF_TARGET}-clang" + # move all these to CI/CD settings + GCC_REL_NAME: "esp-2022r1-RC1" + GCC_REL_VER: "gcc11_2_0" + NEWLIB_REPO: "newlib-cygwin" + NEWLIB_REF: "esp_based_on_4_1_0" + BINUTILS_REPO: "binutils-gdb" + BINUTILS_REF: "esp_based_on_binutils-2_35" + XTENSA_OVERLAYS_REPO: "xtensa-overlays" + XTENSA_OVERLAYS_REF: "master" + # TODO: update vars below to tags names after related branches are merged in those repos + XTENSA_CLANG_TOOLCHAIN_REF: "build_macos_arm64" + LLVM_GCC_TESTSUITE_REF: "feature/toolchain_build_script" + + PLATFORM_NAME_LINUX_ARM64: "linux-arm64" PLATFORM_NAME_LINUX: "linux-amd64" PLATFORM_NAME_WIN: "win64" PLATFORM_NAME_MACOS: "macos" - XTENSA_CLANG_TOOLCHAIN_BRANCH: "esp-20220415-r14.0.0" - GCC_REL_NAME: "gcc8_4_0-esp-2021r2-patch3" - ARCHIVE_TOOL_LINUX: "tar -cJf" UNARCHIVE_TOOL_LINUX: "tar -xf" ARCHIVE_EXT_LINUX: "tar.xz" @@ -30,6 +39,16 @@ variables: ARCHIVE_EXT_MACOS: "tar.xz" DIST_DIR: "dist" + BUILD_DIR: "_build" + DOWNLOADS_DIR: "_downloads" + +########################################################################### +#################### START OF TEMPORARY LEGACY CODE ####################### +# TODO: the code below is to be removed after migration to new build script + CONF_TARGET: "xtensa-esp32-elf" + XTENSA_CLANG_TOOLCHAIN: "${CONF_TARGET}-clang" +##################### END OF TEMPORARY LEGACY CODE ######################## +########################################################################### .use_ci_tools: &use_ci_tools | curl -sSL ${CIT_LOADER_URL} -o cit_loader.sh && sh cit_loader.sh @@ -49,27 +68,35 @@ before_script: REL_SFX="llvm15_0_0" 
REL_NAME=${REL_SFX}-${REL_NUM}-${PLATFORM_NAME} ARCHIVE_NAME=${REL_NAME}.${ARCHIVE_EXT} - echo "CONF_TARGET: $CONF_TARGET" echo "PLATFORM_NAME: $PLATFORM_NAME" echo "REL_NUM: $REL_NUM" echo "REL_NAME: $REL_NAME" echo "ARCHIVE_NAME: $ARCHIVE_NAME" -# Get an existing crosstool-ng build for esp32 +# Get an existing crosstool-ng builds for all chips .get_gcc_toolchain: &get_gcc_toolchain | - wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/esp-2021r2-patch3/${XTENSA_GCC_TOOLCHAIN} - ${UNARCHIVE_TOOL} ${XTENSA_GCC_TOOLCHAIN} - mv xtensa-esp32-elf ${XTENSA_CLANG_TOOLCHAIN} + declare -a XTENSA_CPUS=("esp32" + "esp32s2" + "esp32s3") + for ((i = 0; i < ${#XTENSA_CPUS[@]}; i++)); do + XTENSA_CPU=${XTENSA_CPUS[$i]} + GCC_TOOLCHAIN_ARCH=xtensa-${XTENSA_CPU}-elf-${GCC_REL_VER}-${GCC_REL_NAME}-${PLATFORM_NAME}.${ARCHIVE_EXT} + wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/${GCC_REL_NAME}/${GCC_TOOLCHAIN_ARCH} + ${UNARCHIVE_TOOL} ${GCC_TOOLCHAIN_ARCH} + done; + GCC_TOOLCHAIN_ARCH=riscv32-esp-elf-${GCC_REL_VER}-${GCC_REL_NAME}-${PLATFORM_NAME}.${ARCHIVE_EXT} + wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/${GCC_REL_NAME}/${GCC_TOOLCHAIN_ARCH} + ${UNARCHIVE_TOOL} ${GCC_TOOLCHAIN_ARCH} .get_clang_toolchain_build_scripts: &get_clang_toolchain_build_scripts | - git clone -b ${XTENSA_CLANG_TOOLCHAIN_BRANCH} ${GITLAB_SSH_SERVER}/${XTENSA_CLANG_TOOLCHAIN_REPO} + git clone -b ${XTENSA_CLANG_TOOLCHAIN_REF} ${GITLAB_SSH_SERVER}/${XTENSA_CLANG_TOOLCHAIN_REPO} cp -r xtensa-clang-toolchain/* . 
# LLVM Build System used the remote address to show detailed version info, we'll change it to the public repository .fix_origin_remote_for_public: &fix_origin_remote_for_public | git remote set-url origin "${GH_REPO_HTTPS}" - # Pack the toolchain +# Pack the toolchain .package_toolchain: &package_toolchain | ${ARCHIVE_TOOL} ${ARCHIVE_NAME} esp-clang/ mkdir -p ${DISTRO_DIR} @@ -83,8 +110,10 @@ before_script: artifacts: paths: - ${DIST_DIR}/ + - newlib/ - ${BUILD_DIR}/clang_tests.log - ${BUILD_DIR}/clang_build.log + - ${BUILD_DIR}/newlib_build.log when: always expire_in: 1 day variables: @@ -98,8 +127,6 @@ before_script: - pushd ${DOWNLOADS_DIR} - export ESP_GCC_TOOLCHAIN_DIST_BASE=$PWD - *get_gcc_toolchain - - git clone -b ${NEWLIB_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${NEWLIB_REPO}.git - - export NEWLIB_PATH=$PWD/${NEWLIB_REPO} - git clone -b ${BINUTILS_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${BINUTILS_REPO}.git - export BINUTILS_PATH=$PWD/${BINUTILS_REPO} - git clone -b ${XTENSA_OVERLAYS_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${XTENSA_OVERLAYS_REPO}.git @@ -112,15 +139,38 @@ before_script: - export BUILD_PATH=$PWD/${BUILD_DIR} - mkdir -p ${BUILD_PATH} - export USE_PARALLEL_LINK_JOBS=2 - - ${BUILD_TOOLCHAIN_CMD} --llvm-path=${LLVM_PROJECT_PATH} --newlib-path=${NEWLIB_PATH} + # build Clang toolchain w/o newlib + - ${BUILD_TOOLCHAIN_CMD} --llvm-path=${LLVM_PROJECT_PATH} --gcc-toolchains-path=${ESP_GCC_TOOLCHAIN_DIST_BASE} --binutils-path=${BINUTILS_PATH} - --xtensa-overlays-path=${XTENSA_OVERLAYS_PATH} ${BUILD_TOOLCHAIN_CMD_ARGS} ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/clang_build.log + --xtensa-overlays-path=${XTENSA_OVERLAYS_PATH} --host=${CONF_HOST} ${BUILD_TOOLCHAIN_CMD_ARGS} ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/clang_build.log + # use just built Clang to build newlib + - export PATH=${BUILD_PATH}/esp-clang/bin:$PATH + - export BUILD_HOST=$(gcc -dumpmachine) + - export NEWLIB_OVERLAY_DISTRO_PATH=$PWD/newlib; + # build newlib overlay using ESP 
native (Linux) clang toolchain only + # it will be re-used for cross-buit toolchains (win and mac). + # FIXME: it would be good to move newlib overlay build to separate job and have job sequence like + # clang_linux_wo_newlib -> newlib_overlay -> clang_linux_full(copy newlib) -> clang_linux_unittests + # but we need full native (Linux) toolchain to run unittests and unittests need clang build dir. + # clang build dir may occupy about 2GB, so it looks too heavy to pass it as artifact + - if [ "${CONF_HOST}" == "${BUILD_HOST}" ]; then + export BUILD_NEWLIB_PATH=${BUILD_PATH}/newlib; + mkdir -p ${NEWLIB_OVERLAY_DISTRO_PATH}; + git clone -b ${NEWLIB_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${NEWLIB_REPO}.git; + export NEWLIB_PATH=$PWD/${NEWLIB_REPO}; + ./build-toolchain.sh --newlib-path=${NEWLIB_PATH} --xtensa-overlays-path=${XTENSA_OVERLAYS_PATH} ${BUILD_NEWLIB_PATH} 2>&1 > ${BUILD_PATH}/newlib_build.log; + pushd ${BUILD_NEWLIB_PATH}; + ${ARCHIVE_TOOL_LINUX} ${NEWLIB_OVERLAY_DISTRO_PATH}/esp-clang-newlib-overlay.${ARCHIVE_EXT_LINUX} esp-clang/; + popd; + fi + - ${UNARCHIVE_TOOL_LINUX} ${NEWLIB_OVERLAY_DISTRO_PATH}/esp-clang-newlib-overlay.${ARCHIVE_EXT_LINUX} -C ${BUILD_PATH} + # strip binutils afer newlib is built + - STRIP_BINUTILS=YES ./build-toolchain.sh --host=${CONF_HOST} ${BUILD_PATH} # Run unit tests for native build only. # Run as non-root user because permission tests fail when run by root. 
- - export BUILD_HOST=$(gcc -dumpmachine) - - export LLVM_BUILD_PATH=${LLVM_PROJECT_PATH}/llvm/build-Release-${CONF_HOST} - if [ "${CONF_HOST}" == "${BUILD_HOST}" ]; then - echo "Run unit tests for native build"; + export LLVM_BUILD_PATH=${LLVM_PROJECT_PATH}/llvm/build-Release-${CONF_HOST}; + echo "Run unit tests for native build ib ${LLVM_BUILD_PATH}"; useradd -m test_runner; chown -R test_runner ${LLVM_BUILD_PATH}; touch ${BUILD_PATH}/clang_tests.log; @@ -142,7 +192,7 @@ build_x86_64-linux-gnu: ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" BUILD_TOOLCHAIN_CMD: "./build-toolchain.sh" -build_x86_64-w64-mingw32: +.build_x86_64-w64-mingw32: extends: .build_template needs: - job: build_x86_64-linux-gnu @@ -165,7 +215,7 @@ build_x86_64-w64-mingw32: - rm -rf ${DIST_DIR} - rm -rf ${BUILD_DIR} # add build command args speciifc for Windows build - - export BUILD_TOOLCHAIN_CMD_ARGS="--host=${CONF_HOST} --native-esp-clang-path=$PWD/esp-clang-${PLATFORM_NAME_LINUX}" + - export BUILD_TOOLCHAIN_CMD_ARGS="--native-esp-clang-path=$PWD/esp-clang-${PLATFORM_NAME_LINUX}" variables: CONF_HOST: "x86_64-w64-mingw32" PLATFORM_NAME: "${PLATFORM_NAME_WIN}" @@ -174,7 +224,30 @@ build_x86_64-w64-mingw32: ARCHIVE_EXT: "${ARCHIVE_EXT_WIN}" BUILD_TOOLCHAIN_CMD: "./build-toolchain-win.sh" -test_x86_64-linux-gnu: +.build_apple-darwin_template: + extends: .build_template + needs: + - job: build_x86_64-linux-gnu + variables: + PLATFORM_NAME: "${PLATFORM_NAME_MACOS}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_MACOS}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_MACOS}" + ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" + GCC_ARCHIVE_EXT: "${GCC_ARCHIVE_EXT_MACOS}" + +build_x86_64-apple-darwin: + extends: .build_apple-darwin_template + variables: + CONF_HOST: "x86_64-apple-darwin21.1" + BUILD_TOOLCHAIN_CMD: "./build-toolchain-macos.sh --host-arch=x86_64" + +build_aarch64-apple-darwin: + extends: .build_apple-darwin_template + variables: + CONF_HOST: "aarch64-apple-darwin21.1" + BUILD_TOOLCHAIN_CMD: "./build-toolchain-macos.sh 
--host-arch=aarch64" + +.test_x86_64-linux-gnu: stage: test tags: [ "amd64", "build" ] allow_failure: true @@ -233,7 +306,7 @@ test_x86_64-linux-gnu: mv ${ARCHIVE_NAME} ${DIST_DIR}/ echo "${ARCHIVE_NAME}" > ${DIST_DIR}/file_${PLATFORM_NAME}_${CONF_TARGET} -.build_template: +.build_template_old: stage: build tags: [ "amd64", "build" ] artifacts: @@ -245,26 +318,36 @@ test_x86_64-linux-gnu: XTENSA_CLANG_TOOLCHAIN_REF: "release_esp32_clang_15.0.0_gcc_8.4.0" GCC_REL_NAME: "gcc8_4_0-esp-2021r2-patch3" script: - - *get_release_name - - *get_gcc_toolchain + - *get_release_name_old + - *get_gcc_toolchain_old - *fix_origin_remote_for_public - *get_clang_toolchain_build_scripts - ${BUILD_TOOLCHAIN_CMD} "${XTENSA_CLANG_TOOLCHAIN}" - - *package_toolchain + - *package_toolchain_old linux_amd64_build: - extends: .build_template + extends: .build_template_old variables: PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" - # a filename was moved here from the global 'variables:' because of GCC_REL_NAME value couldn't be expanded and substituted there XTENSA_GCC_TOOLCHAIN: "xtensa-esp32-elf-${GCC_REL_NAME}-linux-amd64.tar.gz" BUILD_TOOLCHAIN_CMD: "./build-toolchain-linux.sh" +linux_arm64_build: + extends: .build_template_old + image: $CI_DOCKER_REGISTRY/llvm-build-cross-arm:1 + variables: + PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARM64}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" + ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + XTENSA_GCC_TOOLCHAIN: "xtensa-esp32-elf-${GCC_REL_NAME}-linux-arm64.tar.gz" + BUILD_TOOLCHAIN_CMD: "./build-toolchain-linux-arm64.sh" + win64_build: - extends: .build_template + extends: .build_template_old variables: PLATFORM_NAME: "${PLATFORM_NAME_WIN}" ARCHIVE_TOOL: "${ARCHIVE_TOOL_WIN}" @@ -274,7 +357,7 @@ win64_build: BUILD_TOOLCHAIN_CMD: "./build-toolchain-win.sh" macos_amd64_build: - extends: .build_template + extends: 
.build_template_old variables: PLATFORM_NAME: "${PLATFORM_NAME_MACOS}" ARCHIVE_TOOL: "${ARCHIVE_TOOL_MACOS}" @@ -286,22 +369,22 @@ macos_amd64_build: linux_amd64_testsuite: stage: test tags: [ "amd64", "build" ] - dependencies: - - linux_amd64_build + needs: + - job: linux_amd64_build variables: PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" script: - - *get_release_name + - *get_release_name_old - ${UNARCHIVE_TOOL} ${DIST_DIR}/${ARCHIVE_NAME} - # getting testsuit - - git clone -b feature/ci_llvm_multitarget --depth 1 $GITLAB_SSH_SERVER/idf/llvm-xtensa-testsuite.git + # getting testsuite + - git clone -b ${LLVM_GCC_TESTSUITE_REF} --depth 1 $GITLAB_SSH_SERVER/idf/llvm-xtensa-testsuite.git - # preparing testsuit - - export PATH=$PATH:${PWD}/${XTENSA_CLANG_TOOLCHAIN}/bin/ + # preparing testsuite + - export PATH=${PWD}/${XTENSA_CLANG_TOOLCHAIN}/bin/:$PATH - cd llvm-xtensa-testsuite # qemu @@ -310,6 +393,12 @@ linux_amd64_testsuite: # run testsuite for esp32 - ./run_esp32_tests.sh + # run testsuite for compiler_rt library + - ./run_esp32_crt_tests.sh ../$XTENSA_CLANG_TOOLCHAIN + +##################### END OF TEMPORARY LEGACY CODE ######################## +########################################################################### + upload_to_http: stage: private_deploy when: manual From eae1fc81c816c28705c7015c6d96776ececee3e5 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:11 +0300 Subject: [PATCH 100/289] esp/ci: Adds minimal distro with libraries/headres only --- .gitlab-ci.yml | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5ce8137132c93..6f616b8865638 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -9,6 +9,8 @@ image: ${CI_DOCKER_REGISTRY}/llvm-build:4 variables: # move all these to CI/CD settings + REL_SFX: "llvm14_0_0" + CLANG_VER: "14.0.0" GCC_REL_NAME: 
"esp-2022r1-RC1" GCC_REL_VER: "gcc11_2_0" NEWLIB_REPO: "newlib-cygwin" @@ -65,9 +67,9 @@ before_script: .get_release_name: &get_release_name | # using annotated tags REL_NUM=$(git describe --abbrev=7) - REL_SFX="llvm15_0_0" REL_NAME=${REL_SFX}-${REL_NUM}-${PLATFORM_NAME} ARCHIVE_NAME=${REL_NAME}.${ARCHIVE_EXT} + LIBS_ARCHIVE_NAME=libs_${REL_NAME}.${ARCHIVE_EXT} echo "PLATFORM_NAME: $PLATFORM_NAME" echo "REL_NUM: $REL_NUM" echo "REL_NAME: $REL_NAME" @@ -103,6 +105,13 @@ before_script: mv ${ARCHIVE_NAME} ${DISTRO_DIR}/ echo "${ARCHIVE_NAME}" > ${DISTRO_DIR}/file_${PLATFORM_NAME} +# Pack libs to be used for Rust, Go etc. +.package_libs: &package_libs | + ${ARCHIVE_TOOL} ${LIBS_ARCHIVE_NAME} esp-clang/lib/libclang* esp-clang/lib/clang/${CLANG_VER}/include + mkdir -p ${DISTRO_DIR} + mv ${LIBS_ARCHIVE_NAME} ${DISTRO_DIR}/ + echo "${LIBS_ARCHIVE_NAME}" > ${DISTRO_DIR}/file_lib${PLATFORM_NAME} + .build_template: stage: build tags: [ "amd64", "build" ] @@ -115,7 +124,7 @@ before_script: - ${BUILD_DIR}/clang_build.log - ${BUILD_DIR}/newlib_build.log when: always - expire_in: 1 day + expire_in: 3 day variables: BUILD_TOOLCHAIN_CMD_ARGS: "" # use separate dist dir for universal toolchain @@ -180,6 +189,7 @@ before_script: - export DISTRO_DIR=$PWD/$DIST_DIR - pushd ${BUILD_PATH} - *package_toolchain + - *package_libs - popd build_x86_64-linux-gnu: From 8b57b010cb8a10bfc7d112e70e853717ae8567c6 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:12 +0300 Subject: [PATCH 101/289] esp/ci: Upgrade GCC toolchain to `esp-2022r1` --- .gitlab-ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6f616b8865638..70f206d102ea9 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -11,12 +11,12 @@ variables: # move all these to CI/CD settings REL_SFX: "llvm14_0_0" CLANG_VER: "14.0.0" - GCC_REL_NAME: "esp-2022r1-RC1" + GCC_REL_NAME: "esp-2022r1" GCC_REL_VER: "gcc11_2_0" NEWLIB_REPO: "newlib-cygwin" - 
NEWLIB_REF: "esp_based_on_4_1_0" + NEWLIB_REF: "esp-2022r1" BINUTILS_REPO: "binutils-gdb" - BINUTILS_REF: "esp_based_on_binutils-2_35" + BINUTILS_REF: "esp-2022r1-binutils" XTENSA_OVERLAYS_REPO: "xtensa-overlays" XTENSA_OVERLAYS_REF: "master" # TODO: update vars below to tags names after related branches are merged in those repos From 3202cc9c0203684c6c3a959a9f6ec1b041c9920c Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:12 +0300 Subject: [PATCH 102/289] esp/ci: Move newlib build to separate job --- .gitlab-ci.yml | 248 +++++++++++++++++++++++++++++++++++-------------- 1 file changed, 180 insertions(+), 68 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 70f206d102ea9..f3fadafed1fcd 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,5 +1,6 @@ stages: - build + - pack - private_deploy - test - public_deploy @@ -19,14 +20,16 @@ variables: BINUTILS_REF: "esp-2022r1-binutils" XTENSA_OVERLAYS_REPO: "xtensa-overlays" XTENSA_OVERLAYS_REF: "master" - # TODO: update vars below to tags names after related branches are merged in those repos + LLVM_GCC_TESTSUITE_REF: "release_universal_clang_toolchain" + # TODO: update var below to tags names after related branches are merged in those repos + # XTENSA_CLANG_TOOLCHAIN_REF: "release_universal_clang_toolchain" XTENSA_CLANG_TOOLCHAIN_REF: "build_macos_arm64" - LLVM_GCC_TESTSUITE_REF: "feature/toolchain_build_script" PLATFORM_NAME_LINUX_ARM64: "linux-arm64" PLATFORM_NAME_LINUX: "linux-amd64" PLATFORM_NAME_WIN: "win64" PLATFORM_NAME_MACOS: "macos" + PLATFORM_NAME_MACOS_ARM64: "macos-arm64" ARCHIVE_TOOL_LINUX: "tar -cJf" UNARCHIVE_TOOL_LINUX: "tar -xf" @@ -40,6 +43,11 @@ variables: UNARCHIVE_TOOL_MACOS: "tar -xf" ARCHIVE_EXT_MACOS: "tar.xz" + ARCHIVE_TOOL_NEWLIB: ${ARCHIVE_TOOL_LINUX} + UNARCHIVE_TOOL_NEWLIB: ${UNARCHIVE_TOOL_LINUX} + ARCHIVE_EXT_NEWLIB: ${ARCHIVE_EXT_LINUX} + + DIST_NEW_DIR: "_dist_new" DIST_DIR: "dist" BUILD_DIR: "_build" DOWNLOADS_DIR: "_downloads" @@ -110,7 +118,19 
@@ before_script: ${ARCHIVE_TOOL} ${LIBS_ARCHIVE_NAME} esp-clang/lib/libclang* esp-clang/lib/clang/${CLANG_VER}/include mkdir -p ${DISTRO_DIR} mv ${LIBS_ARCHIVE_NAME} ${DISTRO_DIR}/ - echo "${LIBS_ARCHIVE_NAME}" > ${DISTRO_DIR}/file_lib${PLATFORM_NAME} + echo "${LIBS_ARCHIVE_NAME}" > ${DISTRO_DIR}/file_libs-${PLATFORM_NAME} + +.get_binutils: &get_binutils | + git clone -b ${BINUTILS_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${BINUTILS_REPO}.git + BINUTILS_PATH=$PWD/${BINUTILS_REPO} + +.get_xtensa_overlays: &get_xtensa_overlays | + git clone -b ${XTENSA_OVERLAYS_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${XTENSA_OVERLAYS_REPO}.git + XTENSA_OVERLAYS_PATH=$PWD/${XTENSA_OVERLAYS_REPO} + +.get_newlib: &get_newlib | + git clone -b ${NEWLIB_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${NEWLIB_REPO}.git + NEWLIB_PATH=$PWD/${NEWLIB_REPO} .build_template: stage: build @@ -119,77 +139,49 @@ before_script: artifacts: paths: - ${DIST_DIR}/ - - newlib/ - - ${BUILD_DIR}/clang_tests.log - - ${BUILD_DIR}/clang_build.log - - ${BUILD_DIR}/newlib_build.log + - ${BUILD_DIR}/tests.log + - ${BUILD_DIR}/build.log when: always - expire_in: 3 day + expire_in: 1 day variables: - BUILD_TOOLCHAIN_CMD_ARGS: "" + BUILD_TOOLCHAIN_CMD_EXTRA_ARGS: "" # use separate dist dir for universal toolchain # TODO: remove this var after switching to universal toolchain builds - DIST_DIR: "dist_new" + DIST_DIR: ${DIST_NEW_DIR} script: - *get_release_name - mkdir ${DOWNLOADS_DIR} - pushd ${DOWNLOADS_DIR} - - export ESP_GCC_TOOLCHAIN_DIST_BASE=$PWD + - ESP_GCC_TOOLCHAIN_DIST_BASE=$PWD - *get_gcc_toolchain - - git clone -b ${BINUTILS_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${BINUTILS_REPO}.git - - export BINUTILS_PATH=$PWD/${BINUTILS_REPO} - - git clone -b ${XTENSA_OVERLAYS_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${XTENSA_OVERLAYS_REPO}.git - - export XTENSA_OVERLAYS_PATH=$PWD/${XTENSA_OVERLAYS_REPO} + - *get_binutils + - *get_xtensa_overlays - popd - *get_clang_toolchain_build_scripts - 
*fix_origin_remote_for_public - - export ESP_GCC_TOOLCHAIN_REL_VER=${GCC_REL_NAME} - - export LLVM_PROJECT_PATH=$PWD - - export BUILD_PATH=$PWD/${BUILD_DIR} + - LLVM_PROJECT_PATH=$PWD + - BUILD_PATH=$PWD/${BUILD_DIR} - mkdir -p ${BUILD_PATH} - export USE_PARALLEL_LINK_JOBS=2 # build Clang toolchain w/o newlib - ${BUILD_TOOLCHAIN_CMD} --llvm-path=${LLVM_PROJECT_PATH} --gcc-toolchains-path=${ESP_GCC_TOOLCHAIN_DIST_BASE} --binutils-path=${BINUTILS_PATH} - --xtensa-overlays-path=${XTENSA_OVERLAYS_PATH} --host=${CONF_HOST} ${BUILD_TOOLCHAIN_CMD_ARGS} ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/clang_build.log - # use just built Clang to build newlib - - export PATH=${BUILD_PATH}/esp-clang/bin:$PATH - - export BUILD_HOST=$(gcc -dumpmachine) - - export NEWLIB_OVERLAY_DISTRO_PATH=$PWD/newlib; - # build newlib overlay using ESP native (Linux) clang toolchain only - # it will be re-used for cross-buit toolchains (win and mac). - # FIXME: it would be good to move newlib overlay build to separate job and have job sequence like - # clang_linux_wo_newlib -> newlib_overlay -> clang_linux_full(copy newlib) -> clang_linux_unittests - # but we need full native (Linux) toolchain to run unittests and unittests need clang build dir. 
- # clang build dir may occupy about 2GB, so it looks too heavy to pass it as artifact - - if [ "${CONF_HOST}" == "${BUILD_HOST}" ]; then - export BUILD_NEWLIB_PATH=${BUILD_PATH}/newlib; - mkdir -p ${NEWLIB_OVERLAY_DISTRO_PATH}; - git clone -b ${NEWLIB_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${NEWLIB_REPO}.git; - export NEWLIB_PATH=$PWD/${NEWLIB_REPO}; - ./build-toolchain.sh --newlib-path=${NEWLIB_PATH} --xtensa-overlays-path=${XTENSA_OVERLAYS_PATH} ${BUILD_NEWLIB_PATH} 2>&1 > ${BUILD_PATH}/newlib_build.log; - pushd ${BUILD_NEWLIB_PATH}; - ${ARCHIVE_TOOL_LINUX} ${NEWLIB_OVERLAY_DISTRO_PATH}/esp-clang-newlib-overlay.${ARCHIVE_EXT_LINUX} esp-clang/; - popd; - fi - - ${UNARCHIVE_TOOL_LINUX} ${NEWLIB_OVERLAY_DISTRO_PATH}/esp-clang-newlib-overlay.${ARCHIVE_EXT_LINUX} -C ${BUILD_PATH} - # strip binutils afer newlib is built - - STRIP_BINUTILS=YES ./build-toolchain.sh --host=${CONF_HOST} ${BUILD_PATH} - # Run unit tests for native build only. + --xtensa-overlays-path=${XTENSA_OVERLAYS_PATH} --host=${CONF_HOST} ${BUILD_TOOLCHAIN_CMD_EXTRA_ARGS} ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/build.log + - BUILD_HOST=$(gcc -dumpmachine) + # Do not run unit tests for cross-builds. # Run as non-root user because permission tests fail when run by root. 
- if [ "${CONF_HOST}" == "${BUILD_HOST}" ]; then export LLVM_BUILD_PATH=${LLVM_PROJECT_PATH}/llvm/build-Release-${CONF_HOST}; - echo "Run unit tests for native build ib ${LLVM_BUILD_PATH}"; + echo "Run unit tests for native build in ${LLVM_BUILD_PATH}"; useradd -m test_runner; chown -R test_runner ${LLVM_BUILD_PATH}; - touch ${BUILD_PATH}/clang_tests.log; - chmod o+w ${BUILD_PATH}/clang_tests.log; - runuser -l test_runner -c 'cmake --build '${LLVM_BUILD_PATH}' --target check-all 2>&1 > '${BUILD_PATH}'/clang_tests.log'; + touch ${BUILD_PATH}/tests.log; + chmod o+w ${BUILD_PATH}/tests.log; + runuser -l test_runner -c 'cmake --build '${LLVM_BUILD_PATH}' --target check-all 2>&1 > '${BUILD_PATH}'/tests.log'; fi - export DISTRO_DIR=$PWD/$DIST_DIR - pushd ${BUILD_PATH} - *package_toolchain - - *package_libs - popd build_x86_64-linux-gnu: @@ -202,67 +194,187 @@ build_x86_64-linux-gnu: ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" BUILD_TOOLCHAIN_CMD: "./build-toolchain.sh" -.build_x86_64-w64-mingw32: +build_x86_64-w64-mingw32: extends: .build_template needs: + # needs native toolchain and newlib from this job - job: build_x86_64-linux-gnu before_script: - *use_ci_tools - *add_gitlab_key # get ARCHIVE_NAME for Linux release. 
Modify vars to make get_release_name working properly - - export PLATFORM_NAME_ORIG=${PLATFORM_NAME} - - export ARCHIVE_EXT_ORIG=${ARCHIVE_EXT} - - export PLATFORM_NAME=${PLATFORM_NAME_LINUX} - - export ARCHIVE_EXT=${ARCHIVE_EXT_LINUX} - - *get_release_name - # restore modified vars - - export PLATFORM_NAME=${PLATFORM_NAME_ORIG} - - export ARCHIVE_EXT=${ARCHIVE_EXT_ORIG} - # unpack Linux release to re-use it as native Clang for Windows build + - CLANG_LINUX_ARCHIVE=$(cat ${DIST_DIR}/file_${PLATFORM_NAME_LINUX}) + # unpack x86_64-linux-gnu toolchain to re-use it as native Clang for Windows build - mkdir -p esp-clang-${PLATFORM_NAME_LINUX} - - ${UNARCHIVE_TOOL_LINUX} ${DIST_DIR}/${ARCHIVE_NAME} -C esp-clang-${PLATFORM_NAME_LINUX} + - ${UNARCHIVE_TOOL_LINUX} ${DIST_DIR}/${CLANG_LINUX_ARCHIVE} -C esp-clang-${PLATFORM_NAME_LINUX} # we do not want to keep artifacts from 'x86_64-linux-gnu' job - rm -rf ${DIST_DIR} - rm -rf ${BUILD_DIR} # add build command args speciifc for Windows build - - export BUILD_TOOLCHAIN_CMD_ARGS="--native-esp-clang-path=$PWD/esp-clang-${PLATFORM_NAME_LINUX}" + - export BUILD_TOOLCHAIN_CMD_EXTRA_ARGS="--native-esp-clang-path=$PWD/esp-clang-${PLATFORM_NAME_LINUX}" variables: CONF_HOST: "x86_64-w64-mingw32" PLATFORM_NAME: "${PLATFORM_NAME_WIN}" - ARCHIVE_TOOL: "${ARCHIVE_TOOL_WIN}" + # Use Linux compressor to minimize artifact size. + # Toolchain is not fully stripped yet, so may exceed max artifact size. 
+ ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_WIN}" ARCHIVE_EXT: "${ARCHIVE_EXT_WIN}" BUILD_TOOLCHAIN_CMD: "./build-toolchain-win.sh" .build_apple-darwin_template: extends: .build_template - needs: - - job: build_x86_64-linux-gnu variables: - PLATFORM_NAME: "${PLATFORM_NAME_MACOS}" ARCHIVE_TOOL: "${ARCHIVE_TOOL_MACOS}" UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_MACOS}" ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" - GCC_ARCHIVE_EXT: "${GCC_ARCHIVE_EXT_MACOS}" build_x86_64-apple-darwin: extends: .build_apple-darwin_template variables: CONF_HOST: "x86_64-apple-darwin21.1" + PLATFORM_NAME: "${PLATFORM_NAME_MACOS}" BUILD_TOOLCHAIN_CMD: "./build-toolchain-macos.sh --host-arch=x86_64" build_aarch64-apple-darwin: extends: .build_apple-darwin_template variables: CONF_HOST: "aarch64-apple-darwin21.1" + PLATFORM_NAME: "${PLATFORM_NAME_MACOS_ARM64}" BUILD_TOOLCHAIN_CMD: "./build-toolchain-macos.sh --host-arch=aarch64" -.test_x86_64-linux-gnu: - stage: test +build_newlib: + stage: build + tags: [ "amd64", "build" ] + needs: + # needs native toolchainfrom this job + - job: build_x86_64-linux-gnu + artifacts: + paths: + - ${DIST_DIR}/ + - ${BUILD_DIR}/build.log + when: always + expire_in: 1 day + variables: + PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" + ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + # use separate dist dir for universal toolchain + # TODO: remove this var after switching to universal toolchain builds + DIST_DIR: ${DIST_NEW_DIR} + script: + # get ARCHIVE_NAME for Linux release. 
+ - CLANG_ARCHIVE=$PWD/${DIST_DIR}/$(cat ${DIST_DIR}/file_${PLATFORM_NAME_LINUX}) + - mkdir -p ${DOWNLOADS_DIR} + - pushd ${DOWNLOADS_DIR} + - *get_xtensa_overlays + - *get_newlib + # unpack clang + - ${UNARCHIVE_TOOL} ${CLANG_ARCHIVE} + - export PATH=$PWD/esp-clang/bin:$PATH + - popd + - rm -rf $PWD/${DIST_DIR} + - *get_clang_toolchain_build_scripts + # build newlib overlay using ESP native (Linux) clang toolchain only + # it will be re-used for cross-buit toolchains (win and mac). + - NEWLIB_OVERLAY_DISTRO_PATH=$PWD/${DIST_DIR} + - mkdir -p ${NEWLIB_OVERLAY_DISTRO_PATH} + - BUILD_PATH=$PWD/${BUILD_DIR} + - mkdir -p ${BUILD_PATH} + - ./build-toolchain.sh --newlib-path=${NEWLIB_PATH} --xtensa-overlays-path=${XTENSA_OVERLAYS_PATH} ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/build.log + - pushd ${BUILD_PATH} + - ${ARCHIVE_TOOL_NEWLIB} ${NEWLIB_OVERLAY_DISTRO_PATH}/esp-clang-newlib-overlay.${ARCHIVE_EXT_NEWLIB} esp-clang/ + - popd + +.pack_template: + stage: pack tags: [ "amd64", "build" ] allow_failure: true + artifacts: + paths: + - ${DIST_DIR}/ + when: always + expire_in: 3 day + variables: + # use separate dist dir for universal toolchain + # TODO: remove this var after switching to universal toolchain builds + DIST_DIR: ${DIST_NEW_DIR} + script: + - *get_release_name + - export BUILD_PATH=$PWD/${BUILD_DIR} + - mkdir -p ${BUILD_PATH} + # unpack clang + - ${UNARCHIVE_TOOL} ${DIST_DIR}/${ARCHIVE_NAME} -C ${BUILD_PATH} + # unpack newlib + - ${UNARCHIVE_TOOL_NEWLIB} ${DIST_DIR}/esp-clang-newlib-overlay.${ARCHIVE_EXT_NEWLIB} -C ${BUILD_PATH} + - rm -rf ${DIST_DIR} + - *get_clang_toolchain_build_scripts + # strip binutils afer newlib is built + - STRIP_BINUTILS=YES ./build-toolchain.sh --host=${CONF_HOST} ${BUILD_PATH} + - DISTRO_DIR=$PWD/${DIST_DIR} + - pushd ${BUILD_PATH} + - *package_toolchain + - *package_libs + - popd + +pack_x86_64-linux-gnu: + extends: .pack_template needs: - job: build_x86_64-linux-gnu + - job: build_newlib + variables: + CONF_HOST: "x86_64-linux-gnu" + 
PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" + ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + +pack_x86_64-w64-mingw32: + extends: .pack_template + needs: + - job: build_x86_64-w64-mingw32 + - job: build_newlib + variables: + CONF_HOST: "x86_64-w64-mingw32" + PLATFORM_NAME: "${PLATFORM_NAME_WIN}" + # use Linux compressor to save space. + # upon release archive will be re-packed into zip format for uploading to GH + ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" + ARCHIVE_EXT: "${ARCHIVE_EXT_WIN}" + +.pack_apple-darwin_template: + extends: .pack_template + variables: + ARCHIVE_TOOL: "${ARCHIVE_TOOL_MACOS}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_MACOS}" + ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" + +pack_x86_64-apple-darwin: + extends: .pack_apple-darwin_template + needs: + - job: build_x86_64-apple-darwin + - job: build_newlib + variables: + CONF_HOST: "x86_64-apple-darwin21.1" + PLATFORM_NAME: "${PLATFORM_NAME_MACOS}" + +pack_aarch64-apple-darwin: + extends: .pack_apple-darwin_template + needs: + - job: build_aarch64-apple-darwin + - job: build_newlib + variables: + CONF_HOST: "aarch64-apple-darwin21.1" + PLATFORM_NAME: "${PLATFORM_NAME_MACOS_ARM64}" + +test_x86_64-linux-gnu: + stage: test + tags: [ "amd64", "build" ] + allow_failure: true + needs: + - job: pack_x86_64-linux-gnu variables: PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" @@ -270,7 +382,7 @@ build_aarch64-apple-darwin: ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" # use separate dist dir for universal toolchain # TODO: remove this var after switching to universal toolchain builds - DIST_DIR: "dist_new" + DIST_DIR: ${DIST_NEW_DIR} script: - *get_release_name - ${UNARCHIVE_TOOL} ${DIST_DIR}/${ARCHIVE_NAME} From 0cbc93fb07728c4c37b98e181e62180203c9ae39 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:13 +0300 Subject: [PATCH 103/289] esp/ci: Adds Linux ARM/ARM64 
universal toolchain builds --- .gitignore | 3 +++ .gitlab-ci.yml | 69 ++++++++++++++++++++++++++++++++++++++++---------- 2 files changed, 59 insertions(+), 13 deletions(-) diff --git a/.gitignore b/.gitignore index 20c4f52cd3786..0e13e97841618 100644 --- a/.gitignore +++ b/.gitignore @@ -28,6 +28,9 @@ # Nested build directory /build* +/*/build-* +/_build +/_dist #==============================================================================# # Explicit files to ignore (only matches one). diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f3fadafed1fcd..6b57687583bf1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -23,10 +23,12 @@ variables: LLVM_GCC_TESTSUITE_REF: "release_universal_clang_toolchain" # TODO: update var below to tags names after related branches are merged in those repos # XTENSA_CLANG_TOOLCHAIN_REF: "release_universal_clang_toolchain" - XTENSA_CLANG_TOOLCHAIN_REF: "build_macos_arm64" + XTENSA_CLANG_TOOLCHAIN_REF: "universal_toolchain/build_linux_arm64" - PLATFORM_NAME_LINUX_ARM64: "linux-arm64" + CROSS_ARM_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-arm:1 PLATFORM_NAME_LINUX: "linux-amd64" + PLATFORM_NAME_LINUX_ARMHF: "linux-armhf" + PLATFORM_NAME_LINUX_ARM64: "linux-arm64" PLATFORM_NAME_WIN: "win64" PLATFORM_NAME_MACOS: "macos" PLATFORM_NAME_MACOS_ARM64: "macos-arm64" @@ -171,7 +173,7 @@ before_script: # Do not run unit tests for cross-builds. # Run as non-root user because permission tests fail when run by root. 
- if [ "${CONF_HOST}" == "${BUILD_HOST}" ]; then - export LLVM_BUILD_PATH=${LLVM_PROJECT_PATH}/llvm/build-Release-${CONF_HOST}; + export LLVM_BUILD_PATH=${LLVM_PROJECT_PATH}/llvm/build-${CONF_HOST}-Release; echo "Run unit tests for native build in ${LLVM_BUILD_PATH}"; useradd -m test_runner; chown -R test_runner ${LLVM_BUILD_PATH}; @@ -184,16 +186,34 @@ before_script: - *package_toolchain - popd -build_x86_64-linux-gnu: +.build_linux-gnu_template: extends: .build_template variables: - CONF_HOST: "x86_64-linux-gnu" - PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" BUILD_TOOLCHAIN_CMD: "./build-toolchain.sh" +build_x86_64-linux-gnu: + extends: .build_linux-gnu_template + variables: + CONF_HOST: "x86_64-linux-gnu" + PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" + +build_arm-linux-gnueabihf: + extends: .build_linux-gnu_template + image: ${CROSS_ARM_IMAGE} + variables: + CONF_HOST: "arm-linux-gnueabihf" + PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARMHF}" + +build_aarch64-linux-gnu: + extends: .build_linux-gnu_template + image: ${CROSS_ARM_IMAGE} + variables: + CONF_HOST: "aarch64-linux-gnu" + PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARM64}" + build_x86_64-w64-mingw32: extends: .build_template needs: @@ -228,20 +248,19 @@ build_x86_64-w64-mingw32: ARCHIVE_TOOL: "${ARCHIVE_TOOL_MACOS}" UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_MACOS}" ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" + BUILD_TOOLCHAIN_CMD: "./build-toolchain.sh" build_x86_64-apple-darwin: extends: .build_apple-darwin_template variables: CONF_HOST: "x86_64-apple-darwin21.1" PLATFORM_NAME: "${PLATFORM_NAME_MACOS}" - BUILD_TOOLCHAIN_CMD: "./build-toolchain-macos.sh --host-arch=x86_64" build_aarch64-apple-darwin: extends: .build_apple-darwin_template variables: CONF_HOST: "aarch64-apple-darwin21.1" PLATFORM_NAME: "${PLATFORM_NAME_MACOS_ARM64}" - BUILD_TOOLCHAIN_CMD: "./build-toolchain-macos.sh --host-arch=aarch64" build_newlib: stage: 
build @@ -318,17 +337,41 @@ build_newlib: - *package_libs - popd -pack_x86_64-linux-gnu: +.pack_linux-gnu_template: extends: .pack_template + variables: + ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" + ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + +pack_x86_64-linux-gnu: + extends: .pack_linux-gnu_template needs: - job: build_x86_64-linux-gnu - job: build_newlib variables: CONF_HOST: "x86_64-linux-gnu" PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" - ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + +pack_arm-linux-gnueabihf: + extends: .pack_linux-gnu_template + image: ${CROSS_ARM_IMAGE} + needs: + - job: build_arm-linux-gnueabihf + - job: build_newlib + variables: + CONF_HOST: "arm-linux-gnueabihf" + PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARMHF}" + +pack_aarch64-linux-gnu: + extends: .pack_linux-gnu_template + image: ${CROSS_ARM_IMAGE} + needs: + - job: build_aarch64-linux-gnu + - job: build_newlib + variables: + CONF_HOST: "aarch64-linux-gnu" + PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARM64}" pack_x86_64-w64-mingw32: extends: .pack_template @@ -459,7 +502,7 @@ linux_amd64_build: linux_arm64_build: extends: .build_template_old - image: $CI_DOCKER_REGISTRY/llvm-build-cross-arm:1 + image: ${CROSS_ARM_IMAGE} variables: PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARM64}" ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" From 17ae9db09f1dee542cecfbaab9bee66479a37885 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:13 +0300 Subject: [PATCH 104/289] esp/ci: Upgrade Clang ver to 15 --- .gitlab-ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6b57687583bf1..8a41a9dfc1694 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -10,8 +10,8 @@ image: ${CI_DOCKER_REGISTRY}/llvm-build:4 variables: # move all these to CI/CD settings - REL_SFX: "llvm14_0_0" - CLANG_VER: "14.0.0" + REL_SFX: "llvm15_0_0" + CLANG_VER: 
"15.0.0" GCC_REL_NAME: "esp-2022r1" GCC_REL_VER: "gcc11_2_0" NEWLIB_REPO: "newlib-cygwin" From 87b948d57f2d8b48b87bd9f4e8fd56f63368dcc5 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:14 +0300 Subject: [PATCH 105/289] esp/ci: Adds support to switch between legacy and universal toolchain release pipelines --- .gitlab-ci.yml | 578 +++---------------------------- .legacy-release.yml | 164 +++++++++ .universal-toolchain-release.yml | 409 ++++++++++++++++++++++ 3 files changed, 613 insertions(+), 538 deletions(-) create mode 100644 .legacy-release.yml create mode 100644 .universal-toolchain-release.yml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8a41a9dfc1694..d4a2c816f97ef 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -10,7 +10,7 @@ image: ${CI_DOCKER_REGISTRY}/llvm-build:4 variables: # move all these to CI/CD settings - REL_SFX: "llvm15_0_0" + REL_SFX: "llvm" CLANG_VER: "15.0.0" GCC_REL_NAME: "esp-2022r1" GCC_REL_VER: "gcc11_2_0" @@ -21,9 +21,7 @@ variables: XTENSA_OVERLAYS_REPO: "xtensa-overlays" XTENSA_OVERLAYS_REF: "master" LLVM_GCC_TESTSUITE_REF: "release_universal_clang_toolchain" - # TODO: update var below to tags names after related branches are merged in those repos - # XTENSA_CLANG_TOOLCHAIN_REF: "release_universal_clang_toolchain" - XTENSA_CLANG_TOOLCHAIN_REF: "universal_toolchain/build_linux_arm64" + XTENSA_CLANG_TOOLCHAIN_REF: "release_universal_clang_toolchain" CROSS_ARM_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-arm:1 PLATFORM_NAME_LINUX: "linux-amd64" @@ -40,6 +38,10 @@ variables: ARCHIVE_TOOL_WIN: "zip -9 -r" UNARCHIVE_TOOL_WIN: "unzip" ARCHIVE_EXT_WIN: "zip" + # Use Linux xz compressor to minimize Windows build artifact size. + # Upon release archive will be re-packed into zip format for uploading to GH. 
+ ARCHIVE_TOOL_WIN_INT: ${ARCHIVE_TOOL_LINUX} + UNARCHIVE_TOOL_WIN_INT: ${UNARCHIVE_TOOL_LINUX} ARCHIVE_TOOL_MACOS: "tar -cJf" UNARCHIVE_TOOL_MACOS: "tar -xf" @@ -49,10 +51,13 @@ variables: UNARCHIVE_TOOL_NEWLIB: ${UNARCHIVE_TOOL_LINUX} ARCHIVE_EXT_NEWLIB: ${ARCHIVE_EXT_LINUX} - DIST_NEW_DIR: "_dist_new" + LIBS_ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + LIBS_UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" + LIBS_ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + DIST_DIR: "dist" - BUILD_DIR: "_build" - DOWNLOADS_DIR: "_downloads" + BUILD_DIR: "build" + DOWNLOADS_DIR: "downloads" ########################################################################### #################### START OF TEMPORARY LEGACY CODE ####################### @@ -62,548 +67,45 @@ variables: ##################### END OF TEMPORARY LEGACY CODE ######################## ########################################################################### -.use_ci_tools: &use_ci_tools | +.use_ci_tools_snippet: &use_ci_tools_snippet | curl -sSL ${CIT_LOADER_URL} -o cit_loader.sh && sh cit_loader.sh source citools/import_functions -.add_gitlab_key: &add_gitlab_key | - cit_add_ssh_key "${GITLAB_KEY}" - -before_script: - - *use_ci_tools - - *add_gitlab_key - -# Prepare release name/number -.get_release_name: &get_release_name | - # using annotated tags - REL_NUM=$(git describe --abbrev=7) - REL_NAME=${REL_SFX}-${REL_NUM}-${PLATFORM_NAME} - ARCHIVE_NAME=${REL_NAME}.${ARCHIVE_EXT} - LIBS_ARCHIVE_NAME=libs_${REL_NAME}.${ARCHIVE_EXT} - echo "PLATFORM_NAME: $PLATFORM_NAME" - echo "REL_NUM: $REL_NUM" - echo "REL_NAME: $REL_NAME" - echo "ARCHIVE_NAME: $ARCHIVE_NAME" - -# Get an existing crosstool-ng builds for all chips -.get_gcc_toolchain: &get_gcc_toolchain | - declare -a XTENSA_CPUS=("esp32" - "esp32s2" - "esp32s3") - for ((i = 0; i < ${#XTENSA_CPUS[@]}; i++)); do - XTENSA_CPU=${XTENSA_CPUS[$i]} - GCC_TOOLCHAIN_ARCH=xtensa-${XTENSA_CPU}-elf-${GCC_REL_VER}-${GCC_REL_NAME}-${PLATFORM_NAME}.${ARCHIVE_EXT} - wget --no-verbose 
https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/${GCC_REL_NAME}/${GCC_TOOLCHAIN_ARCH} - ${UNARCHIVE_TOOL} ${GCC_TOOLCHAIN_ARCH} - done; - GCC_TOOLCHAIN_ARCH=riscv32-esp-elf-${GCC_REL_VER}-${GCC_REL_NAME}-${PLATFORM_NAME}.${ARCHIVE_EXT} - wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/${GCC_REL_NAME}/${GCC_TOOLCHAIN_ARCH} - ${UNARCHIVE_TOOL} ${GCC_TOOLCHAIN_ARCH} - -.get_clang_toolchain_build_scripts: &get_clang_toolchain_build_scripts | - git clone -b ${XTENSA_CLANG_TOOLCHAIN_REF} ${GITLAB_SSH_SERVER}/${XTENSA_CLANG_TOOLCHAIN_REPO} - cp -r xtensa-clang-toolchain/* . - -# LLVM Build System used the remote address to show detailed version info, we'll change it to the public repository -.fix_origin_remote_for_public: &fix_origin_remote_for_public | - git remote set-url origin "${GH_REPO_HTTPS}" - -# Pack the toolchain -.package_toolchain: &package_toolchain | - ${ARCHIVE_TOOL} ${ARCHIVE_NAME} esp-clang/ - mkdir -p ${DISTRO_DIR} - mv ${ARCHIVE_NAME} ${DISTRO_DIR}/ - echo "${ARCHIVE_NAME}" > ${DISTRO_DIR}/file_${PLATFORM_NAME} - -# Pack libs to be used for Rust, Go etc. 
-.package_libs: &package_libs | - ${ARCHIVE_TOOL} ${LIBS_ARCHIVE_NAME} esp-clang/lib/libclang* esp-clang/lib/clang/${CLANG_VER}/include - mkdir -p ${DISTRO_DIR} - mv ${LIBS_ARCHIVE_NAME} ${DISTRO_DIR}/ - echo "${LIBS_ARCHIVE_NAME}" > ${DISTRO_DIR}/file_libs-${PLATFORM_NAME} - -.get_binutils: &get_binutils | - git clone -b ${BINUTILS_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${BINUTILS_REPO}.git - BINUTILS_PATH=$PWD/${BINUTILS_REPO} - -.get_xtensa_overlays: &get_xtensa_overlays | - git clone -b ${XTENSA_OVERLAYS_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${XTENSA_OVERLAYS_REPO}.git - XTENSA_OVERLAYS_PATH=$PWD/${XTENSA_OVERLAYS_REPO} - -.get_newlib: &get_newlib | - git clone -b ${NEWLIB_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${NEWLIB_REPO}.git - NEWLIB_PATH=$PWD/${NEWLIB_REPO} - -.build_template: - stage: build - tags: [ "amd64", "build" ] - allow_failure: true - artifacts: - paths: - - ${DIST_DIR}/ - - ${BUILD_DIR}/tests.log - - ${BUILD_DIR}/build.log - when: always - expire_in: 1 day - variables: - BUILD_TOOLCHAIN_CMD_EXTRA_ARGS: "" - # use separate dist dir for universal toolchain - # TODO: remove this var after switching to universal toolchain builds - DIST_DIR: ${DIST_NEW_DIR} - script: - - *get_release_name - - mkdir ${DOWNLOADS_DIR} - - pushd ${DOWNLOADS_DIR} - - ESP_GCC_TOOLCHAIN_DIST_BASE=$PWD - - *get_gcc_toolchain - - *get_binutils - - *get_xtensa_overlays - - popd - - *get_clang_toolchain_build_scripts - - *fix_origin_remote_for_public - - LLVM_PROJECT_PATH=$PWD - - BUILD_PATH=$PWD/${BUILD_DIR} - - mkdir -p ${BUILD_PATH} - - export USE_PARALLEL_LINK_JOBS=2 - # build Clang toolchain w/o newlib - - ${BUILD_TOOLCHAIN_CMD} --llvm-path=${LLVM_PROJECT_PATH} - --gcc-toolchains-path=${ESP_GCC_TOOLCHAIN_DIST_BASE} --binutils-path=${BINUTILS_PATH} - --xtensa-overlays-path=${XTENSA_OVERLAYS_PATH} --host=${CONF_HOST} ${BUILD_TOOLCHAIN_CMD_EXTRA_ARGS} ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/build.log - - BUILD_HOST=$(gcc -dumpmachine) - # Do not run unit 
tests for cross-builds. - # Run as non-root user because permission tests fail when run by root. - - if [ "${CONF_HOST}" == "${BUILD_HOST}" ]; then - export LLVM_BUILD_PATH=${LLVM_PROJECT_PATH}/llvm/build-${CONF_HOST}-Release; - echo "Run unit tests for native build in ${LLVM_BUILD_PATH}"; - useradd -m test_runner; - chown -R test_runner ${LLVM_BUILD_PATH}; - touch ${BUILD_PATH}/tests.log; - chmod o+w ${BUILD_PATH}/tests.log; - runuser -l test_runner -c 'cmake --build '${LLVM_BUILD_PATH}' --target check-all 2>&1 > '${BUILD_PATH}'/tests.log'; - fi - - export DISTRO_DIR=$PWD/$DIST_DIR - - pushd ${BUILD_PATH} - - *package_toolchain - - popd - -.build_linux-gnu_template: - extends: .build_template - variables: - ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" - BUILD_TOOLCHAIN_CMD: "./build-toolchain.sh" - -build_x86_64-linux-gnu: - extends: .build_linux-gnu_template - variables: - CONF_HOST: "x86_64-linux-gnu" - PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" - -build_arm-linux-gnueabihf: - extends: .build_linux-gnu_template - image: ${CROSS_ARM_IMAGE} - variables: - CONF_HOST: "arm-linux-gnueabihf" - PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARMHF}" - -build_aarch64-linux-gnu: - extends: .build_linux-gnu_template - image: ${CROSS_ARM_IMAGE} - variables: - CONF_HOST: "aarch64-linux-gnu" - PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARM64}" - -build_x86_64-w64-mingw32: - extends: .build_template - needs: - # needs native toolchain and newlib from this job - - job: build_x86_64-linux-gnu - before_script: - - *use_ci_tools - - *add_gitlab_key - # get ARCHIVE_NAME for Linux release. 
Modify vars to make get_release_name working properly - - CLANG_LINUX_ARCHIVE=$(cat ${DIST_DIR}/file_${PLATFORM_NAME_LINUX}) - # unpack x86_64-linux-gnu toolchain to re-use it as native Clang for Windows build - - mkdir -p esp-clang-${PLATFORM_NAME_LINUX} - - ${UNARCHIVE_TOOL_LINUX} ${DIST_DIR}/${CLANG_LINUX_ARCHIVE} -C esp-clang-${PLATFORM_NAME_LINUX} - # we do not want to keep artifacts from 'x86_64-linux-gnu' job - - rm -rf ${DIST_DIR} - - rm -rf ${BUILD_DIR} - # add build command args speciifc for Windows build - - export BUILD_TOOLCHAIN_CMD_EXTRA_ARGS="--native-esp-clang-path=$PWD/esp-clang-${PLATFORM_NAME_LINUX}" - variables: - CONF_HOST: "x86_64-w64-mingw32" - PLATFORM_NAME: "${PLATFORM_NAME_WIN}" - # Use Linux compressor to minimize artifact size. - # Toolchain is not fully stripped yet, so may exceed max artifact size. - ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_WIN}" - ARCHIVE_EXT: "${ARCHIVE_EXT_WIN}" - BUILD_TOOLCHAIN_CMD: "./build-toolchain-win.sh" - -.build_apple-darwin_template: - extends: .build_template - variables: - ARCHIVE_TOOL: "${ARCHIVE_TOOL_MACOS}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_MACOS}" - ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" - BUILD_TOOLCHAIN_CMD: "./build-toolchain.sh" - -build_x86_64-apple-darwin: - extends: .build_apple-darwin_template - variables: - CONF_HOST: "x86_64-apple-darwin21.1" - PLATFORM_NAME: "${PLATFORM_NAME_MACOS}" - -build_aarch64-apple-darwin: - extends: .build_apple-darwin_template - variables: - CONF_HOST: "aarch64-apple-darwin21.1" - PLATFORM_NAME: "${PLATFORM_NAME_MACOS_ARM64}" - -build_newlib: - stage: build - tags: [ "amd64", "build" ] - needs: - # needs native toolchainfrom this job - - job: build_x86_64-linux-gnu - artifacts: - paths: - - ${DIST_DIR}/ - - ${BUILD_DIR}/build.log - when: always - expire_in: 1 day - variables: - PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" - ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - ARCHIVE_EXT: 
"${ARCHIVE_EXT_LINUX}" - # use separate dist dir for universal toolchain - # TODO: remove this var after switching to universal toolchain builds - DIST_DIR: ${DIST_NEW_DIR} - script: - # get ARCHIVE_NAME for Linux release. - - CLANG_ARCHIVE=$PWD/${DIST_DIR}/$(cat ${DIST_DIR}/file_${PLATFORM_NAME_LINUX}) - - mkdir -p ${DOWNLOADS_DIR} - - pushd ${DOWNLOADS_DIR} - - *get_xtensa_overlays - - *get_newlib - # unpack clang - - ${UNARCHIVE_TOOL} ${CLANG_ARCHIVE} - - export PATH=$PWD/esp-clang/bin:$PATH - - popd - - rm -rf $PWD/${DIST_DIR} - - *get_clang_toolchain_build_scripts - # build newlib overlay using ESP native (Linux) clang toolchain only - # it will be re-used for cross-buit toolchains (win and mac). - - NEWLIB_OVERLAY_DISTRO_PATH=$PWD/${DIST_DIR} - - mkdir -p ${NEWLIB_OVERLAY_DISTRO_PATH} - - BUILD_PATH=$PWD/${BUILD_DIR} - - mkdir -p ${BUILD_PATH} - - ./build-toolchain.sh --newlib-path=${NEWLIB_PATH} --xtensa-overlays-path=${XTENSA_OVERLAYS_PATH} ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/build.log - - pushd ${BUILD_PATH} - - ${ARCHIVE_TOOL_NEWLIB} ${NEWLIB_OVERLAY_DISTRO_PATH}/esp-clang-newlib-overlay.${ARCHIVE_EXT_NEWLIB} esp-clang/ - - popd - -.pack_template: - stage: pack - tags: [ "amd64", "build" ] - allow_failure: true - artifacts: - paths: - - ${DIST_DIR}/ - when: always - expire_in: 3 day - variables: - # use separate dist dir for universal toolchain - # TODO: remove this var after switching to universal toolchain builds - DIST_DIR: ${DIST_NEW_DIR} - script: - - *get_release_name - - export BUILD_PATH=$PWD/${BUILD_DIR} - - mkdir -p ${BUILD_PATH} - # unpack clang - - ${UNARCHIVE_TOOL} ${DIST_DIR}/${ARCHIVE_NAME} -C ${BUILD_PATH} - # unpack newlib - - ${UNARCHIVE_TOOL_NEWLIB} ${DIST_DIR}/esp-clang-newlib-overlay.${ARCHIVE_EXT_NEWLIB} -C ${BUILD_PATH} - - rm -rf ${DIST_DIR} - - *get_clang_toolchain_build_scripts - # strip binutils afer newlib is built - - STRIP_BINUTILS=YES ./build-toolchain.sh --host=${CONF_HOST} ${BUILD_PATH} - - DISTRO_DIR=$PWD/${DIST_DIR} - - 
pushd ${BUILD_PATH} - - *package_toolchain - - *package_libs - - popd - -.pack_linux-gnu_template: - extends: .pack_template - variables: - ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" - -pack_x86_64-linux-gnu: - extends: .pack_linux-gnu_template - needs: - - job: build_x86_64-linux-gnu - - job: build_newlib - variables: - CONF_HOST: "x86_64-linux-gnu" - PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" - -pack_arm-linux-gnueabihf: - extends: .pack_linux-gnu_template - image: ${CROSS_ARM_IMAGE} - needs: - - job: build_arm-linux-gnueabihf - - job: build_newlib - variables: - CONF_HOST: "arm-linux-gnueabihf" - PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARMHF}" - -pack_aarch64-linux-gnu: - extends: .pack_linux-gnu_template - image: ${CROSS_ARM_IMAGE} - needs: - - job: build_aarch64-linux-gnu - - job: build_newlib - variables: - CONF_HOST: "aarch64-linux-gnu" - PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARM64}" - -pack_x86_64-w64-mingw32: - extends: .pack_template - needs: - - job: build_x86_64-w64-mingw32 - - job: build_newlib - variables: - CONF_HOST: "x86_64-w64-mingw32" - PLATFORM_NAME: "${PLATFORM_NAME_WIN}" - # use Linux compressor to save space. 
- # upon release archive will be re-packed into zip format for uploading to GH - ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - ARCHIVE_EXT: "${ARCHIVE_EXT_WIN}" - -.pack_apple-darwin_template: - extends: .pack_template - variables: - ARCHIVE_TOOL: "${ARCHIVE_TOOL_MACOS}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_MACOS}" - ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" - -pack_x86_64-apple-darwin: - extends: .pack_apple-darwin_template - needs: - - job: build_x86_64-apple-darwin - - job: build_newlib - variables: - CONF_HOST: "x86_64-apple-darwin21.1" - PLATFORM_NAME: "${PLATFORM_NAME_MACOS}" - -pack_aarch64-apple-darwin: - extends: .pack_apple-darwin_template - needs: - - job: build_aarch64-apple-darwin - - job: build_newlib - variables: - CONF_HOST: "aarch64-apple-darwin21.1" - PLATFORM_NAME: "${PLATFORM_NAME_MACOS_ARM64}" - -test_x86_64-linux-gnu: - stage: test - tags: [ "amd64", "build" ] - allow_failure: true - needs: - - job: pack_x86_64-linux-gnu - variables: - PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" - ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" - # use separate dist dir for universal toolchain - # TODO: remove this var after switching to universal toolchain builds - DIST_DIR: ${DIST_NEW_DIR} +.use_ci_tools: script: - - *get_release_name - - ${UNARCHIVE_TOOL} ${DIST_DIR}/${ARCHIVE_NAME} - # getting testsuite - - git clone -b ${LLVM_GCC_TESTSUITE_REF} --depth 1 $GITLAB_SSH_SERVER/idf/llvm-xtensa-testsuite.git - # preparing testsuite - - export PATH=${PWD}/esp-clang/bin:$PATH - - cd llvm-xtensa-testsuite - # qemu - - ./qemu_esp32_install.sh - # run testsuite for esp32 - - ./run_esp32_tests.sh - -########################################################################### -#################### START OF TEMPORARY LEGACY CODE ####################### -# TODO: the code below is to be removed after migration to new build script -.get_release_name_old: &get_release_name_old 
| - # using annotated tags - REL_NUM=$(git describe --abbrev=7) - REL_SFX="llvm15_0_0" - REL_NAME=${CONF_TARGET}-${REL_SFX}-${REL_NUM}-${PLATFORM_NAME} - ARCHIVE_NAME=${REL_NAME}.${ARCHIVE_EXT} - echo "PLATFORM_NAME: $PLATFORM_NAME" - echo "REL_NUM: $REL_NUM" - echo "REL_NAME: $REL_NAME" - echo "ARCHIVE_NAME: $ARCHIVE_NAME" - -.get_gcc_toolchain_old: &get_gcc_toolchain_old | - wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/esp-2021r2-patch3/${XTENSA_GCC_TOOLCHAIN} - ${UNARCHIVE_TOOL} ${XTENSA_GCC_TOOLCHAIN} - if [[ "$XTENSA_GCC_TOOLCHAIN" == *"linux-amd64"* ]]; then - cp -r xtensa-esp32-elf ${XTENSA_CLANG_TOOLCHAIN} - else - mv xtensa-esp32-elf ${XTENSA_CLANG_TOOLCHAIN} - wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/esp-2021r2-patch3/xtensa-esp32-elf-${GCC_REL_NAME}-linux-amd64.tar.gz - tar -xf xtensa-esp32-elf-${GCC_REL_NAME}-linux-amd64.tar.gz - fi - export GCC_ESP32_LINUX_TOOLCHAIN="xtensa-esp32-elf" + - *use_ci_tools_snippet -.package_toolchain_old: &package_toolchain_old | - ${ARCHIVE_TOOL} ${ARCHIVE_NAME} ${XTENSA_CLANG_TOOLCHAIN}/ - mkdir -p ${DIST_DIR} - mv ${ARCHIVE_NAME} ${DIST_DIR}/ - echo "${ARCHIVE_NAME}" > ${DIST_DIR}/file_${PLATFORM_NAME}_${CONF_TARGET} +.add_gitlab_key_snippet: &add_gitlab_key_snippet | + cit_add_ssh_key "${GITLAB_KEY}" -.build_template_old: - stage: build - tags: [ "amd64", "build" ] - artifacts: - paths: - - ${DIST_DIR}/ - when: always - expire_in: 10 day - variables: - XTENSA_CLANG_TOOLCHAIN_REF: "release_esp32_clang_15.0.0_gcc_8.4.0" - GCC_REL_NAME: "gcc8_4_0-esp-2021r2-patch3" +.add_gitlab_key: script: - - *get_release_name_old - - *get_gcc_toolchain_old - - *fix_origin_remote_for_public - - *get_clang_toolchain_build_scripts - - ${BUILD_TOOLCHAIN_CMD} "${XTENSA_CLANG_TOOLCHAIN}" - - *package_toolchain_old - -linux_amd64_build: - extends: .build_template_old - variables: - PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" - 
ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" - XTENSA_GCC_TOOLCHAIN: "xtensa-esp32-elf-${GCC_REL_NAME}-linux-amd64.tar.gz" - BUILD_TOOLCHAIN_CMD: "./build-toolchain-linux.sh" + - *add_gitlab_key_snippet -linux_arm64_build: - extends: .build_template_old - image: ${CROSS_ARM_IMAGE} - variables: - PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARM64}" - ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" - XTENSA_GCC_TOOLCHAIN: "xtensa-esp32-elf-${GCC_REL_NAME}-linux-arm64.tar.gz" - BUILD_TOOLCHAIN_CMD: "./build-toolchain-linux-arm64.sh" - -win64_build: - extends: .build_template_old - variables: - PLATFORM_NAME: "${PLATFORM_NAME_WIN}" - ARCHIVE_TOOL: "${ARCHIVE_TOOL_WIN}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_WIN}" - ARCHIVE_EXT: "${ARCHIVE_EXT_WIN}" - XTENSA_GCC_TOOLCHAIN: "xtensa-esp32-elf-${GCC_REL_NAME}-win64.zip" - BUILD_TOOLCHAIN_CMD: "./build-toolchain-win.sh" - -macos_amd64_build: - extends: .build_template_old - variables: - PLATFORM_NAME: "${PLATFORM_NAME_MACOS}" - ARCHIVE_TOOL: "${ARCHIVE_TOOL_MACOS}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_MACOS}" - ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" - XTENSA_GCC_TOOLCHAIN: "xtensa-esp32-elf-${GCC_REL_NAME}-macos.tar.gz" - BUILD_TOOLCHAIN_CMD: "./build-toolchain-macos.sh" +# LLVM Build System used the remote address to show detailed version info, we'll change it to the public repository +.fix_origin_remote_for_public_snippet: &fix_origin_remote_for_public_snippet | + git remote set-url origin "${GH_REPO_HTTPS}" -linux_amd64_testsuite: - stage: test - tags: [ "amd64", "build" ] - needs: - - job: linux_amd64_build - variables: - PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" - ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" +.fix_origin_remote_for_public: script: - - *get_release_name_old - - ${UNARCHIVE_TOOL} 
${DIST_DIR}/${ARCHIVE_NAME} - - # getting testsuite - - git clone -b ${LLVM_GCC_TESTSUITE_REF} --depth 1 $GITLAB_SSH_SERVER/idf/llvm-xtensa-testsuite.git - - # preparing testsuite - - export PATH=${PWD}/${XTENSA_CLANG_TOOLCHAIN}/bin/:$PATH - - cd llvm-xtensa-testsuite - - # qemu - - ./qemu_esp32_install.sh - - # run testsuite for esp32 - - ./run_esp32_tests.sh + - *fix_origin_remote_for_public_snippet - # run testsuite for compiler_rt library - - ./run_esp32_crt_tests.sh ../$XTENSA_CLANG_TOOLCHAIN - -##################### END OF TEMPORARY LEGACY CODE ######################## -########################################################################### +.get_clang_toolchain_build_scripts_snippet: &get_clang_toolchain_build_scripts_snippet | + git clone -b ${XTENSA_CLANG_TOOLCHAIN_REF} ${GITLAB_SSH_SERVER}/${XTENSA_CLANG_TOOLCHAIN_REPO} + cp -r xtensa-clang-toolchain/* . -upload_to_http: - stage: private_deploy - when: manual - allow_failure: true - tags: [ "deploy", "shiny" ] - variables: - # force the fetch strategy to clean old archives up in dist/ dir - GIT_STRATEGY: fetch - before_script: - - *use_ci_tools +.get_clang_toolchain_build_scripts: script: - - cit_add_ssh_key "${HTTP_UPLOAD_KEY}" - # List of archives - - FILES=$(find ${DIST_DIR} -name file_\* -exec cat {} \+) - - cd ${DIST_DIR} - - scp ${FILES} ${HTTP_UPLOAD_DIR}/ct-ng/llvm-builds - # Show info - - echo -e "\nArchives were published there:\n\n$(for n in ${FILES}; do echo "${HTTP_PUBLIC_DIR}/ct-ng/llvm-builds/${n}"; done)\n" + - *get_clang_toolchain_build_scripts_snippet -upload_to_github: - stage: public_deploy - when: manual - allow_failure: true - only: - - tags - tags: [ "amd64", "internet" ] - image: espressif/github-hub:2 - variables: - GIT_STRATEGY: fetch - GITHUB_TOKEN: "${GH_TOKEN}" - GITHUB_REPO: "${GH_REPO_HTTPS}" - TAG: "${CI_COMMIT_TAG}" - before_script: [] - script: - - ls -l dist*/ - - git remote add github ${GH_REPO_HTTPS} - - hub release show ${TAG} || { echo "Please create a release on 
GitHub with ${TAG} tag at first"; exit 1; } - # List of archives - - FILES=$(find ${DIST_DIR} -name file_\* -exec cat {} \+) - - cd ${DIST_DIR} - - ls -l $FILES - # Upload archives - - for n in ${FILES}; do hub release edit -m "" -a "${n}" "${TAG}"; done +before_script: + - !reference [.use_ci_tools, script] + - !reference [.add_gitlab_key, script] + +include: + - local: .universal-toolchain-release.yml + rules: + - if: $ESP_CLANG_LEGACY_RELEASE != "true" + - local: .legacy-release.yml + rules: + - if: $ESP_CLANG_LEGACY_RELEASE == "true" diff --git a/.legacy-release.yml b/.legacy-release.yml new file mode 100644 index 0000000000000..c46195f3fe475 --- /dev/null +++ b/.legacy-release.yml @@ -0,0 +1,164 @@ + +.get_release_name_legacy: &get_release_name_legacy | + # using annotated tags + REL_NUM=$(git describe --abbrev=7) + REL_SFX="llvm15_0_0" + REL_NAME=${CONF_TARGET}-${REL_SFX}-${REL_NUM}-${PLATFORM_NAME} + ARCHIVE_NAME=${REL_NAME}.${ARCHIVE_EXT} + echo "PLATFORM_NAME: $PLATFORM_NAME" + echo "REL_NUM: $REL_NUM" + echo "REL_NAME: $REL_NAME" + echo "ARCHIVE_NAME: $ARCHIVE_NAME" + +.get_gcc_toolchain_legacy: &get_gcc_toolchain_legacy | + wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/esp-2021r2-patch3/${XTENSA_GCC_TOOLCHAIN} + ${UNARCHIVE_TOOL} ${XTENSA_GCC_TOOLCHAIN} + if [[ "$XTENSA_GCC_TOOLCHAIN" == *"linux-amd64"* ]]; then + cp -r xtensa-esp32-elf ${XTENSA_CLANG_TOOLCHAIN} + else + mv xtensa-esp32-elf ${XTENSA_CLANG_TOOLCHAIN} + wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/esp-2021r2-patch3/xtensa-esp32-elf-${GCC_REL_NAME}-linux-amd64.tar.gz + tar -xf xtensa-esp32-elf-${GCC_REL_NAME}-linux-amd64.tar.gz + fi + export GCC_ESP32_LINUX_TOOLCHAIN="xtensa-esp32-elf" + +.package_toolchain_legacy: &package_toolchain_legacy | + ${ARCHIVE_TOOL} ${ARCHIVE_NAME} ${XTENSA_CLANG_TOOLCHAIN}/ + mkdir -p ${DIST_DIR} + mv ${ARCHIVE_NAME} ${DIST_DIR}/ + echo "${ARCHIVE_NAME}" > 
${DIST_DIR}/file_${PLATFORM_NAME}_${CONF_TARGET} + +.build_template_legacy: + stage: build + tags: [ "amd64", "build" ] + artifacts: + paths: + - ${DIST_DIR}/ + when: always + expire_in: 10 day + variables: + XTENSA_CLANG_TOOLCHAIN_REF: "release_esp32_clang_15.0.0_gcc_8.4.0" + GCC_REL_NAME: "gcc8_4_0-esp-2021r2-patch3" + script: + - *get_release_name_legacy + - *get_gcc_toolchain_legacy + - !reference [.fix_origin_remote_for_public, script] + - !reference [.get_clang_toolchain_build_scripts, script] + - ${BUILD_TOOLCHAIN_CMD} "${XTENSA_CLANG_TOOLCHAIN}" + - *package_toolchain_legacy + +linux_amd64_build: + extends: .build_template_legacy + variables: + PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" + ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + XTENSA_GCC_TOOLCHAIN: "xtensa-esp32-elf-${GCC_REL_NAME}-linux-amd64.tar.gz" + BUILD_TOOLCHAIN_CMD: "./build-toolchain-linux.sh" + +linux_arm64_build: + extends: .build_template_legacy + image: ${CROSS_ARM_IMAGE} + variables: + PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARM64}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" + ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + XTENSA_GCC_TOOLCHAIN: "xtensa-esp32-elf-${GCC_REL_NAME}-linux-arm64.tar.gz" + BUILD_TOOLCHAIN_CMD: "./build-toolchain-linux-arm64.sh" + +win64_build: + extends: .build_template_legacy + variables: + PLATFORM_NAME: "${PLATFORM_NAME_WIN}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_WIN}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_WIN}" + ARCHIVE_EXT: "${ARCHIVE_EXT_WIN}" + XTENSA_GCC_TOOLCHAIN: "xtensa-esp32-elf-${GCC_REL_NAME}-win64.zip" + BUILD_TOOLCHAIN_CMD: "./build-toolchain-win.sh" + +macos_amd64_build: + extends: .build_template_legacy + variables: + PLATFORM_NAME: "${PLATFORM_NAME_MACOS}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_MACOS}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_MACOS}" + ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" + XTENSA_GCC_TOOLCHAIN: "xtensa-esp32-elf-${GCC_REL_NAME}-macos.tar.gz" 
+ BUILD_TOOLCHAIN_CMD: "./build-toolchain-macos.sh" + +linux_amd64_testsuite: + stage: test + tags: [ "amd64", "build" ] + needs: + - job: linux_amd64_build + variables: + PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" + ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + LLVM_GCC_TESTSUITE_REF: "feature/ci_llvm_multitarget_crt_tests" + script: + - *get_release_name_legacy + - ${UNARCHIVE_TOOL} ${DIST_DIR}/${ARCHIVE_NAME} + + # getting testsuite + - git clone -b ${LLVM_GCC_TESTSUITE_REF} --depth 1 $GITLAB_SSH_SERVER/idf/llvm-xtensa-testsuite.git + + # preparing testsuite + - export PATH=${PWD}/${XTENSA_CLANG_TOOLCHAIN}/bin/:$PATH + - cd llvm-xtensa-testsuite + + # qemu + - ./qemu_esp32_install.sh + + # run testsuite for esp32 + - ./run_esp32_tests.sh + + # run testsuite for compiler_rt library + - ./run_esp32_crt_tests.sh ../$XTENSA_CLANG_TOOLCHAIN + +upload_to_http_legacy: + stage: private_deploy + when: manual + allow_failure: true + tags: [ "deploy", "shiny" ] + variables: + # force the fetch strategy to clean old archives up in dist/ dir + GIT_STRATEGY: fetch + before_script: + - !reference [.use_ci_tools, script] + script: + - cit_add_ssh_key "${HTTP_UPLOAD_KEY}" + # List of archives + - FILES=$(find ${DIST_DIR} -name file_\* -exec cat {} \+) + - cd ${DIST_DIR} + - scp ${FILES} ${HTTP_UPLOAD_DIR}/ct-ng/llvm-builds + # Show info + - echo -e "\nArchives were published there:\n\n$(for n in ${FILES}; do echo "${HTTP_PUBLIC_DIR}/ct-ng/llvm-builds/${n}"; done)\n" + +upload_to_github_legacy: + stage: public_deploy + when: manual + allow_failure: true + only: + - tags + tags: [ "amd64", "internet" ] + image: espressif/github-hub:2 + variables: + GIT_STRATEGY: fetch + GITHUB_TOKEN: "${GH_TOKEN}" + GITHUB_REPO: "${GH_REPO_HTTPS}" + TAG: "${CI_COMMIT_TAG}" + before_script: [] + script: + - ls -l dist*/ + - git remote add github ${GH_REPO_HTTPS} + - hub release show ${TAG} || { echo "Please create a release on 
GitHub with ${TAG} tag at first"; exit 1; } + # List of archives + - FILES=$(find ${DIST_DIR} -name file_\* -exec cat {} \+) + - cd ${DIST_DIR} + - ls -l $FILES + # Upload archives + - for n in ${FILES}; do hub release edit -m "" -a "${n}" "${TAG}"; done diff --git a/.universal-toolchain-release.yml b/.universal-toolchain-release.yml new file mode 100644 index 0000000000000..baf00964c2439 --- /dev/null +++ b/.universal-toolchain-release.yml @@ -0,0 +1,409 @@ + +# Prepare release name/number +.get_release_name: &get_release_name | + # using annotated tags + REL_NUM=$(git describe --abbrev=7) + REL_NAME=${REL_SFX}-${REL_NUM}-${PLATFORM_NAME} + ARCHIVE_NAME=${REL_NAME}.${ARCHIVE_EXT} + LIBS_ARCHIVE_NAME=libs_${REL_NAME}.${LIBS_ARCHIVE_EXT} + echo "PLATFORM_NAME: $PLATFORM_NAME" + echo "REL_NUM: $REL_NUM" + echo "REL_NAME: $REL_NAME" + echo "ARCHIVE_NAME: $ARCHIVE_NAME" + +# Get an existing crosstool-ng builds for all chips +.get_gcc_toolchain: &get_gcc_toolchain | + declare -a XTENSA_CPUS=("esp32" + "esp32s2" + "esp32s3") + for ((i = 0; i < ${#XTENSA_CPUS[@]}; i++)); do + XTENSA_CPU=${XTENSA_CPUS[$i]} + GCC_TOOLCHAIN_ARCH=xtensa-${XTENSA_CPU}-elf-${GCC_REL_VER}-${GCC_REL_NAME}-${PLATFORM_NAME}.${GCC_ARCHIVE_EXT} + wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/${GCC_REL_NAME}/${GCC_TOOLCHAIN_ARCH} + ${GCC_UNARCHIVE_TOOL} ${GCC_TOOLCHAIN_ARCH} + done; + GCC_TOOLCHAIN_ARCH=riscv32-esp-elf-${GCC_REL_VER}-${GCC_REL_NAME}-${PLATFORM_NAME}.${GCC_ARCHIVE_EXT} + wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/${GCC_REL_NAME}/${GCC_TOOLCHAIN_ARCH} + ${GCC_UNARCHIVE_TOOL} ${GCC_TOOLCHAIN_ARCH} + +# Pack the toolchain +.package_toolchain: &package_toolchain | + ${ARCHIVE_TOOL} ${ARCHIVE_NAME} esp-clang/ + mkdir -p ${DISTRO_DIR} + mv ${ARCHIVE_NAME} ${DISTRO_DIR}/ + echo "${ARCHIVE_NAME}" > ${DISTRO_DIR}/file_${PLATFORM_NAME} + +# Pack libs to be used for Rust, Go etc. 
+.package_libs: &package_libs | + ${LIBS_ARCHIVE_TOOL} ${LIBS_ARCHIVE_NAME} esp-clang/lib/libclang* esp-clang/lib/clang/${CLANG_VER}/include + mkdir -p ${DISTRO_DIR} + mv ${LIBS_ARCHIVE_NAME} ${DISTRO_DIR}/ + echo "${LIBS_ARCHIVE_NAME}" > ${DISTRO_DIR}/file_libs-${PLATFORM_NAME} + +.get_binutils: &get_binutils | + git clone -b ${BINUTILS_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${BINUTILS_REPO}.git + BINUTILS_PATH=$PWD/${BINUTILS_REPO} + +.get_xtensa_overlays: &get_xtensa_overlays | + git clone -b ${XTENSA_OVERLAYS_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${XTENSA_OVERLAYS_REPO}.git + XTENSA_OVERLAYS_PATH=$PWD/${XTENSA_OVERLAYS_REPO} + +.get_newlib: &get_newlib | + git clone -b ${NEWLIB_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${NEWLIB_REPO}.git + NEWLIB_PATH=$PWD/${NEWLIB_REPO} + +.build_template: + stage: build + tags: [ "amd64", "build" ] + artifacts: + paths: + - ${DIST_DIR}/ + - ${BUILD_DIR}/tests.log + - ${BUILD_DIR}/build.log + when: always + expire_in: 1 day + variables: + BUILD_TOOLCHAIN_CMD_EXTRA_ARGS: "" + script: + - *get_release_name + - mkdir ${DOWNLOADS_DIR} + - pushd ${DOWNLOADS_DIR} + - ESP_GCC_TOOLCHAIN_DIST_BASE=$PWD + - *get_gcc_toolchain + - *get_binutils + - *get_xtensa_overlays + - popd + - !reference [.get_clang_toolchain_build_scripts, script] + - !reference [.fix_origin_remote_for_public, script] + - LLVM_PROJECT_PATH=$PWD + - BUILD_PATH=$PWD/${BUILD_DIR} + - mkdir -p ${BUILD_PATH} + - export USE_PARALLEL_LINK_JOBS=2 + # build Clang toolchain w/o newlib + - ${BUILD_TOOLCHAIN_CMD} --llvm-path=${LLVM_PROJECT_PATH} + --gcc-toolchains-path=${ESP_GCC_TOOLCHAIN_DIST_BASE} --binutils-path=${BINUTILS_PATH} + --xtensa-overlays-path=${XTENSA_OVERLAYS_PATH} --host=${CONF_HOST} ${BUILD_TOOLCHAIN_CMD_EXTRA_ARGS} ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/build.log + - BUILD_HOST=$(gcc -dumpmachine) + # Do not run unit tests for cross-builds. + # Run as non-root user because permission tests fail when run by root. 
+ - if [ "${CONF_HOST}" == "${BUILD_HOST}" ]; then + export LLVM_BUILD_PATH=${LLVM_PROJECT_PATH}/llvm/build-${CONF_HOST}-Release; + echo "Run unit tests for native build in ${LLVM_BUILD_PATH}"; + useradd -m test_runner; + chown -R test_runner ${LLVM_BUILD_PATH}; + touch ${BUILD_PATH}/tests.log; + chmod o+w ${BUILD_PATH}/tests.log; + runuser -l test_runner -c 'cmake --build '${LLVM_BUILD_PATH}' --target check-all 2>&1 > '${BUILD_PATH}'/tests.log'; + fi + - export DISTRO_DIR=$PWD/$DIST_DIR + - pushd ${BUILD_PATH} + - *package_toolchain + - popd + +.build_linux-gnu_template: + extends: .build_template + variables: + ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + GCC_UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" + GCC_ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + BUILD_TOOLCHAIN_CMD: "./build-toolchain.sh" + +build_x86_64-linux-gnu: + extends: .build_linux-gnu_template + variables: + CONF_HOST: "x86_64-linux-gnu" + PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" + +build_arm-linux-gnueabihf: + extends: .build_linux-gnu_template + image: ${CROSS_ARM_IMAGE} + variables: + CONF_HOST: "arm-linux-gnueabihf" + PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARMHF}" + +build_aarch64-linux-gnu: + extends: .build_linux-gnu_template + image: ${CROSS_ARM_IMAGE} + variables: + CONF_HOST: "aarch64-linux-gnu" + PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARM64}" + +build_x86_64-w64-mingw32: + extends: .build_template + needs: + # needs native toolchain and newlib from this job + - job: build_x86_64-linux-gnu + before_script: + - !reference [.use_ci_tools, script] + - !reference [.add_gitlab_key, script] + # get ARCHIVE_NAME for Linux release. 
Modify vars to make get_release_name working properly + - CLANG_LINUX_ARCHIVE=$(cat ${DIST_DIR}/file_${PLATFORM_NAME_LINUX}) + # unpack x86_64-linux-gnu toolchain to re-use it as native Clang for Windows build + - mkdir -p esp-clang-${PLATFORM_NAME_LINUX} + - ${UNARCHIVE_TOOL_LINUX} ${DIST_DIR}/${CLANG_LINUX_ARCHIVE} -C esp-clang-${PLATFORM_NAME_LINUX} + # we do not want to keep artifacts from 'x86_64-linux-gnu' job + - rm -rf ${DIST_DIR} + - rm -rf ${BUILD_DIR} + # add build command args speciifc for Windows build + - export BUILD_TOOLCHAIN_CMD_EXTRA_ARGS="--native-esp-clang-path=$PWD/esp-clang-${PLATFORM_NAME_LINUX}" + variables: + CONF_HOST: "x86_64-w64-mingw32" + PLATFORM_NAME: "${PLATFORM_NAME_WIN}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + GCC_UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_WIN}" + GCC_ARCHIVE_EXT: "${ARCHIVE_EXT_WIN}" + BUILD_TOOLCHAIN_CMD: "./build-toolchain-win.sh" + +.build_apple-darwin_template: + extends: .build_template + variables: + ARCHIVE_TOOL: "${ARCHIVE_TOOL_MACOS}" + ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" + GCC_UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_MACOS}" + GCC_ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" + BUILD_TOOLCHAIN_CMD: "./build-toolchain.sh" + +build_x86_64-apple-darwin: + extends: .build_apple-darwin_template + variables: + CONF_HOST: "x86_64-apple-darwin21.1" + PLATFORM_NAME: "${PLATFORM_NAME_MACOS}" + +build_aarch64-apple-darwin: + extends: .build_apple-darwin_template + variables: + CONF_HOST: "aarch64-apple-darwin21.1" + PLATFORM_NAME: "${PLATFORM_NAME_MACOS_ARM64}" + +build_newlib: + stage: build + tags: [ "amd64", "build" ] + needs: + # needs native toolchainfrom this job + - job: build_x86_64-linux-gnu + artifacts: + paths: + - ${DIST_DIR}/ + - ${BUILD_DIR}/build.log + when: always + expire_in: 1 day + variables: + PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" + ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + script: + # get 
ARCHIVE_NAME for Linux release. + - CLANG_ARCHIVE=$PWD/${DIST_DIR}/$(cat ${DIST_DIR}/file_${PLATFORM_NAME_LINUX}) + - mkdir -p ${DOWNLOADS_DIR} + - pushd ${DOWNLOADS_DIR} + - *get_xtensa_overlays + - *get_newlib + # unpack clang + - ${UNARCHIVE_TOOL} ${CLANG_ARCHIVE} + - export PATH=$PWD/esp-clang/bin:$PATH + - popd + - rm -rf $PWD/${DIST_DIR} + - !reference [.get_clang_toolchain_build_scripts, script] + # build newlib overlay using ESP native (Linux) clang toolchain only + # it will be re-used for cross-buit toolchains (win and mac). + - NEWLIB_OVERLAY_DISTRO_PATH=$PWD/${DIST_DIR} + - mkdir -p ${NEWLIB_OVERLAY_DISTRO_PATH} + - BUILD_PATH=$PWD/${BUILD_DIR} + - mkdir -p ${BUILD_PATH} + - ./build-toolchain.sh --newlib-path=${NEWLIB_PATH} --xtensa-overlays-path=${XTENSA_OVERLAYS_PATH} ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/build.log + - pushd ${BUILD_PATH} + - ${ARCHIVE_TOOL_NEWLIB} ${NEWLIB_OVERLAY_DISTRO_PATH}/esp-clang-newlib-overlay.${ARCHIVE_EXT_NEWLIB} esp-clang/ + - popd + +.pack_template: + stage: pack + tags: [ "amd64", "build" ] + artifacts: + paths: + - ${DIST_DIR}/ + when: always + expire_in: 3 day + script: + - *get_release_name + - export BUILD_PATH=$PWD/${BUILD_DIR} + - mkdir -p ${BUILD_PATH} + # unpack clang + - ${UNARCHIVE_TOOL} ${DIST_DIR}/${ARCHIVE_NAME} -C ${BUILD_PATH} + # unpack newlib + - ${UNARCHIVE_TOOL_NEWLIB} ${DIST_DIR}/esp-clang-newlib-overlay.${ARCHIVE_EXT_NEWLIB} -C ${BUILD_PATH} + - rm -rf ${DIST_DIR} + - !reference [.get_clang_toolchain_build_scripts, script] + # strip binutils afer newlib is built + - STRIP_BINUTILS=YES ./build-toolchain.sh --host=${CONF_HOST} ${BUILD_PATH} + - DISTRO_DIR=$PWD/${DIST_DIR} + - pushd ${BUILD_PATH} + - *package_toolchain + - *package_libs + - popd + +.pack_linux-gnu_template: + extends: .pack_template + variables: + ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" + ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + LIBS_ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + LIBS_UNARCHIVE_TOOL: 
"${UNARCHIVE_TOOL_LINUX}" + LIBS_ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + +pack_x86_64-linux-gnu: + extends: .pack_linux-gnu_template + needs: + - job: build_x86_64-linux-gnu + - job: build_newlib + variables: + CONF_HOST: "x86_64-linux-gnu" + PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" + +pack_arm-linux-gnueabihf: + extends: .pack_linux-gnu_template + image: ${CROSS_ARM_IMAGE} + needs: + - job: build_arm-linux-gnueabihf + - job: build_newlib + variables: + CONF_HOST: "arm-linux-gnueabihf" + PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARMHF}" + +pack_aarch64-linux-gnu: + extends: .pack_linux-gnu_template + image: ${CROSS_ARM_IMAGE} + needs: + - job: build_aarch64-linux-gnu + - job: build_newlib + variables: + CONF_HOST: "aarch64-linux-gnu" + PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARM64}" + +pack_x86_64-w64-mingw32: + extends: .pack_template + needs: + - job: build_x86_64-w64-mingw32 + - job: build_newlib + variables: + CONF_HOST: "x86_64-w64-mingw32" + PLATFORM_NAME: "${PLATFORM_NAME_WIN}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" + ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + LIBS_ARCHIVE_TOOL: "${ARCHIVE_TOOL_WIN}" + LIBS_UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_WIN}" + LIBS_ARCHIVE_EXT: "${ARCHIVE_EXT_WIN}" + +.pack_apple-darwin_template: + extends: .pack_template + variables: + ARCHIVE_TOOL: "${ARCHIVE_TOOL_MACOS}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_MACOS}" + ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" + LIBS_ARCHIVE_TOOL: "${ARCHIVE_TOOL_MACOS}" + LIBS_UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_MACOS}" + LIBS_ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" + +pack_x86_64-apple-darwin: + extends: .pack_apple-darwin_template + needs: + - job: build_x86_64-apple-darwin + - job: build_newlib + variables: + CONF_HOST: "x86_64-apple-darwin21.1" + PLATFORM_NAME: "${PLATFORM_NAME_MACOS}" + +pack_aarch64-apple-darwin: + extends: .pack_apple-darwin_template + needs: + - job: build_aarch64-apple-darwin + - job: build_newlib + variables: + CONF_HOST: "aarch64-apple-darwin21.1" + 
PLATFORM_NAME: "${PLATFORM_NAME_MACOS_ARM64}" + +test_x86_64-linux-gnu: + stage: test + tags: [ "amd64", "build" ] + needs: + - job: pack_x86_64-linux-gnu + variables: + PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" + ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + script: + - *get_release_name + - ${UNARCHIVE_TOOL} ${DIST_DIR}/${ARCHIVE_NAME} + # getting testsuite + - git clone -b ${LLVM_GCC_TESTSUITE_REF} --depth 1 $GITLAB_SSH_SERVER/idf/llvm-xtensa-testsuite.git + # preparing testsuite + - export PATH=${PWD}/esp-clang/bin:$PATH + - cd llvm-xtensa-testsuite + # qemu + - ./qemu_esp32_install.sh + # run testsuite for esp32 + - ./run_esp32_tests.sh + +upload_to_http: + stage: private_deploy + when: manual + allow_failure: true + tags: [ "deploy", "shiny" ] + variables: + # force the fetch strategy to clean old archives up in dist/ dir + GIT_STRATEGY: fetch + needs: + - job: pack_x86_64-linux-gnu + - job: pack_arm-linux-gnueabihf + - job: pack_aarch64-linux-gnu + - job: pack_x86_64-w64-mingw32 + - job: pack_x86_64-apple-darwin + - job: pack_aarch64-apple-darwin + before_script: + - !reference [.use_ci_tools, script] + script: + - cit_add_ssh_key "${HTTP_UPLOAD_KEY}" + # List of archives + - FILES=$(find ${DIST_DIR} -name file_\* -exec cat {} \+) + - cd ${DIST_DIR} + - ls -l $FILES + - scp ${FILES} ${HTTP_UPLOAD_DIR}/ct-ng/llvm-builds + # Show info + - echo -e "\nArchives were published there:\n\n$(for n in ${FILES}; do echo "${HTTP_PUBLIC_DIR}/ct-ng/llvm-builds/${n}"; done)\n" + +upload_to_github: + stage: public_deploy + when: manual + allow_failure: true + only: + - tags + tags: [ "amd64", "internet" ] + image: espressif/github-hub:2 + variables: + GIT_STRATEGY: fetch + GITHUB_TOKEN: "${GH_TOKEN}" + GITHUB_REPO: "${GH_REPO_HTTPS}" + TAG: "${CI_COMMIT_TAG}" + needs: + - job: pack_x86_64-linux-gnu + - job: pack_arm-linux-gnueabihf + - job: pack_aarch64-linux-gnu + - job: pack_x86_64-w64-mingw32 + - 
job: pack_x86_64-apple-darwin + - job: pack_aarch64-apple-darwin + before_script: [] + script: + - ls -l dist*/ + - git remote add github ${GH_REPO_HTTPS} + - hub release show ${TAG} || { echo "Please create a release on GitHub with ${TAG} tag at first"; exit 1; } + # List of archives + - FILES=$(find ${DIST_DIR} -name file_\* -exec cat {} \+) + - cd ${DIST_DIR} + - ls -l $FILES + # Upload archives + - for n in ${FILES}; do hub release edit -m "" -a "${n}" "${TAG}"; done From 3186bda563c823c345005821f13535f69f70f9b9 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:14 +0300 Subject: [PATCH 106/289] esp/ci: Adds MacOS binaries signing stage --- .gitlab-ci.yml | 1 + .universal-toolchain-release.yml | 59 +++++++++++++++++++++++++++++--- 2 files changed, 56 insertions(+), 4 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index d4a2c816f97ef..e152a1c90a6e9 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,6 +1,7 @@ stages: - build - pack + - sign - private_deploy - test - public_deploy diff --git a/.universal-toolchain-release.yml b/.universal-toolchain-release.yml index baf00964c2439..3f15eb9637a31 100644 --- a/.universal-toolchain-release.yml +++ b/.universal-toolchain-release.yml @@ -349,6 +349,57 @@ test_x86_64-linux-gnu: # run testsuite for esp32 - ./run_esp32_tests.sh +.macos_codesign: &macos_codesign + stage: sign + tags: [ "darwin", "amd64" ] + resource_group: macos_codesign + artifacts: + paths: + - ${DIST_DIR}/ + when: always + expire_in: 3 day + variables: + KEYCHAIN_NAME: "llvm.keychain" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_MACOS}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_MACOS}" + ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" + script: + - *get_release_name + - ${UNARCHIVE_TOOL} ${DIST_DIR}/${ARCHIVE_NAME} + - rm -rf ${DIST_DIR} + - TOOLCHAIN_PATH=$PWD/esp-clang + - echo $MACOS_CERTIFICATE | base64 --decode > $PWD/certificate.p12 + - security create-keychain -p $KEYCHAIN_PWD $KEYCHAIN_NAME || true + - security import $PWD/certificate.p12 
-k $KEYCHAIN_NAME -P $MACOS_CERTIFICATE_PWD -T /usr/bin/codesign + - security set-key-partition-list -S apple-tool:,apple:,codesign -s -k $KEYCHAIN_PWD $KEYCHAIN_NAME + - security list-keychains -d user -s ~/Library/Keychains/$KEYCHAIN_NAME + - security find-identity -v -p codesigning + - security unlock-keychain -p $KEYCHAIN_PWD $KEYCHAIN_NAME + - /usr/bin/codesign -v --force --options runtime -s $IDENTITY_ID $TOOLCHAIN_PATH/bin/* $TOOLCHAIN_PATH/lib/*.dylib + - security delete-keychain $KEYCHAIN_NAME + - codesign -dvv $TOOLCHAIN_PATH/bin/* + - DISTRO_DIR=$PWD/${DIST_DIR} + - *package_toolchain + - *package_libs + after_script: + - security find-identity -v + - security delete-keychain $KEYCHAIN_NAME + - security find-identity -v + +sign_x86_64-apple-darwin: + extends: .macos_codesign + needs: + - pack_x86_64-apple-darwin + variables: + PLATFORM_NAME: "${PLATFORM_NAME_MACOS}" + +sign_aarch64-apple-darwin: + extends: .macos_codesign + needs: + - pack_aarch64-apple-darwin + variables: + PLATFORM_NAME: "${PLATFORM_NAME_MACOS_ARM64}" + upload_to_http: stage: private_deploy when: manual @@ -362,8 +413,8 @@ upload_to_http: - job: pack_arm-linux-gnueabihf - job: pack_aarch64-linux-gnu - job: pack_x86_64-w64-mingw32 - - job: pack_x86_64-apple-darwin - - job: pack_aarch64-apple-darwin + - job: sign_x86_64-apple-darwin + - job: sign_aarch64-apple-darwin before_script: - !reference [.use_ci_tools, script] script: @@ -394,8 +445,8 @@ upload_to_github: - job: pack_arm-linux-gnueabihf - job: pack_aarch64-linux-gnu - job: pack_x86_64-w64-mingw32 - - job: pack_x86_64-apple-darwin - - job: pack_aarch64-apple-darwin + - job: sign_x86_64-apple-darwin + - job: sign_aarch64-apple-darwin before_script: [] script: - ls -l dist*/ From 78397d1bfecbf8fab7ac33dad5ca585c9257d3b3 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 17:42:04 +0300 Subject: [PATCH 107/289] [Xtensa] Xtensa ABI 128bit arg alignment - Forces 128bit arguments to have 128bit alignment as per the 
Xtensa ABI in LLVM & Clang. - Adds a check in the Xtensa calling convention to ensure 128bit aligned arguments are always passed as the first argument _or_ passed via the stack. --- llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp index 2a2569e633770..74da2fc6d5f18 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp +++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp @@ -34,7 +34,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeXtensaTarget() { static std::string computeDataLayout(const Triple &TT, StringRef CPU, const TargetOptions &Options, bool IsLittle) { - std::string Ret = "e-m:e-p:32:32-i8:8:32-i16:16:32-i64:64-n32"; + std::string Ret = "e-m:e-p:32:32-i8:8:32-i16:16:32-i64:64-i128:128-n32"; return Ret; } From 382069f318aecffd6b9bfed468f017036499dd33 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 27 Sep 2023 01:35:28 +0300 Subject: [PATCH 108/289] [Xtensa] Fix Call ABI for 16 byte alignment. 
--- clang/lib/CodeGen/Targets/Xtensa.cpp | 7 ++++--- clang/test/CodeGen/xtensa-abi.c | 14 ++++++++++++++ 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/clang/lib/CodeGen/Targets/Xtensa.cpp b/clang/lib/CodeGen/Targets/Xtensa.cpp index 3d22b18f146b2..65f5d5383454e 100644 --- a/clang/lib/CodeGen/Targets/Xtensa.cpp +++ b/clang/lib/CodeGen/Targets/Xtensa.cpp @@ -94,9 +94,8 @@ ABIArgInfo XtensaABIInfo::classifyArgumentType(QualType Ty, if (Size < 32 && Ty->isIntegralOrEnumerationType() && !MustUseStack) { return extendType(Ty); } - if (Size == 64) - return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64)); - return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 32)); + // Assume that type has 32, 64 or 128 bits + return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); } // Aggregates which are <= 6*32 will be passed in registers if possible, @@ -107,6 +106,8 @@ ABIArgInfo XtensaABIInfo::classifyArgumentType(QualType Ty, } else if (NeededAlign == (2 * 32)) { return ABIArgInfo::getDirect(llvm::ArrayType::get( llvm::IntegerType::get(getVMContext(), 64), NeededArgGPRs / 2)); + } else if (NeededAlign == (4 * 32)) { + return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 128)); } else { return ABIArgInfo::getDirect(llvm::ArrayType::get( llvm::IntegerType::get(getVMContext(), 32), NeededArgGPRs)); diff --git a/clang/test/CodeGen/xtensa-abi.c b/clang/test/CodeGen/xtensa-abi.c index df7a99d77bc0f..25b1fbd2d1b97 100644 --- a/clang/test/CodeGen/xtensa-abi.c +++ b/clang/test/CodeGen/xtensa-abi.c @@ -12,3 +12,17 @@ char *bufalloc () } // CHECK: define dso_local noalias ptr @bufalloc() #0 { + +struct S16 { int a[4]; } __attribute__ ((aligned (16))); + +void callee_struct_a16b_1(struct S16 a) {} + +// CHECK: define dso_local void @callee_struct_a16b_1(i128 %a.coerce) + +void callee_struct_a16b_2(struct S16 a, int b) {} + +// CHECK: define dso_local void @callee_struct_a16b_2(i128 %a.coerce, i32 noundef %b) 
+ +void callee_struct_a16b_3(int a, struct S16 b) {} + +// CHECK: define dso_local void @callee_struct_a16b_3(i32 noundef %a, %struct.S16* noundef byval(%struct.S16) align 16 %b) From ddaea285159bfa654f2f041b8c8df3b452711041 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:15 +0300 Subject: [PATCH 109/289] [Xtensa] Add IR test for 16byte alignment. --- .../test/CodeGen/Xtensa/calling-conv-call8.ll | 170 ++++++++++++++++++ 1 file changed, 170 insertions(+) create mode 100644 llvm/test/CodeGen/Xtensa/calling-conv-call8.ll diff --git a/llvm/test/CodeGen/Xtensa/calling-conv-call8.ll b/llvm/test/CodeGen/Xtensa/calling-conv-call8.ll new file mode 100644 index 0000000000000..16056b99101fb --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/calling-conv-call8.ll @@ -0,0 +1,170 @@ +; RUN: llc -mtriple=xtensa -mcpu=esp32 -O1 -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=XTENSA-STRUCT16 %s +; RUN: llc -mtriple=xtensa -mcpu=esp32 -O1 -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=XTENSA-I128 %s + +%struct.S = type { [4 x i32] } + +@v = dso_local global i32 0, align 4 + +define dso_local void @caller_struct_a128b_1([4 x i32] %0) { + +; XTENSA-STRUCT16-LABEL: caller_struct_a128b_1: +; XTENSA-STRUCT16-NEXT: .cfi_startproc +; XTENSA-STRUCT16: # %bb.0: +; XTENSA-STRUCT16-NEXT: entry a1, 32 +; XTENSA-STRUCT16-NEXT: .cfi_def_cfa_offset 32 +; XTENSA-STRUCT16-NEXT: l32r a8, .LCPI0_0 +; XTENSA-STRUCT16-NEXT: mov.n a10, a2 +; XTENSA-STRUCT16-NEXT: mov.n a11, a3 +; XTENSA-STRUCT16-NEXT: mov.n a12, a4 +; XTENSA-STRUCT16-NEXT: mov.n a13, a5 +; XTENSA-STRUCT16-NEXT: callx8 a8 +; XTENSA-STRUCT16-NEXT: retw.n + + call void @callee_struct_a128b_1([4 x i32] %0) + ret void +} + +declare dso_local void @callee_struct_a128b_1([4 x i32]) + +define dso_local void @caller_struct_a128b_2([4 x i32] %0) { +; XTENSA-STRUCT16-LABEL: caller_struct_a128b_2: +; XTENSA-STRUCT16: .cfi_startproc +; XTENSA-STRUCT16-NEXT: # %bb.0: +; XTENSA-STRUCT16-NEXT: entry 
a1, 32 +; XTENSA-STRUCT16-NEXT: .cfi_def_cfa_offset 32 +; XTENSA-STRUCT16-NEXT: l32r a8, .LCPI1_0 +; XTENSA-STRUCT16-NEXT: l32i.n a14, a8, 0 +; XTENSA-STRUCT16-NEXT: l32r a8, .LCPI1_1 +; XTENSA-STRUCT16-NEXT: mov.n a10, a2 +; XTENSA-STRUCT16-NEXT: mov.n a11, a3 +; XTENSA-STRUCT16-NEXT: mov.n a12, a4 +; XTENSA-STRUCT16-NEXT: mov.n a13, a5 +; XTENSA-STRUCT16-NEXT: callx8 a8 +; XTENSA-STRUCT16-NEXT: retw.n + + %2 = load i32, i32* @v, align 4 + call void @callee_struct_a128b_2([4 x i32] %0, i32 noundef %2) + ret void +} + +declare dso_local void @callee_struct_a128b_2([4 x i32], i32 noundef) + +define dso_local void @caller_struct_a128b_3([4 x i32] %0) { +; XTENSA-STRUCT16-LABEL: caller_struct_a128b_3: +; XTENSA-STRUCT16: .cfi_startproc +; XTENSA-STRUCT16-NEXT: # %bb.0: +; XTENSA-STRUCT16-NEXT: entry a1, 64 +; XTENSA-STRUCT16-NEXT: .cfi_def_cfa_offset 64 +; XTENSA-STRUCT16-NEXT: s32i.n a5, a1, 28 +; XTENSA-STRUCT16-NEXT: s32i.n a4, a1, 24 +; XTENSA-STRUCT16-NEXT: s32i.n a3, a1, 20 +; XTENSA-STRUCT16-NEXT: s32i.n a2, a1, 16 +; XTENSA-STRUCT16-NEXT: l32r a8, .LCPI2_0 +; XTENSA-STRUCT16-NEXT: l32i.n a10, a8, 0 +; XTENSA-STRUCT16-NEXT: l32i.n a8, a1, 28 +; XTENSA-STRUCT16-NEXT: s32i.n a8, a1, 12 +; XTENSA-STRUCT16-NEXT: l32i.n a8, a1, 24 +; XTENSA-STRUCT16-NEXT: s32i.n a8, a1, 8 +; XTENSA-STRUCT16-NEXT: l32i.n a8, a1, 20 +; XTENSA-STRUCT16-NEXT: s32i.n a8, a1, 4 +; XTENSA-STRUCT16-NEXT: l32i.n a8, a1, 16 +; XTENSA-STRUCT16-NEXT: s32i.n a8, a1, 0 +; XTENSA-STRUCT16-NEXT: l32r a8, .LCPI2_1 +; XTENSA-STRUCT16-NEXT: callx8 a8 +; XTENSA-STRUCT16-NEXT: retw.n + + %2 = alloca %struct.S, align 16 + %3 = extractvalue [4 x i32] %0, 0 + %4 = getelementptr inbounds %struct.S, %struct.S* %2, i32 0, i32 0, i32 0 + store i32 %3, i32* %4, align 16 + %5 = extractvalue [4 x i32] %0, 1 + %6 = getelementptr inbounds %struct.S, %struct.S* %2, i32 0, i32 0, i32 1 + store i32 %5, i32* %6, align 4 + %7 = extractvalue [4 x i32] %0, 2 + %8 = getelementptr inbounds %struct.S, %struct.S* %2, i32 0, 
i32 0, i32 2 + store i32 %7, i32* %8, align 8 + %9 = extractvalue [4 x i32] %0, 3 + %10 = getelementptr inbounds %struct.S, %struct.S* %2, i32 0, i32 0, i32 3 + store i32 %9, i32* %10, align 4 + %11 = load i32, i32* @v, align 4 + call void @callee_struct_a128b_3(i32 noundef %11, %struct.S* noundef nonnull byval(%struct.S) align 16 %2) + ret void +} + +declare dso_local void @callee_struct_a128b_3(i32 noundef, %struct.S* noundef byval(%struct.S) align 16) + +define dso_local void @caller_i128b_1(i128 noundef %0) { +; XTENSA-I128-LABEL: caller_i128b_1: +; XTENSA-I128: .cfi_startproc +; XTENSA-I128-NEXT: # %bb.0: +; XTENSA-I128-NEXT: entry a1, 32 +; XTENSA-I128-NEXT: .cfi_def_cfa_offset 32 +; XTENSA-I128-NEXT: l32r a8, .LCPI3_0 +; XTENSA-I128-NEXT: mov.n a10, a2 +; XTENSA-I128-NEXT: mov.n a11, a3 +; XTENSA-I128-NEXT: mov.n a12, a4 +; XTENSA-I128-NEXT: mov.n a13, a5 +; XTENSA-I128-NEXT: callx8 a8 +; XTENSA-I128-NEXT: retw.n + + call void @callee_i128b_1(i128 noundef %0) + ret void +} + +declare dso_local void @callee_i128b_1(i128 noundef) + +define dso_local void @caller_i128b_2(i128 noundef %0) { +; XTENSA-I128-LABEL: caller_i128b_2: +; XTENSA-I128: .cfi_startproc +; XTENSA-I128-NEXT: # %bb.0: +; XTENSA-I128-NEXT: entry a1, 32 +; XTENSA-I128-NEXT: .cfi_def_cfa_offset 32 +; XTENSA-I128-NEXT: l32r a8, .LCPI4_0 +; XTENSA-I128-NEXT: l32i.n a14, a8, 0 +; XTENSA-I128-NEXT: l32r a8, .LCPI4_1 +; XTENSA-I128-NEXT: mov.n a10, a2 +; XTENSA-I128-NEXT: mov.n a11, a3 +; XTENSA-I128-NEXT: mov.n a12, a4 +; XTENSA-I128-NEXT: mov.n a13, a5 +; XTENSA-I128-NEXT: callx8 a8 +; XTENSA-I128-NEXT: retw.n + + %2 = load i32, i32* @v, align 4 + call void @callee_i128b_2(i128 noundef %0, i32 noundef %2) + ret void +} + +declare dso_local void @callee_i128b_2(i128 noundef, i32 noundef) + +define dso_local void @caller_i128b_3(i128 noundef %0) { +; XTENSA-I128-LABEL: caller_i128b_3: +; XTENSA-I128: .cfi_startproc +; XTENSA-I128-NEXT: # %bb.0: +; XTENSA-I128-NEXT: entry a1, 64 +; XTENSA-I128-NEXT: 
.cfi_def_cfa_offset 64 +; XTENSA-I128-NEXT: s32i.n a5, a1, 28 +; XTENSA-I128-NEXT: s32i.n a4, a1, 24 +; XTENSA-I128-NEXT: s32i.n a3, a1, 20 +; XTENSA-I128-NEXT: s32i.n a2, a1, 16 +; XTENSA-I128-NEXT: l32r a8, .LCPI5_0 +; XTENSA-I128-NEXT: l32i.n a10, a8, 0 +; XTENSA-I128-NEXT: l32i.n a8, a1, 28 +; XTENSA-I128-NEXT: s32i.n a8, a1, 12 +; XTENSA-I128-NEXT: l32i.n a8, a1, 24 +; XTENSA-I128-NEXT: s32i.n a8, a1, 8 +; XTENSA-I128-NEXT: l32i.n a8, a1, 20 +; XTENSA-I128-NEXT: s32i.n a8, a1, 4 +; XTENSA-I128: l32r a8, .LCPI5_1 +; XTENSA-I128-NEXT: callx8 a8 +; XTENSA-I128-NEXT: retw.n + + %2 = alloca i128, align 16 + %3 = load i32, i32* @v, align 4 + store i128 %0, i128* %2, align 16 + call void @callee_i128b_3(i32 noundef %3, i128* noundef nonnull byval(i128) align 16 %2) + ret void +} + +declare dso_local void @callee_i128b_3(i32 noundef, i128* noundef byval(i128) align 16) From 2d40eeb93c66552e970402d30ea9d3cbf728b765 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:16 +0300 Subject: [PATCH 110/289] esp/ci: Run LLD tests. 
Output test logs in lld-tests.log --- .universal-toolchain-release.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.universal-toolchain-release.yml b/.universal-toolchain-release.yml index 3f15eb9637a31..5292918a32a97 100644 --- a/.universal-toolchain-release.yml +++ b/.universal-toolchain-release.yml @@ -58,6 +58,7 @@ artifacts: paths: - ${DIST_DIR}/ + - ${BUILD_DIR}/lld-tests.log - ${BUILD_DIR}/tests.log - ${BUILD_DIR}/build.log when: always @@ -94,6 +95,9 @@ touch ${BUILD_PATH}/tests.log; chmod o+w ${BUILD_PATH}/tests.log; runuser -l test_runner -c 'cmake --build '${LLVM_BUILD_PATH}' --target check-all 2>&1 > '${BUILD_PATH}'/tests.log'; + touch ${BUILD_PATH}/lld-tests.log; + chmod o+w ${BUILD_PATH}/lld-tests.log; + runuser -l test_runner -c 'cmake --build '${LLVM_BUILD_PATH}' --target lld-test 2>&1 > '${BUILD_PATH}'/lld-tests.log'; fi - export DISTRO_DIR=$PWD/$DIST_DIR - pushd ${BUILD_PATH} From cdee9f613050fbe519fbbb2f5852d851f19fbdc6 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:16 +0300 Subject: [PATCH 111/289] [Xtensa] Fix atomic rmw operation. Fix register liveness in emitAtomicRMW function. 
--- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index c3f71b524c3f2..2cb5ccb25f2dd 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -2470,7 +2470,9 @@ MachineBasicBlock *XtensaTargetLowering::emitAtomicRMW(MachineInstr &MI, const TargetRegisterClass *RC = getRegClassFor(MVT::i32); unsigned R1 = MRI.createVirtualRegister(RC); - BuildMI(*BB, MI, DL, TII.get(Xtensa::L32I), R1).add(AtomicValAddr).addImm(0); + BuildMI(*BB, MI, DL, TII.get(Xtensa::L32I), R1) + .addReg(AtomicValAddr.getReg()) + .addImm(0); BB = BBLoop; @@ -2534,7 +2536,7 @@ MachineBasicBlock *XtensaTargetLowering::emitAtomicRMW(MachineInstr &MI, BuildMI(BB, DL, TII.get(Xtensa::WSR), Xtensa::SCOMPARE1).addReg(AtomicValPhi); BuildMI(BB, DL, TII.get(Xtensa::S32C1I), R4) .addReg(R2) - .addReg(AtomicValAddr.getReg()) + .addReg(AtomicValAddr.getReg(), getKillRegState(AtomicValAddr.isDead())) .addImm(0); BuildMI(BB, DL, TII.get(Xtensa::MOV_N), AtomicValLoop).addReg(R4); From 22b5ed69750db2eb6e387fd02f6f5225af1a6d12 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:17 +0300 Subject: [PATCH 112/289] [Xtensa] Fix Hardware Loop pass. 
If block with LOOPEND instruction has smaller offset then loop heeader block then try to find appropriate place for LOOPEND instruction after loop header --- llvm/lib/Target/Xtensa/XtensaFixupHWLoops.cpp | 54 ++++++++++++++++--- 1 file changed, 46 insertions(+), 8 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaFixupHWLoops.cpp b/llvm/lib/Target/Xtensa/XtensaFixupHWLoops.cpp index 48b0a515ee985..7da0886cd072a 100644 --- a/llvm/lib/Target/Xtensa/XtensaFixupHWLoops.cpp +++ b/llvm/lib/Target/Xtensa/XtensaFixupHWLoops.cpp @@ -153,8 +153,8 @@ bool XtensaFixupHwLoops::runOnMachineFunction(MachineFunction &mf) { // Scan loop and find hardware loop pseudo instructions LOOPSTART and LOOPEND. // Transform LOOPSTART to Xtensa instructions and remove LOOPEND. bool XtensaFixupHwLoops::fixupLoopInstrs(MachineLoop *L) { - // const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); MachineBasicBlock &MBB = *(L->getHeader()); + const TargetInstrInfo *TII = MBB.getParent()->getSubtarget().getInstrInfo(); bool Changed = false; unsigned Num = MBB.getNumber(); unsigned Offset = BlockInfo[Num].Offset; @@ -168,10 +168,17 @@ bool XtensaFixupHwLoops::fixupLoopInstrs(MachineLoop *L) { MachineInstr *PredI1 = nullptr; MachineInstr *FirstMI = nullptr; + // Find appropriate place for the LOOPEND block for (auto MBI = L->block_begin(), MBIE = L->block_end(); MBI != MBIE; ++MBI) { - if (LastBlockOffset < BlockInfo[(*MBI)->getNumber()].Offset) { - LastBlockOffset = BlockInfo[(*MBI)->getNumber()].Offset; - LastBlock = (*MBI); + MachineBasicBlock *TBB = nullptr, *FBB = nullptr; + SmallVector Cond; + if (!TII->analyzeBranch(*(*MBI), TBB, FBB, Cond)) { + if (FBB && TBB) { + if (LastBlockOffset < BlockInfo[(*MBI)->getNumber()].Offset) { + LastBlockOffset = BlockInfo[(*MBI)->getNumber()].Offset; + LastBlock = (*MBI); + } + } } } @@ -220,12 +227,43 @@ bool XtensaFixupHwLoops::fixupLoopInstrs(MachineLoop *L) { DebugLoc DL = PII->getDebugLoc(); unsigned OffsetLE = 
BlockInfo[PMBB->getNumber()].Offset; - // Check if loop end is placed before loop header - // In such case add special MBB after loop header and create jump - // from loop end to it + // In most cases we expect that blocks in loop are ordered by such manner that block + // with LOOPSTART instruction preceeds block with LOOPEND instruction. + // But in some cases after transformations loop block which contains LOOPEND instruction + // maybe placed before LOOPSTART block during code generaion. We must handle such situation + // because "loop" instruction placed instead of LOOPSTART must have positive offset in the target + // field to the LOOPEND block. + // So, in such situation we add new LOOPEND block after the LOOPSTART block and create jump from old + // LOOPEND block to the new LOOPEND block adn set new LOOPEND block then as target for "loop" instruction if (OffsetLE < LHOffset) { LoopEnd = MF->CreateMachineBasicBlock(); - MF->insert(++LastBlock->getIterator(), LoopEnd); + + // If last block in the loop is whithin 256 byte offset from loop instruction + // then just place LOOPEND block after the last block. 
+ if ((LastBlockOffset - LHOffset) < 256) { + //Insert after appropriate block + MF->insert(++LastBlock->getIterator(), LoopEnd); + } else { + // If loop is to large for hardware loop instructuin offset then + // place LoopEnd block just after loop header + MF->insert(++MBB.getIterator(), LoopEnd); + MachineBasicBlock *TBB = nullptr, *FBB = nullptr; + SmallVector Cond; + if (!TII->analyzeBranch(MBB, TBB, FBB, Cond)) { + if (!FBB) { + // LH block just falls through to its succ + for (auto I = MBB.succ_begin(), E = MBB.succ_end(); I != E; + ++I) { + MachineBasicBlock *Succ = *I; + if (Succ != TBB) { + BuildMI(MBB, MBB.end(), DL, TII->get(Xtensa::J)) + .addMBB(Succ); + } + } + } + } + } + LoopEnd->transferSuccessors(PMBB); LoopEnd->splice(LoopEnd->end(), PMBB, PII, PMBB->end()); From fecd9281bd28233945a9ad5cb97a53399e4a1fe4 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 18 Sep 2024 20:21:03 +0300 Subject: [PATCH 113/289] [Xtensa] Fix temporary mul.ll and hwloop_inner_loop.ll tests. 
--- llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll | 18 +++--- llvm/test/CodeGen/Xtensa/mul.ll | 56 ++++++++++--------- 2 files changed, 39 insertions(+), 35 deletions(-) diff --git a/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll b/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll index 2ad5b57d5e15e..4c784fd1a8187 100644 --- a/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll +++ b/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll @@ -14,24 +14,24 @@ define i32 @test_hwloop(i32 %a, i32 %b, i32 %n) local_unnamed_addr #0 { ; CHECK-NEXT: .LBB0_2: # %for.body ; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 ; CHECK-NEXT: add.n a2, a9, a2 -; CHECK-NEXT: j .LBB0_5 +; CHECK-NEXT: j .LBB0_4 ; CHECK-NEXT: .LBB0_3: # %for.body ; CHECK-NEXT: # =>This Loop Header: Depth=1 -; CHECK-NEXT: # Child Loop BB0_6 Depth 2 +; CHECK-NEXT: # Child Loop BB0_5 Depth 2 ; CHECK-NEXT: nop ; CHECK-NEXT: nop -; CHECK-NEXT: loop a4, .LBB0_6 +; CHECK-NEXT: loop a4, .LBB0_5 ; CHECK-NEXT: mov.n a9, a8 ; CHECK-NEXT: bge a8, a2, .LBB0_2 -; CHECK-NEXT: # %bb.4: # %for.body -; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 -; CHECK-NEXT: mull a9, a2, a3 -; CHECK-NEXT: j .LBB0_2 -; CHECK-NEXT: .LBB0_5: # in Loop: Header=BB0_3 Depth=1 +; CHECK-NEXT: .LBB0_4: # in Loop: Header=BB0_3 Depth=1 ; CHECK-NEXT: nop -; CHECK-NEXT: .LBB0_6: # Parent Loop BB0_3 Depth=1 +; CHECK-NEXT: .LBB0_5: # Parent Loop BB0_3 Depth=1 ; CHECK-NEXT: # => This Inner Loop Header: Depth=2 ; CHECK-NEXT: j .LBB0_7 +; CHECK-NEXT: .LBB0_6: # %for.body +; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 +; CHECK-NEXT: mull a9, a2, a3 +; CHECK-NEXT: j .LBB0_2 ; CHECK-NEXT: .LBB0_7: # %for.cond.cleanup ; CHECK-NEXT: retw.n entry: diff --git a/llvm/test/CodeGen/Xtensa/mul.ll b/llvm/test/CodeGen/Xtensa/mul.ll index 7aaa2e1d00af4..72d821814cb66 100644 --- a/llvm/test/CodeGen/Xtensa/mul.ll +++ b/llvm/test/CodeGen/Xtensa/mul.ll @@ -522,17 +522,19 @@ define i64 @muli64_m3840(i64 %a) nounwind { define i128 @muli128_m3840(i128 %a) nounwind { ; XTENSA-LABEL: muli128_m3840: 
-; XTENSA: addi a8, a1, -16 +; XTENSA: addi a8, a1, -32 ; XTENSA-NEXT: or a1, a8, a8 -; XTENSA-NEXT: s32i a0, a1, 8 # 4-byte Folded Spill -; XTENSA-NEXT: movi a7, -1 -; XTENSA-NEXT: s32i a7, a1, 4 -; XTENSA-NEXT: s32i a7, a1, 0 -; XTENSA-NEXT: l32r a6, .LCPI30_0 +; XTENSA-NEXT: s32i a0, a1, 28 # 4-byte Folded Spill +; XTENSA-NEXT: movi a8, -1 +; XTENSA-NEXT: s32i a8, a1, 12 +; XTENSA-NEXT: s32i a8, a1, 8 +; XTENSA-NEXT: s32i a8, a1, 4 +; XTENSA-NEXT: l32r a8, .LCPI30_0 +; XTENSA-NEXT: s32i a8, a1, 0 ; XTENSA-NEXT: l32r a8, .LCPI30_1 ; XTENSA-NEXT: callx0 a8 -; XTENSA-NEXT: l32i a0, a1, 8 # 4-byte Folded Reload -; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: l32i a0, a1, 28 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 32 ; XTENSA-NEXT: or a1, a8, a8 ; XTENSA-NEXT: ret %1 = mul i128 %a, -3840 @@ -541,17 +543,19 @@ define i128 @muli128_m3840(i128 %a) nounwind { define i128 @muli128_m63(i128 %a) nounwind { ; XTENSA-LABEL: muli128_m63: -; XTENSA: addi a8, a1, -16 +; XTENSA: addi a8, a1, -32 ; XTENSA-NEXT: or a1, a8, a8 -; XTENSA-NEXT: s32i a0, a1, 8 # 4-byte Folded Spill -; XTENSA-NEXT: movi a7, -1 -; XTENSA-NEXT: s32i a7, a1, 4 -; XTENSA-NEXT: s32i a7, a1, 0 -; XTENSA-NEXT: movi a6, -63 +; XTENSA-NEXT: s32i a0, a1, 28 # 4-byte Folded Spill +; XTENSA-NEXT: movi a8, -1 +; XTENSA-NEXT: s32i a8, a1, 12 +; XTENSA-NEXT: s32i a8, a1, 8 +; XTENSA-NEXT: s32i a8, a1, 4 +; XTENSA-NEXT: movi a8, -63 +; XTENSA-NEXT: s32i a8, a1, 0 ; XTENSA-NEXT: l32r a8, .LCPI31_0 ; XTENSA-NEXT: callx0 a8 -; XTENSA-NEXT: l32i a0, a1, 8 # 4-byte Folded Reload -; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: l32i a0, a1, 28 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 32 ; XTENSA-NEXT: or a1, a8, a8 ; XTENSA-NEXT: ret %1 = mul i128 %a, -63 @@ -560,22 +564,22 @@ define i128 @muli128_m63(i128 %a) nounwind { define i64 @mulhsu_i64(i64 %a, i64 %b) nounwind { ; XTENSA-LABEL: mulhsu_i64: -; XTENSA: addi a8, a1, -16 -; XTENSA-NEXT: or a1, a8, a8 -; XTENSA-NEXT: s32i a0, a1, 8 # 4-byte Folded 
Spill -; XTENSA-NEXT: or a7, a5, a5 -; XTENSA-NEXT: or a6, a4, a4 -; XTENSA-NEXT: srai a8, a7, 31 -; XTENSA-NEXT: s32i a8, a1, 4 -; XTENSA-NEXT: s32i a8, a1, 0 +; XTENSA: addi a8, a1, -32 +; XTENSA-NEXT: or a1, a8, a8 +; XTENSA-NEXT: s32i a0, a1, 28 # 4-byte Folded Spill +; XTENSA-NEXT: srai a8, a5, 31 +; XTENSA-NEXT: s32i a8, a1, 12 +; XTENSA-NEXT: s32i a8, a1, 8 +; XTENSA-NEXT: s32i a5, a1, 4 +; XTENSA-NEXT: s32i a4, a1, 0 ; XTENSA-NEXT: movi a4, 0 ; XTENSA-NEXT: l32r a8, .LCPI32_0 ; XTENSA-NEXT: or a5, a4, a4 ; XTENSA-NEXT: callx0 a8 ; XTENSA-NEXT: or a2, a4, a4 ; XTENSA-NEXT: or a3, a5, a5 -; XTENSA-NEXT: l32i a0, a1, 8 # 4-byte Folded Reload -; XTENSA-NEXT: addi a8, a1, 16 +; XTENSA-NEXT: l32i a0, a1, 28 # 4-byte Folded Reload +; XTENSA-NEXT: addi a8, a1, 32 ; XTENSA-NEXT: or a1, a8, a8 ; XTENSA-NEXT: ret %1 = zext i64 %a to i128 From 90bd39f7bfd8b231d34756443e09eb5fa65d2059 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 17:44:21 +0300 Subject: [PATCH 114/289] [Xtensa] Add LLD linker support --- lld/ELF/Arch/Xtensa.cpp | 173 ++++++++++++++++++ lld/ELF/CMakeLists.txt | 1 + lld/ELF/InputFiles.cpp | 2 + lld/ELF/Target.cpp | 2 + lld/ELF/Target.h | 1 + lld/test/ELF/xtensa-reloc.s | 17 ++ lld/test/lit.cfg.py | 1 + .../llvm/BinaryFormat/ELFRelocs/Xtensa.def | 6 + 8 files changed, 203 insertions(+) create mode 100644 lld/ELF/Arch/Xtensa.cpp create mode 100644 lld/test/ELF/xtensa-reloc.s diff --git a/lld/ELF/Arch/Xtensa.cpp b/lld/ELF/Arch/Xtensa.cpp new file mode 100644 index 0000000000000..31603068b8df7 --- /dev/null +++ b/lld/ELF/Arch/Xtensa.cpp @@ -0,0 +1,173 @@ +//===- Xtensa.cpp ---------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "InputFiles.h" +#include "Symbols.h" +#include "Target.h" +#include +#include +#include + +using namespace llvm; +using namespace llvm::object; +using namespace llvm::support::endian; +using namespace llvm::ELF; +using namespace lld; +using namespace lld::elf; + +namespace { + +class Xtensa final : public TargetInfo { +public: + Xtensa(); + RelExpr getRelExpr(RelType type, const Symbol &s, + const uint8_t *loc) const override; + void relocate(uint8_t *loc, const Relocation &rel, + uint64_t val) const override; +}; + +} // namespace + +Xtensa::Xtensa() {} + +RelExpr Xtensa::getRelExpr(RelType type, const Symbol &s, + const uint8_t *loc) const { + switch (type) { + case R_XTENSA_32: + return R_ABS; + case R_XTENSA_SLOT0_OP: + // This relocation is used for various instructions, with varying ways to + // calculate the relocation value. This is unlike most ELF architectures, + // and is arguably bad design (see the comment on R_386_GOT32 in X86.cpp). + // But that's what compilers emit, so it needs to be supported. + // + // We work around this by returning R_PC here and calculating the PC address + // in Xtensa::relocate based on the relative value. That's ugly. A better + // solution would be to look at the instruction here and emit various + // Xtensa-specific RelTypes, but that has another problem: the RelExpr enum + // is at its maximum size of 64. This will need to be fixed eventually, but + // for now hack around it and return R_PC. + return R_PC; + case R_XTENSA_ASM_EXPAND: + // This relocation appears to be emitted by the GNU Xtensa compiler as a + // linker relaxation hint. For example, for the following code: + // + // .section .foo + // .align 4 + // foo: + // nop + // nop + // call0 bar + // .align 4 + // bar: + // + // The call0 instruction is compiled to a l32r and callx0 instruction. 
+ // The LLVM Xtensa backend does not emit this relocation. + // Because it's a relaxation hint, this relocation can be ignored for now + // until linker relaxations are implemented. + return R_NONE; + case R_XTENSA_PDIFF8: + case R_XTENSA_PDIFF16: + case R_XTENSA_PDIFF32: + case R_XTENSA_NDIFF8: + case R_XTENSA_NDIFF16: + case R_XTENSA_NDIFF32: + // > Xtensa relocations to mark the difference of two local symbols. + // > These are only needed to support linker relaxation and can be ignored + // > when not relaxing. + // Source: + // https://github.com/espressif/binutils-gdb/commit/30ce8e47fad9b057b6d7af9e1d43061126d34d20: + // Because we don't do linker relaxation, we can ignore these relocations. + return R_NONE; + default: + error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) + + ") against symbol " + toString(s)); + return R_NONE; + } +} + +static inline bool isRRI8Branch(uint8_t *loc) { + if ((loc[0] & 0x0f) == 0b0111) { + // instructions: ball, bany, bbc, bbci, bbs, bbsi, beq, bge, bgeu, blt, + // bltu, bnall, bne, bnone + return true; + } + if ((loc[0] & 0b11'1111) == 0b10'0110) { + // instructions: beqi, bgei, bnei, blti + return true; + } + if ((loc[0] & 0b1011'1111) == 0b1011'0110) { + // instructions: bgeui, bltui + return true; + } + // some other instruction + return false; +} + +void Xtensa::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const { + switch (rel.type) { + case R_XTENSA_32: + write32le(loc, val); + break; + case R_XTENSA_SLOT0_OP: { + // HACK: calculate the instruction location based on the PC-relative + // relocation value. + uint64_t dest = rel.sym->getVA(rel.addend); + uint64_t p = dest - val; + + // This relocation is used for various instructions. + // Look at the instruction to determine how to do the relocation. 
+ uint8_t opcode = loc[0] & 0x0f; + if (opcode == 0b0001) { // RI16 format: l32r + uint64_t val = dest - ((p + 3) & (uint64_t)0xfffffffc); + checkInt(loc, static_cast(val) >> 2, 16, rel); + checkAlignment(loc, val, 4, rel); + write16le(loc + 1, static_cast(val) >> 2); + } else if (opcode == 0b0101) { // call0, call4, call8, call12 (CALL format) + uint64_t val = dest - ((p + 4) & (uint64_t)0xfffffffc); + checkInt(loc, static_cast(val) >> 2, 18, rel); + checkAlignment(loc, val, 4, rel); + const int64_t target = static_cast(val) >> 2; + loc[0] = (loc[0] & 0b0011'1111) | ((target & 0b0000'0011) << 6); + loc[1] = target >> 2; + loc[2] = target >> 10; + } else if ((loc[0] & 0x3f) == 0b00'0110) { // j (CALL format) + uint64_t val = dest - p + 4; + checkInt(loc, static_cast(val), 18, rel); + loc[0] = (loc[0] & 0b0011'1111) | ((val & 0b0000'0011) << 6); + loc[1] = val >> 2; + loc[2] = val >> 10; + } else if (isRRI8Branch(loc)) { // RRI8 format (various branch instructions) + uint64_t v = val - 4; + checkInt(loc, static_cast(v), 8, rel); + loc[2] = v & 0xff; + } else if ((loc[0] & 0b1000'1111) == 0b1000'1100) { // RI16 format: beqz.n, bnez.n + uint64_t v = val - 4; + checkUInt(loc, v, 6, rel); + loc[0] = (loc[0] & 0xcf) | (v & 0x30); + loc[1] = (loc[1] & 0x0f) | ((v & 0x0f) << 4); + } else if ((loc[0] & 0b0011'1111) == 0b0001'0110) { // BRI12 format: beqz, bgez, bltz, bnez + uint64_t v = val - 4; + checkInt(loc, static_cast(v), 12, rel); + loc[1] = ((loc[1] & 0x0f)) | ((v & 0x0f) << 4); + loc[2] = (v >> 4) & 0xff; + } else { + error(getErrorLocation(loc) + + "unknown opcode for relocation: " + std::to_string(loc[0])); + } + break; + } + default: + llvm_unreachable("unknown relocation"); + } +} + +TargetInfo *elf::getXtensaTargetInfo() { + static Xtensa target; + return ⌖ +} diff --git a/lld/ELF/CMakeLists.txt b/lld/ELF/CMakeLists.txt index 83d816ddb0601..5ec4f7e870e35 100644 --- a/lld/ELF/CMakeLists.txt +++ b/lld/ELF/CMakeLists.txt @@ -36,6 +36,7 @@ add_lld_library(lldELF 
Arch/SystemZ.cpp Arch/X86.cpp Arch/X86_64.cpp + Arch/Xtensa.cpp ARMErrataFix.cpp CallGraphSort.cpp DWARF.cpp diff --git a/lld/ELF/InputFiles.cpp b/lld/ELF/InputFiles.cpp index 48f5a9609ecfb..48a60ec60c73e 100644 --- a/lld/ELF/InputFiles.cpp +++ b/lld/ELF/InputFiles.cpp @@ -1678,6 +1678,8 @@ static uint16_t getBitcodeMachineKind(StringRef path, const Triple &t) { return t.isOSIAMCU() ? EM_IAMCU : EM_386; case Triple::x86_64: return EM_X86_64; + case Triple::xtensa: + return EM_XTENSA; default: error(path + ": could not infer e_machine from bitcode target triple " + t.str()); diff --git a/lld/ELF/Target.cpp b/lld/ELF/Target.cpp index 3e221646ce247..15d1c80aff081 100644 --- a/lld/ELF/Target.cpp +++ b/lld/ELF/Target.cpp @@ -91,6 +91,8 @@ TargetInfo *elf::getTarget() { return getSystemZTargetInfo(); case EM_X86_64: return getX86_64TargetInfo(); + case EM_XTENSA: + return getXtensaTargetInfo(); default: fatal("unsupported e_machine value: " + Twine(config->emachine)); } diff --git a/lld/ELF/Target.h b/lld/ELF/Target.h index 0cefa31813566..a692db6b9e626 100644 --- a/lld/ELF/Target.h +++ b/lld/ELF/Target.h @@ -191,6 +191,7 @@ TargetInfo *getSPARCV9TargetInfo(); TargetInfo *getSystemZTargetInfo(); TargetInfo *getX86TargetInfo(); TargetInfo *getX86_64TargetInfo(); +TargetInfo *getXtensaTargetInfo(); template TargetInfo *getMipsTargetInfo(); struct ErrorPlace { diff --git a/lld/test/ELF/xtensa-reloc.s b/lld/test/ELF/xtensa-reloc.s new file mode 100644 index 0000000000000..7007756aa2a89 --- /dev/null +++ b/lld/test/ELF/xtensa-reloc.s @@ -0,0 +1,17 @@ +# REQUIRES: xtensa +# RUN: llvm-mc -filetype=obj -triple=xtensa -mcpu=esp32 %s -o %t.o +# RUN: ld.lld %t.o --defsym=a=0x2000 --section-start=.CALL=0x1000 --defsym=b=40 -o %t +# RUN: llvm-objdump -d --print-imm-hex %t | FileCheck %s + +.section .CALL,"ax",@progbits +# CHECK-LABEL: section .CALL: +# CHECK: call0 . +4096 +# CHECK-NEXT: call0 . +4096 +# CHECK-NEXT: call0 . +4092 +# CHECK-NEXT: call0 . +4088 +# CHECK-NEXT: call0 . 
-4068 + call0 a + call0 a + call0 a + call0 a + call0 b diff --git a/lld/test/lit.cfg.py b/lld/test/lit.cfg.py index d309c2ad4ee28..6a40667defcac 100644 --- a/lld/test/lit.cfg.py +++ b/lld/test/lit.cfg.py @@ -86,6 +86,7 @@ "SystemZ": "systemz", "WebAssembly": "wasm", "X86": "x86", + 'Xtensa': 'xtensa', }, ), ("--assertion-mode", {"ON": "asserts"}), diff --git a/llvm/include/llvm/BinaryFormat/ELFRelocs/Xtensa.def b/llvm/include/llvm/BinaryFormat/ELFRelocs/Xtensa.def index 6791a842181ff..c2e11259164bb 100644 --- a/llvm/include/llvm/BinaryFormat/ELFRelocs/Xtensa.def +++ b/llvm/include/llvm/BinaryFormat/ELFRelocs/Xtensa.def @@ -58,3 +58,9 @@ ELF_RELOC(R_XTENSA_TLS_TPOFF, 53) ELF_RELOC(R_XTENSA_TLS_FUNC, 54) ELF_RELOC(R_XTENSA_TLS_ARG, 55) ELF_RELOC(R_XTENSA_TLS_CALL, 56) +ELF_RELOC(R_XTENSA_PDIFF8, 57) +ELF_RELOC(R_XTENSA_PDIFF16, 58) +ELF_RELOC(R_XTENSA_PDIFF32, 59) +ELF_RELOC(R_XTENSA_NDIFF8, 60) +ELF_RELOC(R_XTENSA_NDIFF16, 61) +ELF_RELOC(R_XTENSA_NDIFF32, 62) From e366c6014f1150b496e9c3ff93e297873d8f21a3 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 1 Apr 2024 15:34:35 +0300 Subject: [PATCH 115/289] [Xtensa][LLD] add more tests --- lld/test/ELF/xtensa-reloc.s | 44 +++++++++++++++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 2 deletions(-) diff --git a/lld/test/ELF/xtensa-reloc.s b/lld/test/ELF/xtensa-reloc.s index 7007756aa2a89..611e671e7646f 100644 --- a/lld/test/ELF/xtensa-reloc.s +++ b/lld/test/ELF/xtensa-reloc.s @@ -1,17 +1,57 @@ # REQUIRES: xtensa # RUN: llvm-mc -filetype=obj -triple=xtensa -mcpu=esp32 %s -o %t.o -# RUN: ld.lld %t.o --defsym=a=0x2000 --section-start=.CALL=0x1000 --defsym=b=40 -o %t +# RUN: ld.lld %t.o --defsym=a=0x2000 --section-start=.CALL=0x1000 --defsym=b=0x40 --defsym=c=0x140 --section-start=.BRANCH=0x5000 --defsym=d=0x5010 --section-start=.BR12=0x100 -o %t # RUN: llvm-objdump -d --print-imm-hex %t | FileCheck %s +.section .BR12,"ax",@progbits +# CHECK-LABEL:section .BR12 +# CHECK: beqz a2, . 
+64 +# CHECK-NEXT: bnez a3, . +61 +# CHECK-NEXT: bgez a4, . +58 +# CHECK-NEXT: bltz a5, . +55 + beqz a2, c + bnez a3, c + bgez a4, c + bltz a5, c + .section .CALL,"ax",@progbits # CHECK-LABEL: section .CALL: # CHECK: call0 . +4096 # CHECK-NEXT: call0 . +4096 # CHECK-NEXT: call0 . +4092 # CHECK-NEXT: call0 . +4088 -# CHECK-NEXT: call0 . -4068 +# CHECK-NEXT: j . +4092 +# CHECK-NEXT: j . +4089 +# CHECK-NEXT: j . +4086 +# CHECK-NEXT: j . -4045 +# CHECK-NEXT: j . -3792 +# CHECK-NEXT: call0 . -4056 +# CHECK-NEXT: call0 . -3804 +# CHECK-NEXT: l32r a3, . -4065 +# CHECK-NEXT: callx0 a3 +# CHECK-NEXT: l32r a4, . -3815 +# CHECK-NEXT: callx0 a4 call0 a call0 a call0 a call0 a + j a + j a + j a + j b + j c call0 b + call0 c + l32r a3, b + callx0 a3 + l32r a4, c + callx0 a4 + +.section .BRANCH,"ax",@progbits +# CHECK-LABEL: section .BRANCH: +# CHECK: beq a3, a4, . +16 +# CHECK-NEXT: ball a3, a4, . +13 +# CHECK-NEXT: blt a3, a4, . +10 + beq a3, a4, d + ball a3, a4, d + blt a3, a4, d From 684ea9e658e65595862c1998881b2eb629ce2b68 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 1 Apr 2024 16:12:11 +0300 Subject: [PATCH 116/289] [Xtensa][LLD] Fix J formula --- lld/ELF/Arch/Xtensa.cpp | 10 +++++----- lld/test/ELF/xtensa-reloc.s | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/lld/ELF/Arch/Xtensa.cpp b/lld/ELF/Arch/Xtensa.cpp index 31603068b8df7..f6d75fde7ac29 100644 --- a/lld/ELF/Arch/Xtensa.cpp +++ b/lld/ELF/Arch/Xtensa.cpp @@ -137,11 +137,11 @@ void Xtensa::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const { loc[1] = target >> 2; loc[2] = target >> 10; } else if ((loc[0] & 0x3f) == 0b00'0110) { // j (CALL format) - uint64_t val = dest - p + 4; - checkInt(loc, static_cast(val), 18, rel); - loc[0] = (loc[0] & 0b0011'1111) | ((val & 0b0000'0011) << 6); - loc[1] = val >> 2; - loc[2] = val >> 10; + uint64_t valJ = val - 4; + checkInt(loc, static_cast(valJ), 18, rel); + loc[0] = (loc[0] & 0b0011'1111) | ((valJ & 0b0000'0011) << 6); 
+ loc[1] = valJ >> 2; + loc[2] = valJ >> 10; } else if (isRRI8Branch(loc)) { // RRI8 format (various branch instructions) uint64_t v = val - 4; checkInt(loc, static_cast(v), 8, rel); diff --git a/lld/test/ELF/xtensa-reloc.s b/lld/test/ELF/xtensa-reloc.s index 611e671e7646f..e14151ae4a814 100644 --- a/lld/test/ELF/xtensa-reloc.s +++ b/lld/test/ELF/xtensa-reloc.s @@ -20,11 +20,11 @@ # CHECK-NEXT: call0 . +4096 # CHECK-NEXT: call0 . +4092 # CHECK-NEXT: call0 . +4088 -# CHECK-NEXT: j . +4092 -# CHECK-NEXT: j . +4089 -# CHECK-NEXT: j . +4086 -# CHECK-NEXT: j . -4045 -# CHECK-NEXT: j . -3792 +# CHECK-NEXT: j . +4084 +# CHECK-NEXT: j . +4081 +# CHECK-NEXT: j . +4078 +# CHECK-NEXT: j . -4053 +# CHECK-NEXT: j . -3800 # CHECK-NEXT: call0 . -4056 # CHECK-NEXT: call0 . -3804 # CHECK-NEXT: l32r a3, . -4065 From 53892bfeadf59f3878e481138b37e4abdee4572d Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 19:18:03 +0300 Subject: [PATCH 117/289] [Xtensa] Add emit constant pool option. --- clang/include/clang/Driver/Options.td | 1 + clang/lib/Driver/ToolChains/Clang.cpp | 5 +++++ .../Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.cpp | 4 +++- .../lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.h | 5 +++++ llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp | 1 + llvm/lib/Target/Xtensa/XtensaSubtarget.cpp | 8 ++++++++ llvm/lib/Target/Xtensa/XtensaSubtarget.h | 2 ++ 7 files changed, 25 insertions(+), 1 deletion(-) diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 09497e8d0d100..8260f21144b2c 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -6406,6 +6406,7 @@ def mfix_esp32_psram_cache_strategy_EQ : Joined<["-"], "mfix-esp32-psram-cache-s HelpText<" Psram cache fix strategies : memw, nops">, Values<"memw, nops">; def mlongcalls : Flag<["-"], "mlongcalls">, Group; +def mtext_section_literals : Flag<["-"], "mtext-section-literals">, Group; // These are legacy user-facing 
driver-level option spellings. They are always // aliases for options that are spelled using the more common Unix / GNU flag diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index b079f47d8f09a..50f8dd593f7a3 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -2444,6 +2444,11 @@ void Clang::AddXtensaTargetArgs(const ArgList &Args, } } } + + if (Args.getLastArg(options::OPT_mtext_section_literals) != nullptr) { + CmdArgs.push_back("-mllvm"); + CmdArgs.push_back("-mtext-section-literals"); + } } void Clang::DumpCompilationDatabase(Compilation &C, StringRef Filename, diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.cpp index 43ca314f9d0c1..9b3906a5cb7ce 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.cpp @@ -116,7 +116,9 @@ void XtensaTargetELFStreamer::startLiteralSection(MCSection *BaseSection) { StringRef LiteralSectionPrefix = getLiteralSectionPrefix(); std::string SectionName; - if (LiteralSectionPrefix != "") { + if (getTextSectionLiterals()) { + SectionName = BaseSection->getName(); + } else if (LiteralSectionPrefix != "") { SectionName = LiteralSectionPrefix.str() + ".literal"; } else { SectionName = getLiteralSectionName(BaseSection->getName()); diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.h index 73df9c25d28e8..00df88ff36b41 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.h +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.h @@ -18,6 +18,7 @@ class formatted_raw_ostream; class XtensaTargetStreamer : public MCTargetStreamer { StringRef LiteralSectionPrefix = ""; + bool TextSectionLiterals = false; public: XtensaTargetStreamer(MCStreamer &S); @@ -36,6 +37,10 @@ class 
XtensaTargetStreamer : public MCTargetStreamer { void setLiteralSectionPrefix(StringRef Name) { LiteralSectionPrefix = Name; } StringRef getLiteralSectionPrefix() { return LiteralSectionPrefix; } + + void setTextSectionLiterals() { TextSectionLiterals = true; } + + bool getTextSectionLiterals() { return TextSectionLiterals; } }; class XtensaTargetAsmStreamer : public XtensaTargetStreamer { diff --git a/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp index a3dbb40924d70..ba66086844b37 100644 --- a/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp +++ b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp @@ -150,6 +150,7 @@ void XtensaAsmPrinter::emitConstantPool() { auto *TS = static_cast(OutStreamer->getTargetStreamer()); MCSection *CS = getObjFileLowering().SectionForGlobal(&F, TM); + TS->setTextSectionLiterals(); TS->startLiteralSection(CS); int CPIdx = 0; diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp index ec57ff2bd23db..3d0ee165c16d5 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp @@ -22,6 +22,14 @@ using namespace llvm; +static cl::opt TextSectionLiterals("mtext-section-literals", + cl::init(false), cl::Hidden); + +bool XtensaSubtarget::useTextSectionLiterals() const +{ + return TextSectionLiterals; +} + XtensaSubtarget & XtensaSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) { StringRef CPUName = CPU; diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.h b/llvm/lib/Target/Xtensa/XtensaSubtarget.h index d9a423d232ea4..1db34d354d575 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.h +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.h @@ -211,6 +211,8 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { bool hasESP32S3Ops() const { return HasESP32S3Ops; } + bool useTextSectionLiterals() const; + // Automatically generated by tblgen. 
void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS); }; From 83731b0d1462051e9a897a6a52dc3b7980efb4d5 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:19 +0300 Subject: [PATCH 118/289] [Xtensa] Add support of the mcmodel option. For large mcmodel always emit contsant pool just before code. --- llvm/lib/Target/Xtensa/XtensaSubtarget.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp index 3d0ee165c16d5..45953ff2ad451 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp @@ -27,6 +27,11 @@ static cl::opt TextSectionLiterals("mtext-section-literals", bool XtensaSubtarget::useTextSectionLiterals() const { + // If code model is large then always place literals in + // test section. + if (TLInfo.getTargetMachine().getCodeModel() == CodeModel::Large) + return true; + return TextSectionLiterals; } From 8bf1f26b66182a5adfb7489ef0c4561f8aa18031 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 18 Sep 2024 20:42:27 +0300 Subject: [PATCH 119/289] [Xtensa] Fix temporary clang/lib/Driver/Toolchains/Xtensa.cpp, enable "-fuse-ld" --- clang/lib/Basic/Targets/Xtensa.h | 2 +- clang/lib/Driver/ToolChains/Xtensa.cpp | 225 +++++++++++++++---------- clang/lib/Driver/ToolChains/Xtensa.h | 3 + 3 files changed, 141 insertions(+), 89 deletions(-) diff --git a/clang/lib/Basic/Targets/Xtensa.h b/clang/lib/Basic/Targets/Xtensa.h index 26dba0225014f..a23b28684e709 100644 --- a/clang/lib/Basic/Targets/Xtensa.h +++ b/clang/lib/Basic/Targets/Xtensa.h @@ -49,7 +49,7 @@ class LLVM_LIBRARY_VISIBILITY XtensaTargetInfo : public TargetInfo { WIntType = UnsignedInt; UseZeroLengthBitfieldAlignment = true; MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32; - resetDataLayout("e-m:e-p:32:32-i8:8:32-i16:16:32-i64:64-n32"); + resetDataLayout("e-m:e-p:32:32-i8:8:32-i16:16:32-i64:64-i128:128-n32"); } void 
getTargetDefines(const LangOptions &Opts, diff --git a/clang/lib/Driver/ToolChains/Xtensa.cpp b/clang/lib/Driver/ToolChains/Xtensa.cpp index 271abd98f18ec..2a00dc6e9ef40 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.cpp +++ b/clang/lib/Driver/ToolChains/Xtensa.cpp @@ -38,57 +38,80 @@ XtensaToolChain::XtensaToolChain(const Driver &D, const llvm::Triple &Triple, : Generic_ELF(D, Triple, Args) { GCCInstallation.init(Triple, Args); - if (!GCCInstallation.isValid()) { - llvm_unreachable("Unexpected Xtensa GCC toolchain version"); - } - - Multilibs = GCCInstallation.getMultilibs(); - SelectedMultilibs.assign({GCCInstallation.getMultilib()}); - - GCCLibAndIncVersion = GCCInstallation.getVersion().Text; - GCCToolchainName = GCCInstallation.getTriple().str(); - SmallString<128> Path(GCCInstallation.getParentLibPath()); - llvm::sys::path::append(Path, ".."); - GCCToolchainDir = Path.c_str(); - - for (auto *A : Args) { - std::string Str = A->getAsString(Args); - if (!Str.compare("-mlongcalls")) - A->claim(); - if (!Str.compare("-fno-tree-switch-conversion")) - A->claim(); + if (GCCInstallation.isValid()) { + for (auto *A : Args) { + std::string Str = A->getAsString(Args); + if (!Str.compare("-mlongcalls")) + A->claim(); + if (!Str.compare("-fno-tree-switch-conversion")) + A->claim(); + + // Currently don't use integrated assembler for assembler input files + if ((IsIntegratedAsm) && (Str.length() > 2)) { + std::string ExtSubStr = Str.substr(Str.length() - 2); + if (!ExtSubStr.compare(".s")) + IsIntegratedAsm = false; + if (!ExtSubStr.compare(".S")) + IsIntegratedAsm = false; + } + } // Currently don't use integrated assembler for assembler input files - if ((IsIntegratedAsm) && (Str.length() > 2)) { - std::string ExtSubStr = Str.substr(Str.length() - 2); - if (!ExtSubStr.compare(".s")) + if (IsIntegratedAsm) { + if (Args.getLastArgValue(options::OPT_x) == "assembler") IsIntegratedAsm = false; - if (!ExtSubStr.compare(".S")) + + if (Args.getLastArgValue(options::OPT_x) == 
"assembler-with-cpp") IsIntegratedAsm = false; } - } - - // Currently don't use integrated assembler for assembler input files - if (IsIntegratedAsm) { - if (Args.getLastArgValue(options::OPT_x) == "assembler") - IsIntegratedAsm = false; - if (Args.getLastArgValue(options::OPT_x) == "assembler-with-cpp") - IsIntegratedAsm = false; + Multilibs = GCCInstallation.getMultilibs(); + SelectedMultilibs.assign({GCCInstallation.getMultilib()}); + + GCCLibAndIncVersion = GCCInstallation.getVersion().Text; + GCCToolchainName = GCCInstallation.getTriple().str(); + SmallString<128> Path(GCCInstallation.getParentLibPath()); + llvm::sys::path::append(Path, ".."); + GCCToolchainDir = Path.c_str(); + + SmallString<128> Libs1(GCCToolchainDir); + llvm::sys::path::append(Libs1, "lib", "gcc", GCCToolchainName, + GCCLibAndIncVersion); + if (!SelectedMultilibs.back().gccSuffix().empty()) + llvm::sys::path::append(Libs1, SelectedMultilibs.back().gccSuffix()); + getFilePaths().push_back(Libs1.c_str()); + + SmallString<128> Libs2(GCCToolchainDir); + llvm::sys::path::append(Libs2, GCCToolchainName, "lib"); + if (!SelectedMultilibs.back().gccSuffix().empty()) + llvm::sys::path::append(Libs2, SelectedMultilibs.back().gccSuffix()); + getFilePaths().push_back(Libs2.c_str()); + + ToolChain::path_list &PPaths = getProgramPaths(); + // Multilib cross-compiler GCC installations put ld in a triple-prefixed + // directory of the GCC installation parent dir. 
+ StringRef ParentDir = llvm::sys::path::parent_path(GCCInstallation.getParentLibPath()); + + SmallString<128> PathTripleBin(ParentDir); + llvm::sys::path::append(PathTripleBin, GCCInstallation.getTriple().str()); + llvm::sys::path::append(PathTripleBin, "bin"); + PPaths.push_back(PathTripleBin.c_str()); + + SmallString<128> PathBin(ParentDir); + llvm::sys::path::append(PathBin, "bin"); + PPaths.push_back(PathBin.c_str()); + + if (!getDriver().SysRoot.empty()) { + SmallString<128> SysRoot(computeSysRoot()); + llvm::sys::path::append(SysRoot, "lib"); + getFilePaths().push_back(SysRoot.c_str()); + } + } else { + getProgramPaths().push_back(D.Dir); + SmallString<128> SysRoot(computeSysRoot()); + llvm::sys::path::append(SysRoot, "lib"); + getFilePaths().push_back(SysRoot.c_str()); } - - SmallString<128> Libs1(GCCToolchainDir); - llvm::sys::path::append(Libs1, "lib", "gcc", GCCToolchainName, - GCCLibAndIncVersion); - if (!SelectedMultilibs.back().gccSuffix().empty()) - llvm::sys::path::append(Libs1, SelectedMultilibs.back().gccSuffix()); - getFilePaths().push_back(Libs1.c_str()); - - SmallString<128> Libs2(GCCToolchainDir); - llvm::sys::path::append(Libs2, GCCToolchainName, "lib"); - if (!SelectedMultilibs.back().gccSuffix().empty()) - llvm::sys::path::append(Libs2, SelectedMultilibs.back().gccSuffix()); - getFilePaths().push_back(Libs2.c_str()); } Tool *XtensaToolChain::buildLinker() const { @@ -105,18 +128,25 @@ void XtensaToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs, DriverArgs.hasArg(options::OPT_nostdlibinc)) return; - if (!GCCInstallation.isValid()) - return; - - SmallString<128> Path1(getDriver().ResourceDir); - llvm::sys::path::append(Path1, "include"); - SmallString<128> Path2(GCCToolchainDir); - llvm::sys::path::append(Path2, GCCToolchainName, "sys-include"); - SmallString<128> Path3(GCCToolchainDir); - llvm::sys::path::append(Path3, GCCToolchainName, "include"); - - const StringRef Paths[] = {Path1, Path2, Path3}; - 
addSystemIncludes(DriverArgs, CC1Args, Paths); + if (!getDriver().SysRoot.empty()) { + SmallString<128> Dir(getDriver().SysRoot); + llvm::sys::path::append(Dir, "include"); + addSystemInclude(DriverArgs, CC1Args, Dir.str()); + } else if (GCCInstallation.isValid()) { + SmallString<128> Path1(getDriver().ResourceDir); + llvm::sys::path::append(Path1, "include"); + SmallString<128> Path2(GCCToolchainDir); + llvm::sys::path::append(Path2, GCCToolchainName, "sys-include"); + SmallString<128> Path3(GCCToolchainDir); + llvm::sys::path::append(Path3, GCCToolchainName, "include"); + + const StringRef Paths[] = {Path1, Path2, Path3}; + addSystemIncludes(DriverArgs, CC1Args, Paths); + } else { + SmallString<128> Dir(computeSysRoot()); + llvm::sys::path::append(Dir, "include"); + addSystemInclude(DriverArgs, CC1Args, Dir.str()); + } } void XtensaToolChain::addLibStdCxxIncludePaths( @@ -125,17 +155,32 @@ void XtensaToolChain::addLibStdCxxIncludePaths( if (!GCCInstallation.isValid()) return; - SmallString<128> BaseDir(GCCToolchainDir); - llvm::sys::path::append(BaseDir, GCCToolchainName, "include", "c++", - GCCLibAndIncVersion); - SmallString<128> TargetDir(BaseDir); - llvm::sys::path::append(TargetDir, GCCToolchainName); - SmallString<128> TargetDirBackward(BaseDir); - llvm::sys::path::append(TargetDirBackward, "backward"); - - addLibStdCXXIncludePaths(BaseDir, "", "", DriverArgs, CC1Args); - addLibStdCXXIncludePaths(TargetDir, "", "", DriverArgs, CC1Args); - addLibStdCXXIncludePaths(TargetDirBackward, "", "", DriverArgs, CC1Args); + const GCCVersion &Version = GCCInstallation.getVersion(); + StringRef TripleStr = GCCInstallation.getTriple().str(); + addLibStdCXXIncludePaths(computeSysRoot() + "/include/c++/" + Version.Text, + TripleStr, "", DriverArgs, CC1Args); +} + +std::string XtensaToolChain::computeSysRoot() const { + if (!getDriver().SysRoot.empty()) + return getDriver().SysRoot; + + SmallString<128> SysRootDir; + if (GCCInstallation.isValid()) { + StringRef LibDir = 
GCCInstallation.getParentLibPath(); + StringRef TripleStr = GCCInstallation.getTriple().str(); + llvm::sys::path::append(SysRootDir, LibDir, "..", TripleStr); + } else { + // Use the triple as provided to the driver. Unlike the parsed triple + // this has not been normalized to always contain every field. + llvm::sys::path::append(SysRootDir, getDriver().Dir, "..", + getDriver().getTargetTriple()); + } + + if (!llvm::sys::fs::exists(SysRootDir)) + return std::string(); + + return std::string(SysRootDir.str()); } ToolChain::CXXStdlibType @@ -172,9 +217,6 @@ void tools::xtensa::Assembler::ConstructJob(Compilation &C, const JobAction &JA, const auto &TC = static_cast(getToolChain()); - if (TC.GCCToolchainName == "") - llvm_unreachable("Unable to find Xtensa GCC assembler"); - claimNoWarnArgs(Args); ArgStringList CmdArgs; @@ -218,17 +260,24 @@ void xtensa::Linker::ConstructJob(Compilation &C, const JobAction &JA, bool WantCRTs = !Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles); const auto &ToolChain = - static_cast(getToolChain()); - - if (ToolChain.GCCToolchainName == "") - llvm_unreachable("Unable to find Xtensa GCC linker"); - - if (!Args.getLastArgValue(options::OPT_fuse_ld_EQ).empty()) { - Linker.assign(ToolChain.GetLinkerPath()); + static_cast(getToolChain()); + const Driver &D = ToolChain.getDriver(); + + if (!D.SysRoot.empty()) + CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot)); + + bool LinkerIsLLD; + std::string LinkerPath = ToolChain.GetLinkerPath(&LinkerIsLLD); + if (ToolChain.GCCToolchainName != "") { + if (!LinkerIsLLD) { + Linker.assign(ToolChain.GCCToolchainDir); + llvm::sys::path::append( + Linker, "bin", ToolChain.GCCToolchainName + "-" + getShortName()); + } else { + Linker.assign(LinkerPath); + } } else { - Linker.assign(ToolChain.GCCToolchainDir); - llvm::sys::path::append(Linker, "bin", - ToolChain.GCCToolchainName + "-" + getShortName()); + Linker.assign(LinkerPath); } const char *crtbegin, *crtend; @@ -237,17 +286,17 
@@ void xtensa::Linker::ConstructJob(Compilation &C, const JobAction &JA, crtbegin = "crtbegin.o"; crtend = "crtend.o"; } else { - assert (RuntimeLib == ToolChain::RLT_CompilerRT); + assert(RuntimeLib == ToolChain::RLT_CompilerRT); crtbegin = ToolChain.getCompilerRTArgString(Args, "crtbegin", ToolChain::FT_Object); - crtend = ToolChain.getCompilerRTArgString(Args, "crtend", - ToolChain::FT_Object); + crtend = + ToolChain.getCompilerRTArgString(Args, "crtend", ToolChain::FT_Object); } if (WantCRTs) { // TODO: The crt0.o is not used for esp targets, but maybe used in // future for other vendors - //CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o"))); + // CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o"))); CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin))); } @@ -258,7 +307,7 @@ void xtensa::Linker::ConstructJob(Compilation &C, const JobAction &JA, Args.addAllArgs(CmdArgs, {options::OPT_T_Group, options::OPT_e, options::OPT_s, options::OPT_t, options::OPT_u_Group}); - + if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nodefaultlibs)) { if (ToolChain.ShouldLinkCXXStdlib(Args)) @@ -271,9 +320,9 @@ void xtensa::Linker::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back("-o"); CmdArgs.push_back(Output.getFilename()); - C.addCommand(std::make_unique( - JA, *this, ResponseFileSupport::AtFileCurCP(), Args.MakeArgString(Linker), - CmdArgs, Inputs)); + C.addCommand( + std::make_unique(JA, *this, ResponseFileSupport::AtFileCurCP(), + Args.MakeArgString(Linker), CmdArgs, Inputs)); } // Get features by CPU name diff --git a/clang/lib/Driver/ToolChains/Xtensa.h b/clang/lib/Driver/ToolChains/Xtensa.h index 38f8f1b2c8d5f..bef3883742db5 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.h +++ b/clang/lib/Driver/ToolChains/Xtensa.h @@ -47,6 +47,9 @@ class LLVM_LIBRARY_VISIBILITY XtensaToolChain : public Generic_ELF { std::string GCCLibAndIncVersion = ""; std::string GCCToolchainName = ""; 
std::string GCCToolchainDir = ""; + +private: + std::string computeSysRoot() const override; }; } // end namespace toolchains From 24752c1f108daf27329db68ef1adfd11d8a81680 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:20 +0300 Subject: [PATCH 120/289] esp/ci: Fixes Windows release archives --- .gitlab-ci.yml | 19 ++++++------------- .legacy-release.yml | 4 ++-- .universal-toolchain-release.yml | 24 ++++++++---------------- 3 files changed, 16 insertions(+), 31 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index e152a1c90a6e9..dbe33f2bd2c31 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -15,14 +15,11 @@ variables: CLANG_VER: "15.0.0" GCC_REL_NAME: "esp-2022r1" GCC_REL_VER: "gcc11_2_0" - NEWLIB_REPO: "newlib-cygwin" NEWLIB_REF: "esp-2022r1" - BINUTILS_REPO: "binutils-gdb" BINUTILS_REF: "esp-2022r1-binutils" - XTENSA_OVERLAYS_REPO: "xtensa-overlays" XTENSA_OVERLAYS_REF: "master" - LLVM_GCC_TESTSUITE_REF: "release_universal_clang_toolchain" - XTENSA_CLANG_TOOLCHAIN_REF: "release_universal_clang_toolchain" + LLVM_GCC_TESTSUITE_REF: "esp-15.0.0-20221201" + XTENSA_CLANG_TOOLCHAIN_REF: "esp-15.0.0-20221201" CROSS_ARM_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-arm:1 PLATFORM_NAME_LINUX: "linux-amd64" @@ -39,10 +36,10 @@ variables: ARCHIVE_TOOL_WIN: "zip -9 -r" UNARCHIVE_TOOL_WIN: "unzip" ARCHIVE_EXT_WIN: "zip" - # Use Linux xz compressor to minimize Windows build artifact size. - # Upon release archive will be re-packed into zip format for uploading to GH. 
- ARCHIVE_TOOL_WIN_INT: ${ARCHIVE_TOOL_LINUX} - UNARCHIVE_TOOL_WIN_INT: ${UNARCHIVE_TOOL_LINUX} + + PACK_ARCHIVE_TOOL_WIN: "tar -h -cJf" + PACK_UNARCHIVE_TOOL_WIN: "${UNARCHIVE_TOOL_LINUX}" + PACK_ARCHIVE_EXT_WIN: "${ARCHIVE_EXT_LINUX}" ARCHIVE_TOOL_MACOS: "tar -cJf" UNARCHIVE_TOOL_MACOS: "tar -xf" @@ -52,10 +49,6 @@ variables: UNARCHIVE_TOOL_NEWLIB: ${UNARCHIVE_TOOL_LINUX} ARCHIVE_EXT_NEWLIB: ${ARCHIVE_EXT_LINUX} - LIBS_ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - LIBS_UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - LIBS_ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" - DIST_DIR: "dist" BUILD_DIR: "build" DOWNLOADS_DIR: "downloads" diff --git a/.legacy-release.yml b/.legacy-release.yml index c46195f3fe475..2042369e9049d 100644 --- a/.legacy-release.yml +++ b/.legacy-release.yml @@ -104,11 +104,11 @@ linux_amd64_testsuite: - ${UNARCHIVE_TOOL} ${DIST_DIR}/${ARCHIVE_NAME} # getting testsuite - - git clone -b ${LLVM_GCC_TESTSUITE_REF} --depth 1 $GITLAB_SSH_SERVER/idf/llvm-xtensa-testsuite.git + - git clone -b ${LLVM_GCC_TESTSUITE_REF} --depth 1 $GITLAB_SSH_SERVER/idf/${LLVM_TESTSUITE_REPO}.git # preparing testsuite - export PATH=${PWD}/${XTENSA_CLANG_TOOLCHAIN}/bin/:$PATH - - cd llvm-xtensa-testsuite + - cd ${LLVM_TESTSUITE_REPO} # qemu - ./qemu_esp32_install.sh diff --git a/.universal-toolchain-release.yml b/.universal-toolchain-release.yml index 5292918a32a97..27eebaf98e6c7 100644 --- a/.universal-toolchain-release.yml +++ b/.universal-toolchain-release.yml @@ -5,7 +5,7 @@ REL_NUM=$(git describe --abbrev=7) REL_NAME=${REL_SFX}-${REL_NUM}-${PLATFORM_NAME} ARCHIVE_NAME=${REL_NAME}.${ARCHIVE_EXT} - LIBS_ARCHIVE_NAME=libs_${REL_NAME}.${LIBS_ARCHIVE_EXT} + LIBS_ARCHIVE_NAME=libs_${REL_NAME}.${ARCHIVE_EXT} echo "PLATFORM_NAME: $PLATFORM_NAME" echo "REL_NUM: $REL_NUM" echo "REL_NAME: $REL_NAME" @@ -35,7 +35,7 @@ # Pack libs to be used for Rust, Go etc. 
.package_libs: &package_libs | - ${LIBS_ARCHIVE_TOOL} ${LIBS_ARCHIVE_NAME} esp-clang/lib/libclang* esp-clang/lib/clang/${CLANG_VER}/include + eval ${ARCHIVE_TOOL} ${LIBS_ARCHIVE_NAME} esp-clang/lib/clang/${CLANG_VER}/include esp-clang/lib/lib{clang,LLVM}* ${LIBS_PACK_EXTRA_PATHS:-} mkdir -p ${DISTRO_DIR} mv ${LIBS_ARCHIVE_NAME} ${DISTRO_DIR}/ echo "${LIBS_ARCHIVE_NAME}" > ${DISTRO_DIR}/file_libs-${PLATFORM_NAME} @@ -254,9 +254,6 @@ build_newlib: ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" - LIBS_ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - LIBS_UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - LIBS_ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" pack_x86_64-linux-gnu: extends: .pack_linux-gnu_template @@ -295,12 +292,10 @@ pack_x86_64-w64-mingw32: variables: CONF_HOST: "x86_64-w64-mingw32" PLATFORM_NAME: "${PLATFORM_NAME_WIN}" - ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" - LIBS_ARCHIVE_TOOL: "${ARCHIVE_TOOL_WIN}" - LIBS_UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_WIN}" - LIBS_ARCHIVE_EXT: "${ARCHIVE_EXT_WIN}" + ARCHIVE_TOOL: "${PACK_ARCHIVE_TOOL_WIN}" + UNARCHIVE_TOOL: "${PACK_UNARCHIVE_TOOL_WIN}" + ARCHIVE_EXT: "${PACK_ARCHIVE_EXT_WIN}" + LIBS_PACK_EXTRA_PATHS: esp-clang/bin/lib{c++,clang,LLVM,unwind}* .pack_apple-darwin_template: extends: .pack_template @@ -308,9 +303,6 @@ pack_x86_64-w64-mingw32: ARCHIVE_TOOL: "${ARCHIVE_TOOL_MACOS}" UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_MACOS}" ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" - LIBS_ARCHIVE_TOOL: "${ARCHIVE_TOOL_MACOS}" - LIBS_UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_MACOS}" - LIBS_ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" pack_x86_64-apple-darwin: extends: .pack_apple-darwin_template @@ -344,10 +336,10 @@ test_x86_64-linux-gnu: - *get_release_name - ${UNARCHIVE_TOOL} ${DIST_DIR}/${ARCHIVE_NAME} # getting testsuite - - git clone -b ${LLVM_GCC_TESTSUITE_REF} --depth 1 $GITLAB_SSH_SERVER/idf/llvm-xtensa-testsuite.git + - git 
clone -b ${LLVM_GCC_TESTSUITE_REF} --depth 1 $GITLAB_SSH_SERVER/idf/${LLVM_TESTSUITE_REPO}.git # preparing testsuite - export PATH=${PWD}/esp-clang/bin:$PATH - - cd llvm-xtensa-testsuite + - cd ${LLVM_TESTSUITE_REPO} # qemu - ./qemu_esp32_install.sh # run testsuite for esp32 From 4107731201c8fc0a8739c047e44c8c7b64891ff5 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:21 +0300 Subject: [PATCH 121/289] esp/ci: Check for OOM failures after build --- .universal-toolchain-release.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.universal-toolchain-release.yml b/.universal-toolchain-release.yml index 27eebaf98e6c7..75a41258aead0 100644 --- a/.universal-toolchain-release.yml +++ b/.universal-toolchain-release.yml @@ -65,6 +65,14 @@ expire_in: 1 day variables: BUILD_TOOLCHAIN_CMD_EXTRA_ARGS: "" + after_script: + # help to identify that build failed due to OOM + - > + if [ $CI_JOB_STATUS == 'failed' ]; then + [ ! -f "${BUILD_DIR}/build.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/build.log || true + [ ! -f "${BUILD_DIR}/tests.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/tests.log || true + [ ! -f "${BUILD_DIR}/lld-tests.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/lld-tests.log || true + fi script: - *get_release_name - mkdir ${DOWNLOADS_DIR} From 9c66bf91b0ed0b78103cb8c72e42427b1e7a3ac0 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:21 +0300 Subject: [PATCH 122/289] [LLD][Xtensa] Cover DIFF{8, 16, 32} relocations. --- lld/ELF/Arch/Xtensa.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lld/ELF/Arch/Xtensa.cpp b/lld/ELF/Arch/Xtensa.cpp index f6d75fde7ac29..10d6c3b52d1dc 100644 --- a/lld/ELF/Arch/Xtensa.cpp +++ b/lld/ELF/Arch/Xtensa.cpp @@ -71,6 +71,9 @@ RelExpr Xtensa::getRelExpr(RelType type, const Symbol &s, // Because it's a relaxation hint, this relocation can be ignored for now // until linker relaxations are implemented. 
return R_NONE; + case R_XTENSA_DIFF8: + case R_XTENSA_DIFF16: + case R_XTENSA_DIFF32: case R_XTENSA_PDIFF8: case R_XTENSA_PDIFF16: case R_XTENSA_PDIFF32: From ea2f535aefbbee600eda12be8deba48786a6c424 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 19:45:28 +0300 Subject: [PATCH 123/289] [Xtensa] Implement constant islands pass The constant islands pass is always executed for large code model. Also currently is disabled support of the hardware loops for large code model, need to add support for hwloops in constant islands pass in future. --- llvm/lib/Target/Xtensa/CMakeLists.txt | 1 + llvm/lib/Target/Xtensa/Xtensa.h | 1 + llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp | 32 + llvm/lib/Target/Xtensa/XtensaAsmPrinter.h | 4 +- .../Target/Xtensa/XtensaConstantIsland.cpp | 1468 +++++++++++++++++ llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 20 + .../lib/Target/Xtensa/XtensaTargetMachine.cpp | 1 + .../Xtensa/XtensaTargetTransformInfo.cpp | 6 + 8 files changed, 1532 insertions(+), 1 deletion(-) create mode 100644 llvm/lib/Target/Xtensa/XtensaConstantIsland.cpp diff --git a/llvm/lib/Target/Xtensa/CMakeLists.txt b/llvm/lib/Target/Xtensa/CMakeLists.txt index 261c5548d780d..1cdb4fbac450b 100644 --- a/llvm/lib/Target/Xtensa/CMakeLists.txt +++ b/llvm/lib/Target/Xtensa/CMakeLists.txt @@ -17,6 +17,7 @@ add_public_tablegen_target(XtensaCommonTableGen) add_llvm_target(XtensaCodeGen XtensaAsmPrinter.cpp XtensaConstantPoolValue.cpp + XtensaConstantIsland.cpp XtensaESP32PSRAMFix.cpp XtensaFixupHWLoops.cpp XtensaFrameLowering.cpp diff --git a/llvm/lib/Target/Xtensa/Xtensa.h b/llvm/lib/Target/Xtensa/Xtensa.h index 12ab08e914e89..fca6ed897c574 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.h +++ b/llvm/lib/Target/Xtensa/Xtensa.h @@ -29,5 +29,6 @@ FunctionPass *createXtensaSizeReductionPass(); FunctionPass *createXtensaHardwareLoops(); FunctionPass *createXtensaFixupHwLoops(); FunctionPass *createXtensaPSRAMCacheFixPass(); +FunctionPass *createXtensaConstantIslandPass(); } // 
namespace llvm #endif // LLVM_LIB_TARGET_XTENSA_XTENSA_H diff --git a/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp index ba66086844b37..9c4542001bd93 100644 --- a/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp +++ b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp @@ -45,6 +45,38 @@ getModifierVariantKind(XtensaCP::XtensaCPModifier Modifier) { void XtensaAsmPrinter::emitInstruction(const MachineInstr *MI) { unsigned Opc = MI->getOpcode(); + const MachineConstantPool *MCP = MF->getConstantPool(); + + // If we just ended a constant pool, mark it as such. + if (InConstantPool && Opc != Xtensa::CONSTPOOL_ENTRY) { + OutStreamer->emitDataRegion(MCDR_DataRegionEnd); + InConstantPool = false; + } + + if (Opc == Xtensa::CONSTPOOL_ENTRY) { + // CONSTPOOL_ENTRY - This instruction represents a floating + // constant pool in the function. The first operand is the ID# + // for this instruction, the second is the index into the + // MachineConstantPool that this is, the third is the size in + // bytes of this constant pool entry. + // The required alignment is specified on the basic block holding this MI. + // + unsigned LabelId = (unsigned)MI->getOperand(0).getImm(); + unsigned CPIdx = (unsigned)MI->getOperand(1).getIndex(); + + // If this is the first entry of the pool, mark it. 
+ if (!InConstantPool) { + if (OutStreamer->hasRawTextSupport()) { + OutStreamer->emitRawText(StringRef("\t.literal_position\n")); + } + OutStreamer->emitDataRegion(MCDR_DataRegion); + InConstantPool = true; + } + const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPIdx]; + + emitMachineConstantPoolEntry(MCPE, LabelId); + return; + } switch (Opc) { case Xtensa::BR_JT: diff --git a/llvm/lib/Target/Xtensa/XtensaAsmPrinter.h b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.h index 1137309cd9a45..e4e7cb0f67303 100644 --- a/llvm/lib/Target/Xtensa/XtensaAsmPrinter.h +++ b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.h @@ -27,7 +27,9 @@ class raw_ostream; class LLVM_LIBRARY_VISIBILITY XtensaAsmPrinter : public AsmPrinter { const MCSubtargetInfo *STI; - + /// InConstantPool - Maintain state when emitting a sequence of constant + /// pool entries so we can properly mark them as data regions. + bool InConstantPool = false; public: explicit XtensaAsmPrinter(TargetMachine &TM, std::unique_ptr Streamer) diff --git a/llvm/lib/Target/Xtensa/XtensaConstantIsland.cpp b/llvm/lib/Target/Xtensa/XtensaConstantIsland.cpp new file mode 100644 index 0000000000000..f0db5bbe065ea --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaConstantIsland.cpp @@ -0,0 +1,1468 @@ +//===- XtensaConstantIslandPass.cpp - Emit Pc Relative loads +//----------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This pass is used to make Pc relative loads of constants. +// +// Loading constants inline is expensive on Xtensa and it's in general better +// to place the constant nearby in code space and then it can be loaded with a +// simple l32r instruction. +// +// The constants can be not just numbers but addresses of functions and labels. 
+// This can be particularly helpful in static relocation mode for embedded +// non-linux targets. +// +//===----------------------------------------------------------------------===// + +#include "Xtensa.h" +#include "XtensaConstantPoolValue.h" +#include "XtensaMachineFunctionInfo.h" +#include "XtensaSubtarget.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallSet.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/CodeGen/LivePhysRegs.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineConstantPool.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/RegisterScavenging.h" +#include "llvm/Config/llvm-config.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/DebugLoc.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/Type.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/Format.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/raw_ostream.h" +#include +#include +#include +#include +#include + +using namespace llvm; + +#define DEBUG_TYPE "xtensa-constant-islands" + +STATISTIC(NumCPEs, "Number of constpool entries"); +STATISTIC(NumSplit, "Number of uncond branches inserted"); +STATISTIC(NumCBrFixed, "Number of cond branches fixed"); +STATISTIC(NumUBrFixed, "Number of uncond branches fixed"); + +// FIXME: This option should be removed once it has received sufficient testing. 
+static cl::opt + AlignConstantIslands("xtensa-align-constant-islands", cl::Hidden, + cl::init(true), + cl::desc("Align constant islands in code")); + +// Rather than do make check tests with huge amounts of code, we force +// the test to use this amount. +static cl::opt ConstantIslandsSmallOffset( + "xtensa-constant-islands-small-offset", cl::init(0), + cl::desc("Make small offsets be this amount for testing purposes"), + cl::Hidden); + +// TODO +// This defines for L32R and J instruction displacemnt for +// testing purposes only +#define MAX_DISP_L32R 262144 +#define BITS_JUMP 18 + +static unsigned int branchTargetOperand(MachineInstr *MI) { + switch (MI->getOpcode()) { + case Xtensa::J: + return 0; + case Xtensa::BEQ: + case Xtensa::BNE: + case Xtensa::BLT: + case Xtensa::BLTU: + case Xtensa::BGE: + case Xtensa::BGEU: + case Xtensa::BEQI: + case Xtensa::BNEI: + case Xtensa::BLTI: + case Xtensa::BLTUI: + case Xtensa::BGEI: + case Xtensa::BGEUI: + return 2; + case Xtensa::BEQZ: + case Xtensa::BNEZ: + case Xtensa::BLTZ: + case Xtensa::BGEZ: + return 1; + case Xtensa::BT: + case Xtensa::BF: + return 1; + } + llvm_unreachable("Unknown branch type"); +} + +namespace { + +using Iter = MachineBasicBlock::iterator; +using ReverseIter = MachineBasicBlock::reverse_iterator; + +/// XtensaConstantIslands - Due to limited PC-relative displacements, Xtensa +/// requires constant pool entries to be scattered among the instructions +/// inside a function. To do this, it completely ignores the normal LLVM +/// constant pool; instead, it places constants wherever it feels like with +/// special instructions. +/// +/// The terminology used in this pass includes: +/// Islands - Clumps of constants placed in the function. +/// Water - Potential places where an island could be formed. +/// CPE - A constant pool entry that has been placed somewhere, which +/// tracks a list of users. 
+ +class XtensaConstantIslands : public MachineFunctionPass { + /// BasicBlockInfo - Information about the offset and size of a single + /// basic block. + struct BasicBlockInfo { + /// Offset - Distance from the beginning of the function to the beginning + /// of this basic block. + /// + /// Offsets are computed assuming worst case padding before an aligned + /// block. This means that subtracting basic block offsets always gives a + /// conservative estimate of the real distance which may be smaller. + /// + /// Because worst case padding is used, the computed offset of an aligned + /// block may not actually be aligned. + unsigned Offset = 0; + + /// Size - Size of the basic block in bytes. If the block contains + /// inline assembly, this is a worst case estimate. + /// + /// The size does not include any alignment padding whether from the + /// beginning of the block, or from an aligned jump table at the end. + unsigned Size = 0; + + BasicBlockInfo() = default; + + unsigned postOffset() const { return Offset + Size; } + }; + + std::vector BBInfo; + + /// WaterList - A sorted list of basic blocks where islands could be placed + /// (i.e. blocks that don't fall through to the following block, due + /// to a return, unreachable, or unconditional branch). + std::vector WaterList; + + /// NewWaterList - The subset of WaterList that was created since the + /// previous iteration by inserting unconditional branches. + SmallSet NewWaterList; + + using water_iterator = std::vector::iterator; + + /// CPUser - One user of a constant pool, keeping the machine instruction + /// pointer, the constant pool being referenced, and the max displacement + /// allowed from the instruction to the CP. The LowWaterMark records the + /// lowest basic block where a new CPEntry can be placed. To ensure this + /// pass terminates, the CP entries are initially placed at the second block + /// of the function and then move monotonically to higher addresses. 
The + /// exception to this rule is when the current CP entry for a particular + /// CPUser is out of range, but there is another CP entry for the same + /// constant value in range. We want to use the existing in-range CP + /// entry, but if it later moves out of range, the search for new water + /// should resume where it left off. The LowWaterMark is used to record + /// that point. + struct CPUser { + MachineInstr *MI; + MachineInstr *CPEMI; + MachineBasicBlock *LowWaterMark; + + private: + unsigned MaxDisp; + + public: + CPUser(MachineInstr *mi, MachineInstr *cpemi, unsigned maxdisp) + : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp) { + LowWaterMark = CPEMI->getParent(); + } + + /// getMaxDisp - Returns the maximum displacement supported by MI. + unsigned getMaxDisp() const { + unsigned xMaxDisp = + ConstantIslandsSmallOffset ? ConstantIslandsSmallOffset : MaxDisp; + return xMaxDisp; + } + + void setMaxDisp(unsigned val) { MaxDisp = val; } + }; + + /// CPUsers - Keep track of all of the machine instructions that use various + /// constant pools and their max displacement. + std::vector CPUsers; + + /// CPEntry - One per constant pool entry, keeping the machine instruction + /// pointer, the constpool index, and the number of CPUser's which + /// reference this entry. + struct CPEntry { + MachineInstr *CPEMI; + unsigned CPI; + unsigned RefCount; + + CPEntry(MachineInstr *cpemi, unsigned cpi, unsigned rc = 0) + : CPEMI(cpemi), CPI(cpi), RefCount(rc) {} + }; + + /// CPEntries - Keep track of all of the constant pool entry machine + /// instructions. For each original constpool index (i.e. those that + /// existed upon entry to this pass), it keeps a vector of entries. + /// Original elements are cloned as we go along; the clones are + /// put in the vector of the original element, but have distinct CPIs. 
+ std::vector> CPEntries; + + /// ImmBranch - One per immediate branch, keeping the machine instruction + /// pointer, conditional or unconditional, the max displacement, + /// and (if isCond is true) the corresponding unconditional branch + /// opcode. + struct ImmBranch { + MachineInstr *MI; + unsigned MaxDisp : 31; + bool isCond : 1; + int UncondBr; + + ImmBranch(MachineInstr *mi, unsigned maxdisp, bool cond, int ubr) + : MI(mi), MaxDisp(maxdisp), isCond(cond), UncondBr(ubr) {} + }; + + /// ImmBranches - Keep track of all the immediate branch instructions. + /// + std::vector ImmBranches; + + const XtensaSubtarget *STI = nullptr; + const XtensaInstrInfo *TII; + const TargetRegisterInfo *TRI; + XtensaFunctionInfo *MFI; + MachineFunction *MF = nullptr; + MachineConstantPool *MCP = nullptr; + MachineBasicBlock *InitConstantMBB = nullptr; + std::unique_ptr RS; + LivePhysRegs LiveRegs; + + unsigned PICLabelUId; + bool PrescannedForConstants = false; + + void initPICLabelUId(unsigned UId) { PICLabelUId = UId; } + + unsigned createPICLabelUId() { return PICLabelUId++; } + +public: + static char ID; + + XtensaConstantIslands() : MachineFunctionPass(ID) {} + + StringRef getPassName() const override { return "Xtensa Constant Islands"; } + + bool runOnMachineFunction(MachineFunction &F) override; + + MachineFunctionProperties getRequiredProperties() const override { + return MachineFunctionProperties().set( + MachineFunctionProperties::Property::NoVRegs); + } + + void doInitialPlacement(std::vector &CPEMIs); + CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI); + Align getCPEAlign(const MachineInstr &CPEMI); + void initializeFunctionInfo(const std::vector &CPEMIs); + unsigned getOffsetOf(MachineInstr *MI) const; + unsigned getUserOffset(CPUser &) const; + void dumpBBs(); + + bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset, + unsigned Disp); + + void computeBlockSize(MachineBasicBlock *MBB); + MachineBasicBlock 
*splitBlockBeforeInstr(MachineInstr &MI); + void updateForInsertedWaterBlock(MachineBasicBlock *NewBB); + void adjustBBOffsetsAfter(MachineBasicBlock *BB); + bool decrementCPEReferenceCount(unsigned CPI, MachineInstr *CPEMI); + int findInRangeCPEntry(CPUser &U, unsigned UserOffset); + bool findAvailableWater(CPUser &U, unsigned UserOffset, + water_iterator &WaterIter); + void createNewWater(unsigned CPUserIndex, unsigned UserOffset, + MachineBasicBlock *&NewMBB); + bool handleConstantPoolUser(unsigned CPUserIndex); + void removeDeadCPEMI(MachineInstr *CPEMI); + bool removeUnusedCPEntries(); + bool isCPEntryInRange(MachineInstr *MI, unsigned UserOffset, + MachineInstr *CPEMI, unsigned Disp, + bool DoDump = false); + bool isWaterInRange(unsigned UserOffset, MachineBasicBlock *Water, CPUser &U, + unsigned &Growth); + bool isBBInRange(MachineInstr *MI, MachineBasicBlock *BB, unsigned Disp); + bool fixupImmediateBr(ImmBranch &Br); + bool fixupConditionalBr(ImmBranch &Br); + bool fixupUnconditionalBr(ImmBranch &Br); + void removeEntryJump(); +}; + +} // end anonymous namespace + +char XtensaConstantIslands::ID = 0; + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +/// print block size and offset information - debugging +LLVM_DUMP_METHOD void XtensaConstantIslands::dumpBBs() { + for (unsigned J = 0, E = BBInfo.size(); J != E; ++J) { + const BasicBlockInfo &BBI = BBInfo[J]; + dbgs() << format("%08x %bb.%u\t", BBI.Offset, J) + << format(" size=%#x\n", BBInfo[J].Size); + } +} +#endif + +bool XtensaConstantIslands::runOnMachineFunction(MachineFunction &mf) { + MF = &mf; + MCP = mf.getConstantPool(); + STI = &mf.getSubtarget(); + LLVM_DEBUG(dbgs() << "constant island machine function " + << "\n"); + TII = (const XtensaInstrInfo *)STI->getInstrInfo(); + MFI = MF->getInfo(); + + TRI = STI->getRegisterInfo(); + + if (!STI->useTextSectionLiterals()) + return false; + + if (TRI->trackLivenessAfterRegAlloc(*MF)) + RS.reset(new RegScavenger()); + + LLVM_DEBUG(dbgs() << "constant 
island processing " + << "\n"); + + // Renumber all of the machine basic blocks in the function, guaranteeing that + // the numbers agree with the position of the block in the function. + MF->RenumberBlocks(); + + bool MadeChange = false; + + // Perform the initial placement of the constant pool entries. To start with, + // we put them all at the end of the function. + std::vector CPEMIs; + doInitialPlacement(CPEMIs); + + // Renumber all of the machine basic blocks in the function, guaranteeing + // that the numbers agree with the position of the block in the function. + MF->RenumberBlocks(); + + /// The next UID to take is the first unused one. + initPICLabelUId(CPEMIs.size()); + + // Do the initial scan of the function, building up information about the + // sizes of each block, the location of all the water, and finding all of the + // constant pool users. + initializeFunctionInfo(CPEMIs); + CPEMIs.clear(); + LLVM_DEBUG(dumpBBs()); + + /// Remove dead constant pool entries. + MadeChange |= removeUnusedCPEntries(); + + // Iteratively place constant pool entries and fix up branches until there + // is no change. + unsigned NoCPIters = 0, NoBRIters = 0; + (void)NoBRIters; + while (true) { + LLVM_DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n'); + bool CPChange = false; + for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) { + CPChange |= handleConstantPoolUser(i); + } + if (CPChange && ++NoCPIters > 30) + report_fatal_error("Constant Island pass failed to converge!"); + LLVM_DEBUG(dumpBBs()); + + // Clear NewWaterList now. If we split a block for branches, it should + // appear as "new water" for the next iteration of constant pool placement. 
+ NewWaterList.clear(); + + LLVM_DEBUG(dbgs() << "Beginning BR iteration #" << NoBRIters << '\n'); + bool BRChange = false; + for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i) + BRChange |= fixupImmediateBr(ImmBranches[i]); + if (BRChange && ++NoBRIters > 30) + report_fatal_error("Branch Fix Up pass failed to converge!"); + LLVM_DEBUG(dumpBBs()); + + if (!CPChange && !BRChange) + break; + + MadeChange = true; + } + removeEntryJump(); + LLVM_DEBUG(dbgs() << '\n'; dumpBBs()); + BBInfo.clear(); + WaterList.clear(); + CPUsers.clear(); + CPEntries.clear(); + ImmBranches.clear(); + return MadeChange; +} + +/// BBHasFallthrough - Return true if the specified basic block can fallthrough +/// into the block immediately after it. +static bool BBHasFallthrough(MachineBasicBlock *MBB) { + // Get the next machine basic block in the function. + MachineFunction::iterator MBBI = MBB->getIterator(); + // Can't fall off end of function. + if (std::next(MBBI) == MBB->getParent()->end()) + return false; + + MachineBasicBlock *NextBB = &*std::next(MBBI); + return llvm::is_contained(MBB->successors(), NextBB); +} + +/// doInitialPlacement - Perform the initial placement of the constant pool +/// entries. To start with, we put them all at the end of the function. +void XtensaConstantIslands::doInitialPlacement( + std::vector &CPEMIs) { + // Create the basic block to hold the CPE's. + MachineBasicBlock *BB = MF->CreateMachineBasicBlock(); + + // TODO + MachineBasicBlock *Entry = &MF->front(); + MachineBasicBlock *NewEntry = MF->CreateMachineBasicBlock(); + + MF->insert(Entry->getIterator(), NewEntry); + BuildMI(NewEntry, DebugLoc(), TII->get(Xtensa::J)).addMBB(Entry); + NewEntry->addSuccessor(Entry); + NewEntry->setAlignment(Entry->getAlignment()); + + // Copy live-in information to new block. + for (const MachineBasicBlock::RegisterMaskPair &RegMaskPair : + Entry->liveins()) + NewEntry->addLiveIn(RegMaskPair); + + // MachineConstantPool measures alignment in bytes. 
We measure in log2(bytes). + const Align MaxAlign = MCP->getConstantPoolAlign(); + + BB->setAlignment(AlignConstantIslands ? MaxAlign : Align(4)); + + MF->insert(Entry->getIterator(), BB); + + // The function needs to be as aligned as the basic blocks. The linker may + // move functions around based on their alignment. + MF->ensureAlignment(BB->getAlignment()); + + // Order the entries in BB by descending alignment. That ensures correct + // alignment of all entries as long as BB is sufficiently aligned. Keep + // track of the insertion point for each alignment. We are going to bucket + // sort the entries as they are created. + SmallVector InsPoint(Log2(MaxAlign) + 1, + BB->end()); + + // Add all of the constants from the constant pool to the end block, use an + // identity mapping of CPI's to CPE's. + const std::vector &CPs = MCP->getConstants(); + + const DataLayout &TD = MF->getDataLayout(); + for (unsigned i = 0, e = CPs.size(); i != e; ++i) { + unsigned Size = CPs[i].getSizeInBytes(TD); + assert(Size >= 4 && "Too small constant pool entry"); + Align Alignment = CPs[i].getAlign(); + // Verify that all constant pool entries are a multiple of their alignment. + // If not, we would have to pad them out so that instructions stay aligned. + assert(isAligned(Alignment, Size) && "CP Entry not multiple of 4 bytes!"); + + // Insert CONSTPOOL_ENTRY before entries with a smaller alignment. + unsigned LogAlign = Log2(Alignment); + MachineBasicBlock::iterator InsAt = InsPoint[LogAlign]; + + MachineInstr *CPEMI = + BuildMI(*BB, InsAt, DebugLoc(), TII->get(Xtensa::CONSTPOOL_ENTRY)) + .addImm(i) + .addConstantPoolIndex(i) + .addImm(Size); + + CPEMIs.push_back(CPEMI); + + // Ensure that future entries with higher alignment get inserted before + // CPEMI. This is bucket sort with iterators. + for (unsigned a = LogAlign + 1; a <= Log2(MaxAlign); ++a) + if (InsPoint[a] == InsAt) + InsPoint[a] = CPEMI; + // Add a new CPEntry, but no corresponding CPUser yet. 
+ CPEntries.emplace_back(1, CPEntry(CPEMI, i)); + ++NumCPEs; + LLVM_DEBUG(dbgs() << "Moved CPI#" << i << " to end of function, size = " + << Size << ", align = " << Alignment.value() << '\n'); + } + InitConstantMBB = BB; + LLVM_DEBUG(BB->dump()); +} + +/// findConstPoolEntry - Given the constpool index and CONSTPOOL_ENTRY MI, +/// look up the corresponding CPEntry. +XtensaConstantIslands::CPEntry * +XtensaConstantIslands::findConstPoolEntry(unsigned CPI, + const MachineInstr *CPEMI) { + std::vector &CPEs = CPEntries[CPI]; + // Number of entries per constpool index should be small, just do a + // linear search. + for (CPEntry &CPE : CPEs) { + if (CPE.CPEMI == CPEMI) + return &CPE; + } + return nullptr; +} + +/// getCPEAlign - Returns the required alignment of the constant pool entry +/// represented by CPEMI. Alignment is measured in log2(bytes) units. +Align XtensaConstantIslands::getCPEAlign(const MachineInstr &CPEMI) { + assert(CPEMI.getOpcode() == Xtensa::CONSTPOOL_ENTRY); + + // Everything is 4-byte aligned unless AlignConstantIslands is set. + if (!AlignConstantIslands) + return Align(4); + + unsigned CPI = CPEMI.getOperand(1).getIndex(); + assert(CPI < MCP->getConstants().size() && "Invalid constant pool index."); + return MCP->getConstants()[CPI].getAlign(); +} + +/// initializeFunctionInfo - Do the initial scan of the function, building up +/// information about the sizes of each block, the location of all the water, +/// and finding all of the constant pool users. +void XtensaConstantIslands::initializeFunctionInfo( + const std::vector &CPEMIs) { + BBInfo.clear(); + BBInfo.resize(MF->getNumBlockIDs()); + + // First thing, compute the size of all basic blocks, and see if the function + // has any inline assembly in it. If so, we have to be conservative about + // alignment assumptions, as we don't know for sure the size of any + // instructions in the inline assembly. 
+ for (MachineBasicBlock &MBB : *MF) + computeBlockSize(&MBB); + + // Compute block offsets. + adjustBBOffsetsAfter(&MF->front()); + + // Now go back through the instructions and build up our data structures. + for (MachineBasicBlock &MBB : *MF) { + // If this block doesn't fall through into the next MBB, then this is + // 'water' that a constant pool island could be placed. + if (!BBHasFallthrough(&MBB)) + WaterList.push_back(&MBB); + for (MachineInstr &MI : MBB) { + if (MI.isDebugInstr()) + continue; + + int Opc = MI.getOpcode(); + if (MI.isBranch()) { + bool isCond = false; + unsigned Bits = 0; + unsigned Scale = 1; + int UOpc = Xtensa::J; + switch (Opc) { + default: + continue; // Ignore other branches for now + case Xtensa::J: + Bits = BITS_JUMP; + Scale = 1; + isCond = false; + break; + case Xtensa::BEQ: + case Xtensa::BNE: + case Xtensa::BLT: + case Xtensa::BLTU: + case Xtensa::BGE: + case Xtensa::BGEU: + case Xtensa::BEQI: + case Xtensa::BNEI: + case Xtensa::BLTI: + case Xtensa::BLTUI: + case Xtensa::BGEI: + case Xtensa::BGEUI: + Bits = 8; + Scale = 1; + isCond = true; + break; + case Xtensa::BEQZ: + case Xtensa::BNEZ: + case Xtensa::BLTZ: + case Xtensa::BGEZ: + Bits = 12; + Scale = 1; + isCond = true; + break; + case Xtensa::BT: + case Xtensa::BF: + Bits = 8; + Scale = 1; + isCond = true; + break; + } + // Record this immediate branch. + unsigned MaxOffs = ((1 << (Bits - 1)) - 1) * Scale - 4; + ImmBranches.push_back(ImmBranch(&MI, MaxOffs, isCond, UOpc)); + } + + if (Opc == Xtensa::CONSTPOOL_ENTRY) + continue; + + // Scan the instructions for constant pool operands. + for (const MachineOperand &MO : MI.operands()) + if (MO.isCPI()) { + // We found one. The addressing mode tells us the max displacement + // from the PC that this instruction permits. 
+ unsigned CPI = MO.getIndex(); + MachineInstr *CPEMI = CPEMIs[CPI]; + + switch (Opc) { + default: + llvm_unreachable("Unknown addressing mode for CP reference!"); + case Xtensa::L32R: + CPUsers.push_back(CPUser(&MI, CPEMI, MAX_DISP_L32R)); + break; + } + + // Increment corresponding CPEntry reference count. + CPEntry *CPE = findConstPoolEntry(CPI, CPEMI); + assert(CPE && "Cannot find a corresponding CPEntry!"); + CPE->RefCount++; + + // Instructions can only use one CP entry, don't bother scanning the + // rest of the operands. + break; + } + } + } +} + +/// computeBlockSize - Compute the size and some alignment information for MBB. +/// This function updates BBInfo directly. +void XtensaConstantIslands::computeBlockSize(MachineBasicBlock *MBB) { + BasicBlockInfo &BBI = BBInfo[MBB->getNumber()]; + BBI.Size = 0; + + for (const MachineInstr &MI : *MBB) { + if (MI.getOpcode() == Xtensa::CONSTPOOL_ENTRY) { + BBI.Size += 4; + } else { + BBI.Size += TII->getInstSizeInBytes(MI); + } + } +} + +/// getOffsetOf - Return the current offset of the specified machine instruction +/// from the start of the function. This offset changes as stuff is moved +/// around inside the function. +unsigned XtensaConstantIslands::getOffsetOf(MachineInstr *MI) const { + MachineBasicBlock *MBB = MI->getParent(); + + // The offset is composed of two things: the sum of the sizes of all MBB's + // before this instruction's block, and the offset from the start of the block + // it is in. + unsigned Offset = BBInfo[MBB->getNumber()].Offset; + + // Sum instructions before MI in MBB. + for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) { + assert(I != MBB->end() && "Didn't find MI in its own basic block?"); + Offset += TII->getInstSizeInBytes(*I); + } + return Offset; +} + +/// CompareMBBNumbers - Little predicate function to sort the WaterList by MBB +/// ID. 
+static bool CompareMBBNumbers(const MachineBasicBlock *LHS, + const MachineBasicBlock *RHS) { + return LHS->getNumber() < RHS->getNumber(); +} + +/// updateForInsertedWaterBlock - When a block is newly inserted into the +/// machine function, it upsets all of the block numbers. Renumber the blocks +/// and update the arrays that parallel this numbering. +void XtensaConstantIslands::updateForInsertedWaterBlock( + MachineBasicBlock *NewBB) { + // Renumber the MBB's to keep them consecutive. + NewBB->getParent()->RenumberBlocks(NewBB); + + // Insert an entry into BBInfo to align it properly with the (newly + // renumbered) block numbers. + BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo()); + + // Next, update WaterList. Specifically, we need to add NewMBB as having + // available water after it. + water_iterator IP = llvm::lower_bound(WaterList, NewBB, CompareMBBNumbers); + WaterList.insert(IP, NewBB); +} + +unsigned XtensaConstantIslands::getUserOffset(CPUser &U) const { + return getOffsetOf(U.MI); +} + +/// Split the basic block containing MI into two blocks, which are joined by +/// an unconditional branch. Update data structures and renumber blocks to +/// account for this change and returns the newly created block. +MachineBasicBlock * +XtensaConstantIslands::splitBlockBeforeInstr(MachineInstr &MI) { + MachineBasicBlock *OrigBB = MI.getParent(); + + // Collect liveness information at MI. + LivePhysRegs LRs(*MF->getSubtarget().getRegisterInfo()); + LRs.addLiveOuts(*OrigBB); + auto LivenessEnd = ++MachineBasicBlock::iterator(MI).getReverse(); + for (MachineInstr &LiveMI : make_range(OrigBB->rbegin(), LivenessEnd)) + LRs.stepBackward(LiveMI); + + // Create a new MBB for the code after the OrigBB. + MachineBasicBlock *NewBB = + MF->CreateMachineBasicBlock(OrigBB->getBasicBlock()); + MachineFunction::iterator MBBI = ++OrigBB->getIterator(); + MF->insert(MBBI, NewBB); + + // Splice the instructions starting with MI over to NewBB. 
+ NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end()); + + // Add an unconditional branch from OrigBB to NewBB. + // Note the new unconditional branch is not being recorded. + // There doesn't seem to be meaningful DebugInfo available; this doesn't + // correspond to anything in the source. + BuildMI(OrigBB, DebugLoc(), TII->get(Xtensa::J)).addMBB(NewBB); + ++NumSplit; + + // Update the CFG. All succs of OrigBB are now succs of NewBB. + NewBB->transferSuccessors(OrigBB); + + // OrigBB branches to NewBB. + OrigBB->addSuccessor(NewBB); + + // Update live-in information in the new block. + MachineRegisterInfo &MRI = MF->getRegInfo(); + for (MCPhysReg L : LRs) + if (!MRI.isReserved(L)) + NewBB->addLiveIn(L); + + // Update internal data structures to account for the newly inserted MBB. + // This is almost the same as updateForInsertedWaterBlock, except that + // the Water goes after OrigBB, not NewBB. + MF->RenumberBlocks(NewBB); + + // Insert an entry into BBInfo to align it properly with the (newly + // renumbered) block numbers. + BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo()); + + // Next, update WaterList. Specifically, we need to add OrigMBB as having + // available water after it (but not if it's already there, which happens + // when splitting before a conditional branch that is followed by an + // unconditional branch - in that case we want to insert NewBB). + water_iterator IP = llvm::lower_bound(WaterList, OrigBB, CompareMBBNumbers); + MachineBasicBlock *WaterBB = *IP; + if (WaterBB == OrigBB) + WaterList.insert(std::next(IP), NewBB); + else + WaterList.insert(IP, OrigBB); + NewWaterList.insert(OrigBB); + + // Figure out how large the OrigBB is. As the first half of the original + // block, it cannot contain a tablejump. The size includes + // the new jump we added. (It should be possible to do this without + // recounting everything, but it's very confusing, and this is rarely + // executed.) 
+ computeBlockSize(OrigBB); + + // Figure out how large the NewMBB is. As the second half of the original + // block, it may contain a tablejump. + computeBlockSize(NewBB); + + // All BBOffsets following these blocks must be modified. + adjustBBOffsetsAfter(OrigBB); + +#if 0 + //TODO + // Need to fix live-in lists if we track liveness. + if (TRI->trackLivenessAfterRegAlloc(*MF)) + computeAndAddLiveIns(LiveRegs, *NewBB); +#endif + return NewBB; +} + +/// isOffsetInRange - Checks whether UserOffset (the location of a constant pool +/// reference) is within MaxDisp of TrialOffset (a proposed location of a +/// constant pool entry). +bool XtensaConstantIslands::isOffsetInRange(unsigned UserOffset, + unsigned TrialOffset, + unsigned MaxDisp) { + UserOffset = (UserOffset + 3) & (~0x3); + if ((UserOffset >= TrialOffset) && (UserOffset - TrialOffset <= MaxDisp)) { + return true; + } + return false; +} + +/// isWaterInRange - Returns true if a CPE placed after the specified +/// Water (a basic block) will be in range for the specific MI. +/// +/// Compute how much the function will grow by inserting a CPE after Water. +bool XtensaConstantIslands::isWaterInRange(unsigned UserOffset, + MachineBasicBlock *Water, CPUser &U, + unsigned &Growth) { + unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(); + unsigned NextBlockOffset; + Align NextBlockAlignment; + MachineFunction::const_iterator NextBlock = ++Water->getIterator(); + if (NextBlock == MF->end()) { + NextBlockOffset = BBInfo[Water->getNumber()].postOffset(); + NextBlockAlignment = Align(1); + } else { + NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset; + NextBlockAlignment = NextBlock->getAlignment(); + } + unsigned Size = U.CPEMI->getOperand(2).getImm(); + unsigned CPEEnd = CPEOffset + Size; + + // The CPE may be able to hide in the alignment padding before the next + // block. It may also cause more padding to be required if it is more aligned + // that the next block. 
+ if (CPEEnd > NextBlockOffset) { + Growth = CPEEnd - NextBlockOffset; + // Compute the padding that would go at the end of the CPE to align the next + // block. + Growth += offsetToAlignment(CPEEnd, NextBlockAlignment); + + // If the CPE is to be inserted before the instruction, that will raise + // the offset of the instruction. Also account for unknown alignment padding + // in blocks between CPE and the user. + if (CPEOffset < UserOffset) + UserOffset += Growth; + } else + // CPE fits in existing padding. + Growth = 0; + + return isOffsetInRange(UserOffset, CPEOffset, U.getMaxDisp()); +} + +/// isCPEntryInRange - Returns true if the distance between specific MI and +/// specific ConstPool entry instruction can fit in MI's displacement field. +bool XtensaConstantIslands::isCPEntryInRange(MachineInstr *MI, + unsigned UserOffset, + MachineInstr *CPEMI, + unsigned MaxDisp, bool DoDump) { + unsigned CPEOffset = getOffsetOf(CPEMI); + + if (DoDump) { + LLVM_DEBUG({ + unsigned Block = MI->getParent()->getNumber(); + const BasicBlockInfo &BBI = BBInfo[Block]; + dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm() + << " max delta=" << MaxDisp + << format(" insn address=%#x", UserOffset) << " in " + << printMBBReference(*MI->getParent()) << ": " + << format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI + << format("CPE address=%#x offset=%+d: ", CPEOffset, + int(CPEOffset - UserOffset)); + }); + } + + return isOffsetInRange(UserOffset, CPEOffset, MaxDisp); +} + +#ifndef NDEBUG +/// BBIsJumpedOver - Return true of the specified basic block's only predecessor +/// unconditionally branches to its only successor. 
+static bool BBIsJumpedOver(MachineBasicBlock *MBB) { + if (MBB->pred_size() != 1 || MBB->succ_size() != 1) + return false; + MachineBasicBlock *Succ = *MBB->succ_begin(); + MachineBasicBlock *Pred = *MBB->pred_begin(); + MachineInstr *PredMI = &Pred->back(); + + if (PredMI->getOpcode() == Xtensa::J) + return PredMI->getOperand(0).getMBB() == Succ; + return false; +} +#endif + +void XtensaConstantIslands::adjustBBOffsetsAfter(MachineBasicBlock *BB) { + unsigned BBNum = BB->getNumber(); + for (unsigned i = BBNum + 1, e = MF->getNumBlockIDs(); i < e; ++i) { + // Get the offset and known bits at the end of the layout predecessor. + // Include the alignment of the current block. + unsigned Offset = BBInfo[i - 1].Offset + BBInfo[i - 1].Size; + Align BlockAlignment = MF->getBlockNumbered(i)->getAlignment(); + BBInfo[i].Offset = Offset + offsetToAlignment(Offset, BlockAlignment); + } +} + +/// decrementCPEReferenceCount - find the constant pool entry with index CPI +/// and instruction CPEMI, and decrement its refcount. If the refcount +/// becomes 0 remove the entry and instruction. Returns true if we removed +/// the entry, false if we didn't. +bool XtensaConstantIslands::decrementCPEReferenceCount(unsigned CPI, + MachineInstr *CPEMI) { + // Find the old entry. Eliminate it if it is no longer used. + CPEntry *CPE = findConstPoolEntry(CPI, CPEMI); + assert(CPE && "Unexpected!"); + if (--CPE->RefCount == 0) { + removeDeadCPEMI(CPEMI); + CPE->CPEMI = nullptr; + --NumCPEs; + return true; + } + return false; +} + +/// LookForCPEntryInRange - see if the currently referenced CPE is in range; +/// if not, see if an in-range clone of the CPE is in range, and if so, +/// change the data structures so the user references the clone. 
Returns: +/// 0 = no existing entry found +/// 1 = entry found, and there were no code insertions or deletions +/// 2 = entry found, and there were code insertions or deletions +int XtensaConstantIslands::findInRangeCPEntry(CPUser &U, unsigned UserOffset) { + MachineInstr *UserMI = U.MI; + MachineInstr *CPEMI = U.CPEMI; + + // Check to see if the CPE is already in-range. + if (isCPEntryInRange(UserMI, UserOffset, CPEMI, U.getMaxDisp())) { + LLVM_DEBUG(dbgs() << "In range\n"); + return 1; + } + + // No. Look for previously created clones of the CPE that are in range. + unsigned CPI = CPEMI->getOperand(1).getIndex(); + std::vector &CPEs = CPEntries[CPI]; + for (CPEntry &CPE : CPEs) { + // We already tried this one + if (CPE.CPEMI == CPEMI) + continue; + // Removing CPEs can leave empty entries, skip + if (CPE.CPEMI == nullptr) + continue; + if (isCPEntryInRange(UserMI, UserOffset, CPE.CPEMI, U.getMaxDisp())) { + LLVM_DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#" << CPE.CPI + << "\n"); + // Point the CPUser node to the replacement + U.CPEMI = CPE.CPEMI; + // Change the CPI in the instruction operand to refer to the clone. + for (MachineOperand &MO : UserMI->operands()) + if (MO.isCPI()) { + MO.setIndex(CPE.CPI); + break; + } + // Adjust the refcount of the clone... + CPE.RefCount++; + // ...and the original. If we didn't remove the old entry, none of the + // addresses changed, so we don't need another pass. + return decrementCPEReferenceCount(CPI, CPEMI) ? 2 : 1; + } + } + return 0; +} + +/// getUnconditionalBrDisp - Returns the maximum displacement that can fit in +/// the specific unconditional branch instruction. +static inline unsigned getUnconditionalBrDisp(int Opc) { + // Currently only J instruction is used + return (1 << (BITS_JUMP - 1)); +} + +/// findAvailableWater - Look for an existing entry in the WaterList in which +/// we can place the CPE referenced from U so it's within range of U's MI. +/// Returns true if found, false if not. 
If it returns true, WaterIter +/// is set to the WaterList entry. +/// To ensure that this pass +/// terminates, the CPE location for a particular CPUser is only allowed to +/// move to a lower address, so search backward from the end of the list and +/// prefer the first water that is in range. +bool XtensaConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset, + water_iterator &WaterIter) { + if (WaterList.empty()) + return false; + + unsigned BestGrowth = ~0u; + for (water_iterator IP = std::prev(WaterList.end()), B = WaterList.begin();; + --IP) { + MachineBasicBlock *WaterBB = *IP; + // Check if water is in range and is either at a higher address than the + // current "low water mark" or a new water block that was created since + // the previous iteration by inserting an unconditional branch. In the + // latter case, we want to allow resetting the low water mark back to + // this new water since we haven't seen it before. Inserting branches + // should be relatively uncommon and when it does happen, we want to be + // sure to take advantage of it for all the CPEs near that block, so that + // we don't insert more branches than necessary. + unsigned Growth; + if (isWaterInRange(UserOffset, WaterBB, U, Growth) && + (WaterBB->getNumber() > U.LowWaterMark->getNumber() || + NewWaterList.count(WaterBB)) && + Growth < BestGrowth) { + // This is the least amount of required padding seen so far. + BestGrowth = Growth; + WaterIter = IP; + LLVM_DEBUG(dbgs() << "Found water after " << printMBBReference(*WaterBB) + << " Growth=" << Growth << '\n'); + + // Keep looking unless it is perfect. + if (BestGrowth == 0) + return true; + } + if (IP == B) + break; + } + return BestGrowth != ~0u; +} + +/// createNewWater - No existing WaterList entry will work for +/// CPUsers[CPUserIndex], so create a place to put the CPE. The end of the +/// block is used if in range, and the conditional branch munged so control +/// flow is correct. 
Otherwise the block is split to create a hole with an +/// unconditional branch around it. In either case NewMBB is set to a +/// block following which the new island can be inserted (the WaterList +/// is not adjusted). +void XtensaConstantIslands::createNewWater(unsigned CPUserIndex, + unsigned UserOffset, + MachineBasicBlock *&NewMBB) { + CPUser &U = CPUsers[CPUserIndex]; + MachineInstr *UserMI = U.MI; + MachineBasicBlock *UserMBB = UserMI->getParent(); + NewMBB = splitBlockBeforeInstr(*UserMI); +} + +/// handleConstantPoolUser - Analyze the specified user, checking to see if it +/// is out-of-range. If so, pick up the constant pool value and move it some +/// place in-range. Return true if we changed any addresses (thus must run +/// another pass of branch lengthening), false otherwise. +bool XtensaConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) { + CPUser &U = CPUsers[CPUserIndex]; + MachineInstr *UserMI = U.MI; + MachineInstr *CPEMI = U.CPEMI; + unsigned CPI = CPEMI->getOperand(1).getIndex(); + unsigned Size = CPEMI->getOperand(2).getImm(); + // Compute this only once, it's expensive. + unsigned UserOffset = getUserOffset(U); + + // See if the current entry is within range, or there is a clone of it + // in range. + int result = findInRangeCPEntry(U, UserOffset); + if (result == 1) + return false; + else if (result == 2) + return true; + + // Look for water where we can place this CPE. + MachineBasicBlock *NewIsland = MF->CreateMachineBasicBlock(); + MachineBasicBlock *NewMBB; + water_iterator IP; + if (findAvailableWater(U, UserOffset, IP)) { + LLVM_DEBUG(dbgs() << "Found water in range\n"); + MachineBasicBlock *WaterBB = *IP; + + // If the original WaterList entry was "new water" on this iteration, + // propagate that to the new island. This is just keeping NewWaterList + // updated to match the WaterList, which will be updated below. 
+ if (NewWaterList.erase(WaterBB)) + NewWaterList.insert(NewIsland); + + // The new CPE goes before the following block (NewMBB). + NewMBB = &*++WaterBB->getIterator(); + } else { + // No water found. + LLVM_DEBUG(dbgs() << "No water found\n"); + createNewWater(CPUserIndex, UserOffset, NewMBB); + + // splitBlockBeforeInstr adds to WaterList, which is important when it is + // called while handling branches so that the water will be seen on the + // next iteration for constant pools, but in this context, we don't want + // it. Check for this so it will be removed from the WaterList. + // Also remove any entry from NewWaterList. + MachineBasicBlock *WaterBB = &*--NewMBB->getIterator(); + IP = llvm::find(WaterList, WaterBB); + if (IP != WaterList.end()) + NewWaterList.erase(WaterBB); + + // We are adding new water. Update NewWaterList. + NewWaterList.insert(NewIsland); + } + + // Remove the original WaterList entry; we want subsequent insertions in + // this vicinity to go after the one we're about to insert. This + // considerably reduces the number of times we have to move the same CPE + // more than once and is also important to ensure the algorithm terminates. + if (IP != WaterList.end()) + WaterList.erase(IP); + + // Okay, we know we can put an island before NewMBB now, do it! + MF->insert(NewMBB->getIterator(), NewIsland); + + // Update internal data structures to account for the newly inserted MBB. + updateForInsertedWaterBlock(NewIsland); + + // Decrement the old entry, and remove it if refcount becomes 0. + decrementCPEReferenceCount(CPI, CPEMI); + + // No existing clone of this CPE is within range. + // We will be generating a new clone. Get a UID for it. + unsigned ID = createPICLabelUId(); + + // Now that we have an island to add the CPE to, clone the original CPE and + // add it to the island. 
+ U.LowWaterMark = NewIsland; + U.CPEMI = BuildMI(NewIsland, DebugLoc(), TII->get(Xtensa::CONSTPOOL_ENTRY)) + .addImm(ID) + .addConstantPoolIndex(CPI) + .addImm(Size); + CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1)); + ++NumCPEs; + + // Mark the basic block as aligned as required by the const-pool entry. + NewIsland->setAlignment(getCPEAlign(*U.CPEMI)); + + // Increase the size of the island block to account for the new entry. + BBInfo[NewIsland->getNumber()].Size += Size; + adjustBBOffsetsAfter(&*--NewIsland->getIterator()); + + // Finally, change the CPI in the instruction operand to be ID. + for (MachineOperand &MO : UserMI->operands()) + if (MO.isCPI()) { + MO.setIndex(ID); + break; + } + + LLVM_DEBUG( + dbgs() << " Moved CPE to #" << ID << " CPI=" << CPI + << format(" offset=%#x\n", BBInfo[NewIsland->getNumber()].Offset)); + + return true; +} + +/// removeDeadCPEMI - Remove a dead constant pool entry instruction. Update +/// sizes and offsets of impacted basic blocks. +void XtensaConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) { + MachineBasicBlock *CPEBB = CPEMI->getParent(); + unsigned Size = CPEMI->getOperand(2).getImm(); + CPEMI->eraseFromParent(); + BBInfo[CPEBB->getNumber()].Size -= Size; + // All succeeding offsets have the current size value added in, fix this. + if (CPEBB->empty()) { + BBInfo[CPEBB->getNumber()].Size = 0; + + // This block no longer needs to be aligned. + CPEBB->setAlignment(Align(1)); + } else { + // Entries are sorted by descending alignment, so realign from the front. + CPEBB->setAlignment(getCPEAlign(*CPEBB->begin())); + } + + adjustBBOffsetsAfter(CPEBB); + // An island has only one predecessor BB and one successor BB. Check if + // this BB's predecessor jumps directly to this BB's successor. This + // shouldn't happen currently. + assert(!BBIsJumpedOver(CPEBB) && "How did this happen?"); + // FIXME: remove the empty blocks after all the work is done? 
+} + +/// removeUnusedCPEntries - Remove constant pool entries whose refcounts +/// are zero. +bool XtensaConstantIslands::removeUnusedCPEntries() { + unsigned MadeChange = false; + for (std::vector &CPEs : CPEntries) { + for (CPEntry &CPE : CPEs) { + if (CPE.RefCount == 0 && CPE.CPEMI) { + removeDeadCPEMI(CPE.CPEMI); + CPE.CPEMI = nullptr; + MadeChange = true; + } + } + } + return MadeChange; +} + +/// isBBInRange - Returns true if the distance between specific MI and +/// specific BB can fit in MI's displacement field. +bool XtensaConstantIslands::isBBInRange(MachineInstr *MI, + MachineBasicBlock *DestBB, + unsigned MaxDisp) { + unsigned PCAdj = 4; + unsigned BrOffset = getOffsetOf(MI) + PCAdj; + unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset; + + LLVM_DEBUG(dbgs() << "Branch of destination " << printMBBReference(*DestBB) + << " from " << printMBBReference(*MI->getParent()) + << " max delta=" << MaxDisp << " from " << getOffsetOf(MI) + << " to " << DestOffset << " offset " + << int(DestOffset - BrOffset) << "\t" << *MI); + if (BrOffset <= DestOffset) { + // Branch before the Dest. + if (DestOffset - BrOffset < MaxDisp) + return true; + } else { + if (BrOffset - DestOffset <= MaxDisp) + return true; + } + return false; +} + +/// fixupImmediateBr - Fix up an immediate branch whose destination is too far +/// away to fit in its displacement field. +bool XtensaConstantIslands::fixupImmediateBr(ImmBranch &Br) { + MachineInstr *MI = Br.MI; + + if (MI->getOpcode() == Xtensa::JX) + return false; + + // TOOO: currently we don't fix J in start block + if (MI->getParent()->getNumber() == 0) + return false; + + unsigned TargetOperand = branchTargetOperand(MI); + MachineBasicBlock *DestBB = MI->getOperand(TargetOperand).getMBB(); + + // Check to see if the DestBB is already in-range. 
+ if (isBBInRange(MI, DestBB, Br.MaxDisp)) + return false; + + if (!Br.isCond) + return fixupUnconditionalBr(Br); + return fixupConditionalBr(Br); +} + +/// fixupUnconditionalBr - Fix up an unconditional branch whose destination is +/// too far away to fit in its displacement field. If the LR register has been +/// spilled in the epilogue, then we can use BL to implement a far jump. +/// Otherwise, add an intermediate branch instruction to a branch. +/// fixupUnconditionalBr - Fix up an unconditional branch whose destination is +/// too far away to fit in its displacement field. If the LR register has been +/// spilled in the epilogue, then we can use BSR to implement a far jump. +/// Otherwise, add an intermediate branch instruction to a branch. +bool XtensaConstantIslands::fixupUnconditionalBr(ImmBranch &Br) { + MachineInstr *MI = Br.MI; + MachineBasicBlock *MBB = MI->getParent(); + MachineBasicBlock *DestBB = TII->getBranchDestBlock(*MI); + MachineFunction *MF = MBB->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + MachineConstantPool *ConstantPool = MF->getConstantPool(); + + XtensaConstantPoolValue *C = + XtensaConstantPoolMBB::Create(MF->getFunction().getContext(), DestBB, 0); + unsigned CPSize = ConstantPool->getConstants().size(); + unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align(4)); + Register DestReg = MRI.createVirtualRegister(&Xtensa::ARRegClass); + + MachineInstr *CPEMI = nullptr; + // Add a new CPEntry, but no corresponding CPUser yet. 
+ unsigned ID = Idx; + if (CPSize == Idx) { + ID = createPICLabelUId(); + CPEMI = + BuildMI(InitConstantMBB, DebugLoc(), TII->get(Xtensa::CONSTPOOL_ENTRY)) + .addImm(ID) + .addConstantPoolIndex(Idx) + .addImm(4); + CPEntries.emplace_back(1, CPEntry(CPEMI, Idx, 1)); + ++NumCPEs; + BBInfo[InitConstantMBB->getNumber()].Size += 4; + adjustBBOffsetsAfter(InitConstantMBB); + } else { + std::vector &CPEs = CPEntries[Idx]; + for (CPEntry &CPE : CPEs) { + if ((CPE.CPEMI != nullptr) && + isCPEntryInRange(MI, MAX_DISP_L32R, CPE.CPEMI, MAX_DISP_L32R, true)) { + CPEMI = CPE.CPEMI; + CPE.RefCount++; + } + } + if (CPEMI == nullptr) { + ID = createPICLabelUId(); + CPEMI = BuildMI(InitConstantMBB, DebugLoc(), + TII->get(Xtensa::CONSTPOOL_ENTRY)) + .addImm(ID) + .addConstantPoolIndex(Idx) + .addImm(4); + CPEntries[Idx].push_back(CPEntry(CPEMI, ID, 1)); + ++NumCPEs; + BBInfo[InitConstantMBB->getNumber()].Size += 4; + adjustBBOffsetsAfter(InitConstantMBB); + } else { + ID = CPEMI->getOperand(0).getImm(); + } + } + MachineInstr *L32R = + BuildMI(*MBB, MI, DebugLoc(), TII->get(Xtensa::L32R), DestReg) + .addConstantPoolIndex(ID); + + MI->setDesc(TII->get(Xtensa::JX)); + MI->removeOperand(0); + MI->addOperand(MachineOperand::CreateReg(DestReg, true)); + + RegScavenger RS; + RS.enterBasicBlockEnd(*MBB); + unsigned Scav = RS.scavengeRegisterBackwards(Xtensa::ARRegClass, + L32R->getIterator(), false, 0); + MRI.replaceRegWith(DestReg, Scav); + MRI.clearVirtRegs(); + RS.setRegUsed(Scav); + CPUsers.push_back(CPUser(L32R, CPEMI, MAX_DISP_L32R)); + BBInfo[MBB->getNumber()].Size += 3; + adjustBBOffsetsAfter(MBB); + ++NumUBrFixed; + + LLVM_DEBUG(dbgs() << " Changed B to long jump " << *MI); + return true; +} + +// TODO +/// fixupConditionalBr - Fix up a conditional branch whose destination is too +/// far away to fit in its displacement field. It is converted to an inverse +/// conditional branch + an unconditional branch to the destination. 
+bool XtensaConstantIslands::fixupConditionalBr(ImmBranch &Br) { + MachineInstr *MI = Br.MI; + MachineBasicBlock *DestBB = TII->getBranchDestBlock(*MI); + + SmallVector Cond; + Cond.push_back(MachineOperand::CreateImm(MI->getOpcode())); + Cond.push_back(MI->getOperand(0)); + TII->reverseBranchCondition(Cond); + // Add an unconditional branch to the destination and invert the branch + // condition to jump over it: + // bteqz L1 + // => + // bnez L2 + // b L1 + // L2: + + // If the branch is at the end of its MBB and that has a fall-through block, + // direct the updated conditional branch to the fall-through block. Otherwise, + // split the MBB before the next instruction. + MachineBasicBlock *MBB = MI->getParent(); + MachineInstr *BMI = &MBB->back(); + bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB); + + ++NumCBrFixed; + if (BMI != MI) { + if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) && + BMI->isUnconditionalBranch()) { + // Last MI in the BB is an unconditional branch. Can we simply invert the + // condition and swap destinations: + // beqz L1 + // b L2 + // => + // bnez L2 + // b L1 + MachineBasicBlock *NewDest = TII->getBranchDestBlock(*BMI); + if (isBBInRange(MI, NewDest, Br.MaxDisp)) { + LLVM_DEBUG( + dbgs() << " Invert Bcc condition and swap its destination with " + << *BMI); + BMI->getOperand(BMI->getNumExplicitOperands() - 1).setMBB(DestBB); + MI->getOperand(MI->getNumExplicitOperands() - 1).setMBB(NewDest); + + MI->setDesc(TII->get(Cond[0].getImm())); + return true; + } + } + } + + if (NeedSplit) { + splitBlockBeforeInstr(*MI); + // No need for the branch to the next block. We're adding an unconditional + // branch to the destination. + int Delta = TII->getInstSizeInBytes(MBB->back()); + BBInfo[MBB->getNumber()].Size -= Delta; + MBB->back().eraseFromParent(); + + // The conditional successor will be swapped between the BBs after this, so + // update CFG. 
+ MBB->addSuccessor(DestBB); + std::next(MBB->getIterator())->removeSuccessor(DestBB); + } + MachineBasicBlock *NextBB = &*++MBB->getIterator(); + + LLVM_DEBUG(dbgs() << " Insert B to " << printMBBReference(*DestBB) + << " also invert condition and change dest. to " + << printMBBReference(*NextBB) << "\n"); + + // Insert a new conditional branch and a new unconditional branch. + // Also update the ImmBranch as well as adding a new entry for the new branch. + switch (MI->getOpcode()) { + case Xtensa::BEQ: + case Xtensa::BNE: + case Xtensa::BLT: + case Xtensa::BLTU: + case Xtensa::BGE: + case Xtensa::BGEU: + BuildMI(MBB, DebugLoc(), TII->get(Cond[0].getImm())) + .addReg(MI->getOperand(0).getReg()) + .addReg(MI->getOperand(1).getReg()) + .addMBB(NextBB); + break; + case Xtensa::BEQI: + case Xtensa::BNEI: + case Xtensa::BLTI: + case Xtensa::BLTUI: + case Xtensa::BGEI: + case Xtensa::BGEUI: + BuildMI(MBB, DebugLoc(), TII->get(Cond[0].getImm())) + .addReg(MI->getOperand(0).getReg()) + .addImm(MI->getOperand(1).getImm()) + .addMBB(NextBB); + break; + case Xtensa::BEQZ: + case Xtensa::BNEZ: + case Xtensa::BLTZ: + case Xtensa::BGEZ: + BuildMI(MBB, DebugLoc(), TII->get(Cond[0].getImm())) + .addReg(MI->getOperand(0).getReg()) + .addMBB(NextBB); + break; + case Xtensa::BT: + case Xtensa::BF: + BuildMI(MBB, DebugLoc(), TII->get(Cond[0].getImm())) + .addReg(MI->getOperand(0).getReg()) + .addMBB(NextBB); + break; + } + + Br.MI = &MBB->back(); + BBInfo[MBB->getNumber()].Size += TII->getInstSizeInBytes(MBB->back()); + BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB); + BBInfo[MBB->getNumber()].Size += TII->getInstSizeInBytes(MBB->back()); + unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr); + ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr)); + + // Remove the old conditional branch. It may or may not still be in MBB. 
+ BBInfo[MI->getParent()->getNumber()].Size -= TII->getInstSizeInBytes(*MI); + MI->eraseFromParent(); + adjustBBOffsetsAfter(MBB); + return true; +} + +// Check first constant island. If it is empty, then we can remove first block, +// which contains jump instruction to the third block and first constant +// insland. +void XtensaConstantIslands::removeEntryJump() { + MachineFunction *MF = InitConstantMBB->getParent(); + MachineBasicBlock *Entry = &MF->front(); + if (InitConstantMBB->empty()) { + Entry->removeSuccessor(Entry->getSingleSuccessor()); + MF->remove(Entry); + MF->remove(InitConstantMBB); + MF->RenumberBlocks(); + } +} + +/// Returns a pass that converts branches to long branches. +FunctionPass *llvm::createXtensaConstantIslandPass() { + return new XtensaConstantIslands(); +} diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index e44f126d69a15..e8a458d25f2f1 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1587,6 +1587,26 @@ let usesCustomInserter = 1, Predicates = [HasS32C1I] in { [(set AR:$dst, (atomic_load_umax_i32 AR:$ptr, AR:$arg))]>; } +//===----------------------------------------------------------------------===// +// Xtensa CONSTPOOL_ENTRY +//===----------------------------------------------------------------------===// + +// An operand for the CONSTPOOL_ENTRY pseudo-instruction. +def cpinst_operand : Operand { + // let PrintMethod = "printCPInstOperand"; +} + +// CONSTPOOL_ENTRY - This instruction represents a floating constant pool in +// the function. The first operand is the ID# for this instruction, the second +// is the index into the MachineConstantPool that this is, the third is the +// size in bytes of this constant pool entry. 
+// +let hasSideEffects = 0, isNotDuplicable = 1 in +def CONSTPOOL_ENTRY : +Pseudo<(outs), (ins cpinst_operand:$instid, cpinst_operand:$cpidx, + i32imm:$size), "foo", []>; + + //===----------------------------------------------------------------------===// // Xtensa ESP32S2 Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp index 74da2fc6d5f18..020ae4a997572 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp +++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp @@ -154,6 +154,7 @@ void XtensaPassConfig::addPreEmitPass() { addPass(createXtensaSizeReductionPass()); addPass(createXtensaFixupHwLoops()); addPass(&BranchRelaxationPassID); + addPass(createXtensaConstantIslandPass()); } TargetPassConfig *XtensaTargetMachine::createPassConfig(PassManagerBase &PM) { diff --git a/llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.cpp b/llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.cpp index 62ad8b6b00997..2be6c2d394f60 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.cpp @@ -20,6 +20,12 @@ bool XtensaTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) { + // Disable hw loops when literals are placed in text section. + // TODO: Implement support of hw loops in ConstantIslands pass + if (ST->useTextSectionLiterals()) { + return false; + } + if (DisableLowOverheadLoops) return false; From e82c9c59de22dfff9aebfc167e1e45654b2cd44c Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:22 +0300 Subject: [PATCH 124/289] [Xtensa] Disable hardware loops by default. 
--- llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.cpp b/llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.cpp index 2be6c2d394f60..4accacb20f2ad 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaTargetTransformInfo.cpp @@ -13,7 +13,7 @@ using namespace llvm; #define DEBUG_TYPE "xtensatti" static cl::opt<bool> DisableLowOverheadLoops( - "disable-xtensa-hwloops", cl::Hidden, cl::init(false), + "disable-xtensa-hwloops", cl::Hidden, cl::init(true), cl::desc("Disable the generation of hardware loops")); bool XtensaTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, From ad5d3e6e5fdc16bc649b27de8de5a690560e5363 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:22 +0300 Subject: [PATCH 125/289] [Xtensa] Improve fixup error messages in asm backend. --- .../Xtensa/MCTargetDesc/XtensaAsmBackend.cpp | 14 +++++++------- .../MC/Xtensa/Relocations/fixups-diagnostics.s | 4 ++-- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaAsmBackend.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaAsmBackend.cpp index 7da92dc4c2af6..c34ec76db2f3c 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaAsmBackend.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaAsmBackend.cpp @@ -91,7 +91,7 @@ static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value, case Xtensa::fixup_xtensa_branch_6: { Value -= 4; if (!isInt<6>(Value)) - Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); + Ctx.reportError(Fixup.getLoc(), "branch 6-bit fixup value out of range"); unsigned Hi2 = (Value >> 4) & 0x3; unsigned Lo4 = Value & 0xf; return (Hi2 << 4) | (Lo4 << 12); @@ -99,36 +99,36 @@ static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value, case Xtensa::fixup_xtensa_branch_8: Value -= 4; if (!isInt<8>(Value)) - 
Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); + Ctx.reportError(Fixup.getLoc(), "branch 8-bit fixup value out of range"); return (Value & 0xff); case Xtensa::fixup_xtensa_branch_12: Value -= 4; if (!isInt<12>(Value)) - Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); + Ctx.reportError(Fixup.getLoc(), "branch 12-bit fixup value out of range"); return (Value & 0xfff); case Xtensa::fixup_xtensa_jump_18: Value -= 4; if (!isInt<18>(Value)) - Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); + Ctx.reportError(Fixup.getLoc(), "jump fixup value out of range"); return (Value & 0x3ffff); case Xtensa::fixup_xtensa_call_18: Value -= 4; if (!isInt<20>(Value)) - Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); + Ctx.reportError(Fixup.getLoc(), "call fixup value out of range"); if (Value & 0x3) Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned"); return (Value & 0xffffc) >> 2; case Xtensa::fixup_xtensa_loop_8: Value -= 4; if (!isUInt<8>(Value)) - Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); + Ctx.reportError(Fixup.getLoc(), "loop fixup value out of range"); return (Value & 0xff); case Xtensa::fixup_xtensa_l32r_16: unsigned Offset = Fixup.getOffset(); if (Offset & 0x3) Value -= 4; if (!isInt<18>(Value) && (Value & 0x20000)) - Ctx.reportError(Fixup.getLoc(), "fixup value out of range"); + Ctx.reportError(Fixup.getLoc(), "l32r fixup value out of range"); if (Value & 0x3) Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned"); return (Value & 0x3fffc) >> 2; diff --git a/llvm/test/MC/Xtensa/Relocations/fixups-diagnostics.s b/llvm/test/MC/Xtensa/Relocations/fixups-diagnostics.s index e0eac900552ce..d0d7b4d0f8857 100644 --- a/llvm/test/MC/Xtensa/Relocations/fixups-diagnostics.s +++ b/llvm/test/MC/Xtensa/Relocations/fixups-diagnostics.s @@ -2,9 +2,9 @@ .align 4 - beq a0, a1, LBL1 # CHECK: :[[@LINE]]:3: error: fixup value out of range + beq a0, a1, LBL1 # CHECK: :[[@LINE]]:3: error: branch 
8-bit fixup value out of range LBL0: - beqz a0, LBL2 # CHECK: :[[@LINE]]:3: error: fixup value out of range + beqz a0, LBL2 # CHECK: :[[@LINE]]:3: error: branch 12-bit fixup value out of range call0 LBL0 # CHECK: :[[@LINE]]:3: error: fixup value must be 4-byte aligned From 362f5abab5a590b050b430fc549219f876e8fbb1 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 18 Sep 2024 23:41:02 +0300 Subject: [PATCH 126/289] [Xtensa] Temporary fix hwloop tests. --- llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll | 31 +++++++------------ .../CodeGen/Xtensa/hwloop_unsuitable_loop.ll | 17 +++++----- 2 files changed, 21 insertions(+), 27 deletions(-) diff --git a/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll b/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll index 4c784fd1a8187..5308fdccfc276 100644 --- a/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll +++ b/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll @@ -7,32 +7,25 @@ define i32 @test_hwloop(i32 %a, i32 %b, i32 %n) local_unnamed_addr #0 { ; CHECK-LABEL: test_hwloop: ; CHECK: entry a1, 32 ; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: blti a4, 1, .LBB0_7 -; CHECK-NEXT: # %bb.1: # %for.body.preheader ; CHECK-NEXT: movi.n a8, 0 +; CHECK-NEXT: bge a8, a4, .LBB0_5 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: movi.n a9, 0 ; CHECK-NEXT: j .LBB0_3 ; CHECK-NEXT: .LBB0_2: # %for.body ; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 -; CHECK-NEXT: add.n a2, a9, a2 -; CHECK-NEXT: j .LBB0_4 +; CHECK-NEXT: add.n a2, a10, a2 +; CHECK-NEXT: addi.n a8, a8, 1 +; CHECK-NEXT: bge a8, a4, .LBB0_5 ; CHECK-NEXT: .LBB0_3: # %for.body -; CHECK-NEXT: # =>This Loop Header: Depth=1 -; CHECK-NEXT: # Child Loop BB0_5 Depth 2 -; CHECK-NEXT: nop -; CHECK-NEXT: nop -; CHECK-NEXT: loop a4, .LBB0_5 -; CHECK-NEXT: mov.n a9, a8 -; CHECK-NEXT: bge a8, a2, .LBB0_2 -; CHECK-NEXT: .LBB0_4: # in Loop: Header=BB0_3 Depth=1 -; CHECK-NEXT: nop -; CHECK-NEXT: .LBB0_5: # Parent Loop BB0_3 Depth=1 -; CHECK-NEXT: # => This Inner Loop Header: Depth=2 -; CHECK-NEXT: j .LBB0_7 -; 
CHECK-NEXT: .LBB0_6: # %for.body +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: mov.n a10, a9 +; CHECK-NEXT: bge a9, a2, .LBB0_2 +; CHECK-NEXT: # %bb.4: # %for.body ; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 -; CHECK-NEXT: mull a9, a2, a3 +; CHECK-NEXT: mull a10, a2, a3 ; CHECK-NEXT: j .LBB0_2 -; CHECK-NEXT: .LBB0_7: # %for.cond.cleanup +; CHECK-NEXT: .LBB0_5: # %for.cond.cleanup ; CHECK-NEXT: retw.n entry: %cmp7 = icmp sgt i32 %n, 0 diff --git a/llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll b/llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll index 2262ecde5bc9a..f34729e58d37e 100644 --- a/llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll +++ b/llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll @@ -6,24 +6,25 @@ define i32 @test_hwloop(i32 %a, i32 %b, i32 %n) local_unnamed_addr #1 { ; CHECK-LABEL: test_hwloop: ; CHECK: entry a1, 32 ; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: blti a4, 1, .LBB0_5 -; CHECK-NEXT: # %bb.1: # %for.body.preheader ; CHECK-NEXT: movi.n a8, 0 +; CHECK-NEXT: bge a8, a4, .LBB0_5 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: movi.n a9, 0 ; CHECK-NEXT: j .LBB0_3 ; CHECK-NEXT: .LBB0_2: # %for.body ; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 -; CHECK-NEXT: add.n a2, a9, a2 +; CHECK-NEXT: add.n a2, a10, a2 ; CHECK-NEXT: #APP ; CHECK-NEXT: #NO_APP -; CHECK-NEXT: addi.n a4, a4, -1 -; CHECK-NEXT: beqz a4, .LBB0_5 +; CHECK-NEXT: addi.n a8, a8, 1 +; CHECK-NEXT: bge a8, a4, .LBB0_5 ; CHECK-NEXT: .LBB0_3: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: mov.n a9, a8 -; CHECK-NEXT: bge a8, a2, .LBB0_2 +; CHECK-NEXT: mov.n a10, a9 +; CHECK-NEXT: bge a9, a2, .LBB0_2 ; CHECK-NEXT: # %bb.4: # %for.body ; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 -; CHECK-NEXT: mull a9, a2, a3 +; CHECK-NEXT: mull a10, a2, a3 ; CHECK-NEXT: j .LBB0_2 ; CHECK-NEXT: .LBB0_5: # %for.cond.cleanup ; CHECK-NEXT: retw.n From 832401f715ddd5e53e80188d0d82d766dffa2ee5 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 
Jun 2023 00:43:24 +0300 Subject: [PATCH 127/289] esp/ci: change clang version to 16. --- .gitlab-ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index dbe33f2bd2c31..81eae5d98a6e0 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -12,13 +12,13 @@ variables: # move all these to CI/CD settings REL_SFX: "llvm" - CLANG_VER: "15.0.0" + CLANG_VER: "16" GCC_REL_NAME: "esp-2022r1" GCC_REL_VER: "gcc11_2_0" NEWLIB_REF: "esp-2022r1" BINUTILS_REF: "esp-2022r1-binutils" XTENSA_OVERLAYS_REF: "master" - LLVM_GCC_TESTSUITE_REF: "esp-15.0.0-20221201" + LLVM_GCC_TESTSUITE_REF: "esp-16.0.0-20230419" XTENSA_CLANG_TOOLCHAIN_REF: "esp-15.0.0-20221201" CROSS_ARM_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-arm:1 From daaa5d9944edad52e2703412491e36394a71bf41 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 19:57:13 +0300 Subject: [PATCH 128/289] toolchain: Adds compiler-rt multilibs support for Espressif toolchains --- clang/lib/Driver/ToolChains/CommonArgs.cpp | 13 +++++++++++++ clang/lib/Driver/ToolChains/CommonArgs.h | 6 ++++++ clang/lib/Driver/ToolChains/Gnu.cpp | 11 ++++++++++- clang/lib/Driver/ToolChains/RISCVToolchain.cpp | 12 ++++++++++++ 4 files changed, 41 insertions(+), 1 deletion(-) diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp index 77d29cef1eeb1..069560e2b6e47 100644 --- a/clang/lib/Driver/ToolChains/CommonArgs.cpp +++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp @@ -2930,3 +2930,16 @@ void tools::addMCModel(const Driver &D, const llvm::opt::ArgList &Args, } } } + +void tools::addEspMultilibsPaths(const Driver &D, const MultilibSet &Multilibs, + const Multilib &Multilib, + StringRef CPU, + StringRef InstallPath, + ToolChain::path_list &Paths) { + if (const auto &PathsCallback = Multilibs.filePathsCallback()) + for (const auto &Path : PathsCallback(Multilib)) { + SmallString<256> LibPath(D.ResourceDir); + llvm::sys::path::append(LibPath, 
D.getTargetTriple(), CPU, Path, "lib"); + addPathIfExists(D, LibPath, Paths); + } +} diff --git a/clang/lib/Driver/ToolChains/CommonArgs.h b/clang/lib/Driver/ToolChains/CommonArgs.h index 52818ecde924b..3bde296b43097 100644 --- a/clang/lib/Driver/ToolChains/CommonArgs.h +++ b/clang/lib/Driver/ToolChains/CommonArgs.h @@ -228,6 +228,12 @@ void addMCModel(const Driver &D, const llvm::opt::ArgList &Args, const llvm::Reloc::Model &RelocationModel, llvm::opt::ArgStringList &CmdArgs); + +void addEspMultilibsPaths(const Driver &D, const MultilibSet &Multilibs, + const Multilib &Multilib, + StringRef CPU, + StringRef InstallPath, + ToolChain::path_list &Paths); } // end namespace tools } // end namespace driver } // end namespace clang diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp index 2f52884ccbfcb..e80dbe815fb75 100644 --- a/clang/lib/Driver/ToolChains/Gnu.cpp +++ b/clang/lib/Driver/ToolChains/Gnu.cpp @@ -1988,7 +1988,8 @@ static void findXtensaMultilibs(const Driver &D, const ArgList &Args, DetectedMultilibs &Result) { MultilibSet XtensaMultilibs = MultilibSet(); - bool IsESP32 = Args.getLastArgValue(options::OPT_mcpu_EQ, "esp32") == "esp32"; + StringRef cpu = Args.getLastArgValue(options::OPT_mcpu_EQ, "esp32"); + bool IsESP32 = cpu == "esp32"; XtensaMultilibs.push_back(Multilib()); if (IsESP32) @@ -2008,6 +2009,14 @@ static void findXtensaMultilibs(const Driver &D, .flag("-mfix-esp32-psram-cache-issue") .makeMultilib()); + std::string cpu_name = cpu.str(); + XtensaMultilibs + .setFilePathsCallback([cpu_name](const Multilib &M) { + return std::vector<std::string>( + {M.gccSuffix(), + "/../../../../xtensa-" + cpu_name + "-elf/lib" + M.gccSuffix()}); + }); + Multilib::flags_list Flags; addMultilibFlag( Args.hasFlag(options::OPT_frtti, options::OPT_fno_rtti, false), "frtti", Flags); if (IsESP32) diff --git a/clang/lib/Driver/ToolChains/RISCVToolchain.cpp b/clang/lib/Driver/ToolChains/RISCVToolchain.cpp index 0ea8e591a5e34..258d142cd17ee 100644 ---
a/clang/lib/Driver/ToolChains/RISCVToolchain.cpp +++ b/clang/lib/Driver/ToolChains/RISCVToolchain.cpp @@ -22,6 +22,7 @@ using namespace clang::driver::tools; using namespace clang; using namespace llvm::opt; + static void addMultilibsFilePaths(const Driver &D, const MultilibSet &Multilibs, const Multilib &Multilib, StringRef InstallPath, @@ -69,6 +70,17 @@ RISCVToolChain::RISCVToolChain(const Driver &D, const llvm::Triple &Triple, } else { getProgramPaths().push_back(D.Dir); } + + if (getTriple().getVendor() == llvm::Triple::Espressif) { + // TODO: need to detect multilibs when GCC installation is not available + addEspMultilibsPaths(D, Multilibs, SelectedMultilibs.back(), + Args.getLastArgValue(options::OPT_mcpu_EQ, "generic-rv32"), + D.Dir, getLibraryPaths()); + addEspMultilibsPaths(D, Multilibs, SelectedMultilibs.back(), + Args.getLastArgValue(options::OPT_mcpu_EQ, "generic-rv32"), + D.Dir, getFilePaths()); + } + getFilePaths().push_back(computeSysRoot() + "/lib"); } From 0ea2cfce711165e3fa7126800bd5812e2a733d17 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:25 +0300 Subject: [PATCH 129/289] esp/ci: Build compiler-rt --- .gitlab-ci.yml | 8 +++++ .universal-toolchain-release.yml | 58 ++++++++++++++++++++++++++++++-- 2 files changed, 64 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 81eae5d98a6e0..7323f4fcb7b1c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -49,6 +49,14 @@ variables: UNARCHIVE_TOOL_NEWLIB: ${UNARCHIVE_TOOL_LINUX} ARCHIVE_EXT_NEWLIB: ${ARCHIVE_EXT_LINUX} + ARCHIVE_TOOL_COMPILER_RT: ${ARCHIVE_TOOL_LINUX} + UNARCHIVE_TOOL_COMPILER_RT: ${UNARCHIVE_TOOL_LINUX} + ARCHIVE_EXT_COMPILER_RT: ${ARCHIVE_EXT_LINUX} + + LIBS_ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + LIBS_UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" + LIBS_ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + DIST_DIR: "dist" BUILD_DIR: "build" DOWNLOADS_DIR: "downloads" diff --git a/.universal-toolchain-release.yml b/.universal-toolchain-release.yml 
index 75a41258aead0..1f51bcd9d9b39 100644 --- a/.universal-toolchain-release.yml +++ b/.universal-toolchain-release.yml @@ -96,7 +96,7 @@ # Do not run unit tests for cross-builds. # Run as non-root user because permission tests fail when run by root. - if [ "${CONF_HOST}" == "${BUILD_HOST}" ]; then - export LLVM_BUILD_PATH=${LLVM_PROJECT_PATH}/llvm/build-${CONF_HOST}-Release; + export LLVM_BUILD_PATH=${BUILD_PATH}/build/llvm/build-${CONF_HOST}-Release; echo "Run unit tests for native build in ${LLVM_BUILD_PATH}"; useradd -m test_runner; chown -R test_runner ${LLVM_BUILD_PATH}; @@ -193,7 +193,7 @@ build_newlib: stage: build tags: [ "amd64", "build" ] needs: - # needs native toolchainfrom this job + # needs native toolchain - job: build_x86_64-linux-gnu artifacts: paths: @@ -230,6 +230,52 @@ build_newlib: - ${ARCHIVE_TOOL_NEWLIB} ${NEWLIB_OVERLAY_DISTRO_PATH}/esp-clang-newlib-overlay.${ARCHIVE_EXT_NEWLIB} esp-clang/ - popd +build_compiler-rt: + stage: build + tags: [ "amd64", "build" ] + needs: + # needs native toolchain with newlib + # newlib is necessary for building tests + - job: build_x86_64-linux-gnu + - job: build_newlib + artifacts: + paths: + - ${DIST_DIR}/ + - ${BUILD_DIR}/build.log + when: always + expire_in: 1 day + variables: + PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" + ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" + UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" + ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" + script: + - LLVM_PROJECT_PATH=$PWD + # get ARCHIVE_NAME for Linux release. 
+ - CLANG_ARCHIVE=$PWD/${DIST_DIR}/$(cat ${DIST_DIR}/file_${PLATFORM_NAME_LINUX}) + - NEWLIB_ARCHIVE=$PWD/${DIST_DIR}/esp-clang-newlib-overlay.${ARCHIVE_EXT_NEWLIB} + - mkdir -p ${DOWNLOADS_DIR} + - pushd ${DOWNLOADS_DIR} + # unpack clang + - ${UNARCHIVE_TOOL} ${CLANG_ARCHIVE} + # unpack newlib + - ${UNARCHIVE_TOOL_NEWLIB} ${NEWLIB_ARCHIVE} + # now Linux toolchain with newlib is in $PWD/esp-clang + - export PATH=$PWD/esp-clang/bin:$PATH + - popd + - rm -rf $PWD/${DIST_DIR} + - !reference [.get_clang_toolchain_build_scripts, script] + # build compiler-rt overlay using ESP native (Linux) clang toolchain only + # it will be re-used for cross-buit toolchains (win and mac). + - COMPILER_RT_OVERLAY_DISTRO_PATH=$PWD/${DIST_DIR} + - mkdir -p ${COMPILER_RT_OVERLAY_DISTRO_PATH} + - BUILD_PATH=$PWD/${BUILD_DIR} + - mkdir -p ${BUILD_PATH} + - ./build-toolchain.sh --llvm-path=${LLVM_PROJECT_PATH} --build-llvm=no --build-compiler-rt=yes ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/build.log + - pushd ${BUILD_PATH} + - ${ARCHIVE_TOOL_COMPILER_RT} ${COMPILER_RT_OVERLAY_DISTRO_PATH}/esp-clang-compiler-rt-overlay.${ARCHIVE_EXT_COMPILER_RT} esp-clang/ + - popd + .pack_template: stage: pack tags: [ "amd64", "build" ] @@ -246,6 +292,8 @@ build_newlib: - ${UNARCHIVE_TOOL} ${DIST_DIR}/${ARCHIVE_NAME} -C ${BUILD_PATH} # unpack newlib - ${UNARCHIVE_TOOL_NEWLIB} ${DIST_DIR}/esp-clang-newlib-overlay.${ARCHIVE_EXT_NEWLIB} -C ${BUILD_PATH} + # unpack compiler-rt + - ${UNARCHIVE_TOOL_COMPILER_RT} ${DIST_DIR}/esp-clang-compiler-rt-overlay.${ARCHIVE_EXT_COMPILER_RT} -C ${BUILD_PATH} - rm -rf ${DIST_DIR} - !reference [.get_clang_toolchain_build_scripts, script] # strip binutils afer newlib is built @@ -268,6 +316,7 @@ pack_x86_64-linux-gnu: needs: - job: build_x86_64-linux-gnu - job: build_newlib + - job: build_compiler-rt variables: CONF_HOST: "x86_64-linux-gnu" PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" @@ -278,6 +327,7 @@ pack_arm-linux-gnueabihf: needs: - job: build_arm-linux-gnueabihf - job: build_newlib 
+ - job: build_compiler-rt variables: CONF_HOST: "arm-linux-gnueabihf" PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARMHF}" @@ -288,6 +338,7 @@ pack_aarch64-linux-gnu: needs: - job: build_aarch64-linux-gnu - job: build_newlib + - job: build_compiler-rt variables: CONF_HOST: "aarch64-linux-gnu" PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARM64}" @@ -297,6 +348,7 @@ pack_x86_64-w64-mingw32: needs: - job: build_x86_64-w64-mingw32 - job: build_newlib + - job: build_compiler-rt variables: CONF_HOST: "x86_64-w64-mingw32" PLATFORM_NAME: "${PLATFORM_NAME_WIN}" @@ -317,6 +369,7 @@ pack_x86_64-apple-darwin: needs: - job: build_x86_64-apple-darwin - job: build_newlib + - job: build_compiler-rt variables: CONF_HOST: "x86_64-apple-darwin21.1" PLATFORM_NAME: "${PLATFORM_NAME_MACOS}" @@ -326,6 +379,7 @@ pack_aarch64-apple-darwin: needs: - job: build_aarch64-apple-darwin - job: build_newlib + - job: build_compiler-rt variables: CONF_HOST: "aarch64-apple-darwin21.1" PLATFORM_NAME: "${PLATFORM_NAME_MACOS_ARM64}" From 0f090b1accae804bbc04071c76aefe5822e24574 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 19 Sep 2024 02:01:54 +0300 Subject: [PATCH 130/289] [Xtensa] toolchain/xtensa: Enable `-frtti` by default for multilib dirs search --- clang/lib/Driver/ToolChains/Gnu.cpp | 18 +++++++++--------- clang/test/Driver/xtensa-toolchain.c | 28 ++++++++++++++-------------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp index e80dbe815fb75..6180a78aae58a 100644 --- a/clang/lib/Driver/ToolChains/Gnu.cpp +++ b/clang/lib/Driver/ToolChains/Gnu.cpp @@ -1992,22 +1992,22 @@ static void findXtensaMultilibs(const Driver &D, bool IsESP32 = cpu == "esp32"; XtensaMultilibs.push_back(Multilib()); - if (IsESP32) - XtensaMultilibs.push_back(MultilibBuilder("esp32-psram", {}, {}) - .flag("-mfix-esp32-psram-cache-issue") - .makeMultilib()); - XtensaMultilibs.push_back(MultilibBuilder("no-rtti", {}, {}) - .flag("-frtti", 
/*Disallow=*/true) .flag("-fno-rtti") + .flag("-frtti", /*Disallow=*/true) .makeMultilib()); - if (IsESP32) + if (IsESP32) { + XtensaMultilibs.push_back(MultilibBuilder("esp32-psram", {}, {}) + .flag("-mfix-esp32-psram-cache-issue") + .makeMultilib()); + XtensaMultilibs.push_back(MultilibBuilder("esp32-psram/no-rtti", {}, {}) + .flag("-mfix-esp32-psram-cache-issue") .flag("-fno-rtti") .flag("-frtti", /*Disallow=*/true) - .flag("-mfix-esp32-psram-cache-issue") .makeMultilib()); + } std::string cpu_name = cpu.str(); XtensaMultilibs @@ -2019,7 +2019,7 @@ static void findXtensaMultilibs(const Driver &D, Multilib::flags_list Flags; addMultilibFlag( - Args.hasFlag(options::OPT_frtti, options::OPT_fno_rtti, false), "frtti", + Args.hasFlag(options::OPT_frtti, options::OPT_fno_rtti, true), "frtti", Flags); if (IsESP32) diff --git a/clang/test/Driver/xtensa-toolchain.c b/clang/test/Driver/xtensa-toolchain.c index 7cf4f151de2fc..77f23c284cf52 100644 --- a/clang/test/Driver/xtensa-toolchain.c +++ b/clang/test/Driver/xtensa-toolchain.c @@ -22,7 +22,7 @@ // RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ // RUN: -target xtensa-esp-elf --rtlib=platform \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -fno-rtti 2>&1 \ // RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-BAREMETAL %s // C-XTENSA-ESP32-BAREMETAL: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" @@ -31,7 +31,7 @@ // RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ // RUN: -target xtensa-esp-elf --rtlib=platform \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -frtti 2>&1 \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ // RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-BAREMETAL-RTTI %s // C-XTENSA-ESP32-BAREMETAL-RTTI: 
"{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" @@ -40,7 +40,7 @@ // RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ // RUN: -target xtensa-esp-elf --rtlib=platform \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -mfix-esp32-psram-cache-issue 2>&1 \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -fno-rtti -mfix-esp32-psram-cache-issue 2>&1 \ // RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-BAREMETAL-PSRAM %s // C-XTENSA-ESP32-BAREMETAL-PSRAM: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" @@ -49,7 +49,7 @@ // RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ // RUN: -target xtensa-esp-elf --rtlib=platform \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -mfix-esp32-psram-cache-issue -frtti 2>&1 \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -mfix-esp32-psram-cache-issue 2>&1 \ // RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-BAREMETAL-PSRAM-RTTI %s // C-XTENSA-ESP32-BAREMETAL-PSRAM-RTTI: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" @@ -58,7 +58,7 @@ // RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ // RUN: -target xtensa-esp-elf -mcpu=esp32s2 --rtlib=platform \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -fno-rtti 2>&1 \ // RUN: | FileCheck -check-prefix=C-XTENSA-ESP32S2-BAREMETAL %s // C-XTENSA-ESP32S2-BAREMETAL: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s2-elf-ld" @@ -67,7 +67,7 @@ // RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ // RUN: -target xtensa-esp-elf -mcpu=esp32s2 --rtlib=platform \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -frtti 2>&1 \ +// RUN: 
--gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ // RUN: | FileCheck -check-prefix=C-XTENSA-ESP32S2-BAREMETAL-RTTI %s // C-XTENSA-ESP32S2-BAREMETAL-RTTI: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s2-elf-ld" @@ -76,7 +76,7 @@ // RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ // RUN: -target xtensa-esp-elf -mcpu=esp32s3 --rtlib=platform \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -fno-rtti 2>&1 \ // RUN: | FileCheck -check-prefix=C-XTENSA-ESP32S3-BAREMETAL %s // C-XTENSA-ESP32S3-BAREMETAL: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s3-elf-ld" @@ -85,7 +85,7 @@ // RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ // RUN: -target xtensa-esp-elf -mcpu=esp32s3 --rtlib=platform \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -frtti 2>&1 \ +// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ // RUN: | FileCheck -check-prefix=C-XTENSA-ESP32S3-BAREMETAL-RTTI %s // C-XTENSA-ESP32S3-BAREMETAL-RTTI: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s3-elf-ld" @@ -100,8 +100,8 @@ // C-XTENSA-ESP32-SYSROOT-BAREMETAL: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" // C-XTENSA-ESP32-SYSROOT-BAREMETAL: "--sysroot={{.*}}/Inputs/multilib_xtensa_tree/xtensa-esp32-elf" -// C-XTENSA-ESP32-SYSROOT-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0/no-rtti" -// C-XTENSA-ESP32-SYSROOT-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib/no-rtti" +// 
C-XTENSA-ESP32-SYSROOT-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0" +// C-XTENSA-ESP32-SYSROOT-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib" // RUN: %clang++ %s -### -no-canonical-prefixes \ // RUN: -target xtensa-esp-elf -mcpu=esp32 -stdlib=libstdc++ --rtlib=platform \ @@ -110,8 +110,8 @@ // CXX-XTENSA-ESP32-BAREMETAL: "-internal-isystem" "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf/include/c++{{/|\\\\}}8.4.0" // CXX-XTENSA-ESP32-BAREMETAL: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" -// CXX-XTENSA-ESP32-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0/no-rtti" -// CXX-XTENSA-ESP32-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib/no-rtti" +// CXX-XTENSA-ESP32-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0" +// CXX-XTENSA-ESP32-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib" // RUN: %clang++ %s -### -no-canonical-prefixes \ // RUN: -target xtensa-esp-elf -mcpu=esp32 -stdlib=libstdc++ --rtlib=platform \ @@ -121,5 +121,5 @@ // CXX-XTENSA-ESP32-SYSROOT-BAREMETAL: "-internal-isystem" "{{.*}}Inputs/multilib_xtensa_tree/xtensa-esp32-elf/include/c++/8.4.0" // CXX-XTENSA-ESP32-SYSROOT-BAREMETAL: 
"{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" -// CXX-XTENSA-ESP32-SYSROOT-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0/no-rtti" -// CXX-XTENSA-ESP32-SYSROOT-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib/no-rtti" +// CXX-XTENSA-ESP32-SYSROOT-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0" +// CXX-XTENSA-ESP32-SYSROOT-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib" From 4176bd8748d2a05314b0133284d238eb16d6f232 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:26 +0300 Subject: [PATCH 131/289] toolchain/esp: Add tests for 'compiler-rt' multilib --- .../generic-rv32/lib/clang_rt.crtbegin.o | 0 .../generic-rv32/lib/clang_rt.crtend.o | 0 .../generic-rv32/lib/libclang_rt.builtins.a | 0 .../rv32i/ilp32/lib/clang_rt.crtbegin.o | 0 .../rv32i/ilp32/lib/clang_rt.crtend.o | 0 .../rv32i/ilp32/lib/libclang_rt.builtins.a | 0 .../ilp32/no-rtti/lib/clang_rt.crtbegin.o | 0 .../rv32i/ilp32/no-rtti/lib/clang_rt.crtend.o | 0 .../ilp32/no-rtti/lib/libclang_rt.builtins.a | 0 .../rv32imac/ilp32/lib/clang_rt.crtbegin.o | 0 .../rv32imac/ilp32/lib/clang_rt.crtend.o | 0 .../rv32imac/ilp32/lib/libclang_rt.builtins.a | 0 .../ilp32/no-rtti/lib/clang_rt.crtbegin.o | 0 .../ilp32/no-rtti/lib/clang_rt.crtend.o | 0 .../ilp32/no-rtti/lib/libclang_rt.builtins.a | 0 .../rv32imafc/ilp32f/lib/clang_rt.crtbegin.o | 0 .../rv32imafc/ilp32f/lib/clang_rt.crtend.o | 0 .../ilp32f/lib/libclang_rt.builtins.a | 0 .../ilp32f/no-rtti/lib/clang_rt.crtbegin.o | 0 
.../ilp32f/no-rtti/lib/clang_rt.crtend.o | 0 .../ilp32f/no-rtti/lib/libclang_rt.builtins.a | 0 .../rv32imc/ilp32/lib/clang_rt.crtbegin.o | 0 .../rv32imc/ilp32/lib/clang_rt.crtend.o | 0 .../rv32imc/ilp32/lib/libclang_rt.builtins.a | 0 .../ilp32/no-rtti/lib/clang_rt.crtbegin.o | 0 .../ilp32/no-rtti/lib/clang_rt.crtend.o | 0 .../ilp32/no-rtti/lib/libclang_rt.builtins.a | 0 .../esp32/esp32-psram/lib/clang_rt.crtbegin.o | 0 .../esp32/esp32-psram/lib/clang_rt.crtend.o | 0 .../esp32-psram/lib/libclang_rt.builtins.a | 0 .../no-rtti/lib/clang_rt.crtbegin.o | 0 .../esp32-psram/no-rtti/lib/clang_rt.crtend.o | 0 .../no-rtti/lib/libclang_rt.builtins.a | 0 .../esp32/lib/clang_rt.crtbegin.o | 0 .../esp32/lib/clang_rt.crtend.o | 0 .../esp32/lib/libclang_rt.builtins.a | 0 .../esp32/no-rtti/lib/clang_rt.crtbegin.o | 0 .../esp32/no-rtti/lib/clang_rt.crtend.o | 0 .../esp32/no-rtti/lib/libclang_rt.builtins.a | 0 .../esp32s2/lib/clang_rt.crtbegin.o | 0 .../esp32s2/lib/clang_rt.crtend.o | 0 .../esp32s2/lib/libclang_rt.builtins.a | 0 .../esp32s2/no-rtti/lib/clang_rt.crtbegin.o | 0 .../esp32s2/no-rtti/lib/clang_rt.crtend.o | 0 .../no-rtti/lib/libclang_rt.builtins.a | 0 .../esp32s3/lib/clang_rt.crtbegin.o | 0 .../esp32s3/lib/clang_rt.crtend.o | 0 .../esp32s3/lib/libclang_rt.builtins.a | 0 .../esp32s3/no-rtti/lib/clang_rt.crtbegin.o | 0 .../esp32s3/no-rtti/lib/clang_rt.crtend.o | 0 .../no-rtti/lib/libclang_rt.builtins.a | 0 .../test/Driver/riscv32-esp-toolchain-extra.c | 115 ++++++++++++++++++ .../test/Driver/xtensa-esp-toolchain-extra.c | 111 +++++++++++++++++ 53 files changed, 226 insertions(+) create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/lib/clang_rt.crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/lib/clang_rt.crtend.o create mode 100644 
clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/lib/libclang_rt.builtins.a create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/clang_rt.crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/clang_rt.crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/libclang_rt.builtins.a create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/clang_rt.crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/clang_rt.crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/libclang_rt.builtins.a create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/clang_rt.crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/clang_rt.crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/libclang_rt.builtins.a create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/clang_rt.crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/clang_rt.crtend.o create mode 100644 
clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/libclang_rt.builtins.a create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/clang_rt.crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/clang_rt.crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/libclang_rt.builtins.a create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/clang_rt.crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/clang_rt.crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/libclang_rt.builtins.a create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/clang_rt.crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/clang_rt.crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/libclang_rt.builtins.a create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/clang_rt.crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/clang_rt.crtend.o create mode 100644 
clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/libclang_rt.builtins.a create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/clang_rt.crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/clang_rt.crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/libclang_rt.builtins.a create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/clang_rt.crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/clang_rt.crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/libclang_rt.builtins.a create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/clang_rt.crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/clang_rt.crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/libclang_rt.builtins.a create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/clang_rt.crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/clang_rt.crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/libclang_rt.builtins.a create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/clang_rt.crtbegin.o create mode 100644 
clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/clang_rt.crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/libclang_rt.builtins.a create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/clang_rt.crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/clang_rt.crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/libclang_rt.builtins.a create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/clang_rt.crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/clang_rt.crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/libclang_rt.builtins.a create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/clang_rt.crtbegin.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/clang_rt.crtend.o create mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/libclang_rt.builtins.a create mode 100644 clang/test/Driver/riscv32-esp-toolchain-extra.c create mode 100644 clang/test/Driver/xtensa-esp-toolchain-extra.c diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/lib/clang_rt.crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git 
a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/lib/clang_rt.crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/lib/libclang_rt.builtins.a new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/clang_rt.crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/clang_rt.crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/libclang_rt.builtins.a new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/clang_rt.crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff 
--git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/clang_rt.crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/libclang_rt.builtins.a new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/clang_rt.crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/clang_rt.crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/libclang_rt.builtins.a new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/clang_rt.crtbegin.o 
b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/clang_rt.crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/clang_rt.crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/libclang_rt.builtins.a new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/clang_rt.crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/clang_rt.crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/libclang_rt.builtins.a new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git 
a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/clang_rt.crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/clang_rt.crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/libclang_rt.builtins.a new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/clang_rt.crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/clang_rt.crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/libclang_rt.builtins.a 
b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/libclang_rt.builtins.a new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/clang_rt.crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/clang_rt.crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/libclang_rt.builtins.a new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/clang_rt.crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/clang_rt.crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git 
a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/libclang_rt.builtins.a new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/clang_rt.crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/clang_rt.crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/libclang_rt.builtins.a new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/clang_rt.crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/clang_rt.crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/libclang_rt.builtins.a 
b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/libclang_rt.builtins.a new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/clang_rt.crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/clang_rt.crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/libclang_rt.builtins.a new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/clang_rt.crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/clang_rt.crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/libclang_rt.builtins.a new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git 
a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/clang_rt.crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/clang_rt.crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/libclang_rt.builtins.a new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/clang_rt.crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/clang_rt.crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/libclang_rt.builtins.a new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/clang_rt.crtbegin.o 
b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/clang_rt.crtbegin.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/clang_rt.crtend.o new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/libclang_rt.builtins.a new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/clang/test/Driver/riscv32-esp-toolchain-extra.c b/clang/test/Driver/riscv32-esp-toolchain-extra.c new file mode 100644 index 0000000000000..13e94e981553c --- /dev/null +++ b/clang/test/Driver/riscv32-esp-toolchain-extra.c @@ -0,0 +1,115 @@ +// A basic clang -cc1 command-line, and simple environment check. + +// The tests here are similar to those in xtensa-toolchain.c, however +// these tests need to create symlinks to test directory trees in order to +// set up the environment and therefore shell support is required. +// REQUIRES: shell, xtensa-registered-target +// UNSUPPORTED: system-windows + +// Compiler-rt multilibs are located at '$INSTALLDIR/lib/clang/15.0.0//mcpu/'. +// At this moment multilib feature for compiler-rt is supported only when GCC installation with the same multilib structure is found. +// It is safe because ESP toolchain still depends on libstdc++ which is part of GCC installation. +// When libc++ wil be supported by toolchain the dependency on GCC multilibs will be removed. 
+ +// RUN: rm -rf %t +// RUN: mkdir -p %t/multilib_riscv_esp_elf_sdk/bin +// RUN: ln -s %clang %t/multilib_riscv_esp_elf_sdk/bin/clang +// RUN: ln -s %S/Inputs/multilib_riscv_esp_elf_sdk/bin/riscv32-esp-elf-ld %t/multilib_riscv_esp_elf_sdk/bin/riscv32-esp-elf-ld +// RUN: ln -s %S/Inputs/multilib_riscv_esp_elf_sdk/lib %t/multilib_riscv_esp_elf_sdk/lib +// RUN: ln -s %S/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf %t/multilib_riscv_esp_elf_sdk/riscv32-esp-elf + +// RUN: %t/multilib_riscv_esp_elf_sdk/bin/clang %s -### -no-canonical-prefixes \ +// RUN: --gcc-toolchain=%t/multilib_riscv_esp_elf_sdk \ +// RUN: -resource-dir=%t/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0 \ +// RUN: --target=riscv32-esp-elf -march=rv32i -mabi=ilp32 --rtlib=compiler-rt -fuse-ld= -fno-rtti 2>&1 \ +// RUN: | FileCheck -check-prefix=C-RV32I-RTLIB-COMPILERRT-NORTTI %s + +// C-RV32I-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" +// C-RV32I-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib/rv32i/ilp32/no-rtti{{/|\\\\}}crt0.o" +// C-RV32I-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/clang_rt.crtbegin.o" +// C-RV32I-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/libclang_rt.builtins.a" +// C-RV32I-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/clang_rt.crtend.o" + +// RUN: %t/multilib_riscv_esp_elf_sdk/bin/clang %s -### -no-canonical-prefixes \ +// RUN: --gcc-toolchain=%t/multilib_riscv_esp_elf_sdk \ +// RUN: -resource-dir=%t/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0 \ +// RUN: --target=riscv32-esp-elf -march=rv32i -mabi=ilp32 --rtlib=compiler-rt -fuse-ld= 2>&1 \ +// RUN: | FileCheck 
-check-prefix=C-RV32I-RTLIB-COMPILERRT-RTTI %s + +// C-RV32I-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" +// C-RV32I-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib/rv32i/ilp32{{/|\\\\}}crt0.o" +// C-RV32I-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/clang_rt.crtbegin.o" +// C-RV32I-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/libclang_rt.builtins.a" +// C-RV32I-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/clang_rt.crtend.o" + +// RUN: %t/multilib_riscv_esp_elf_sdk/bin/clang %s -### -no-canonical-prefixes \ +// RUN: --gcc-toolchain=%t/multilib_riscv_esp_elf_sdk \ +// RUN: -resource-dir=%t/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0 \ +// RUN: --target=riscv32-esp-elf -march=rv32imc -mabi=ilp32 --rtlib=compiler-rt -fuse-ld= -fno-rtti 2>&1 \ +// RUN: | FileCheck -check-prefix=C-RV32IMC-RTLIB-COMPILERRT-NORTTI %s + +// C-RV32IMC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" +// C-RV32IMC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib/rv32imc/ilp32/no-rtti{{/|\\\\}}crt0.o" +// C-RV32IMC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/clang_rt.crtbegin.o" +// C-RV32IMC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/libclang_rt.builtins.a" +// C-RV32IMC-RTLIB-COMPILERRT-NORTTI: 
"{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/clang_rt.crtend.o" + +// RUN: %t/multilib_riscv_esp_elf_sdk/bin/clang %s -### -no-canonical-prefixes \ +// RUN: --gcc-toolchain=%t/multilib_riscv_esp_elf_sdk \ +// RUN: -resource-dir=%t/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0 \ +// RUN: --target=riscv32-esp-elf -march=rv32imc -mabi=ilp32 --rtlib=compiler-rt -fuse-ld= 2>&1 \ +// RUN: | FileCheck -check-prefix=C-RV32IMC-RTLIB-COMPILERRT-RTTI %s + +// C-RV32IMC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" +// C-RV32IMC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib/rv32imc/ilp32{{/|\\\\}}crt0.o" +// C-RV32IMC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/clang_rt.crtbegin.o" +// C-RV32IMC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/libclang_rt.builtins.a" +// C-RV32IMC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/clang_rt.crtend.o" + +// RUN: %t/multilib_riscv_esp_elf_sdk/bin/clang %s -### -no-canonical-prefixes \ +// RUN: --gcc-toolchain=%t/multilib_riscv_esp_elf_sdk \ +// RUN: -resource-dir=%t/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0 \ +// RUN: --target=riscv32-esp-elf -march=rv32imac -mabi=ilp32 --rtlib=compiler-rt -fuse-ld= -fno-rtti 2>&1 \ +// RUN: | FileCheck -check-prefix=C-RV32IMAC-RTLIB-COMPILERRT-NORTTI %s + +// C-RV32IMAC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" +// C-RV32IMAC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib/rv32imac/ilp32/no-rtti{{/|\\\\}}crt0.o" +// 
C-RV32IMAC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/clang_rt.crtbegin.o" +// C-RV32IMAC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/libclang_rt.builtins.a" +// C-RV32IMAC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/clang_rt.crtend.o" + +// RUN: %t/multilib_riscv_esp_elf_sdk/bin/clang %s -### -no-canonical-prefixes \ +// RUN: --gcc-toolchain=%t/multilib_riscv_esp_elf_sdk \ +// RUN: -resource-dir=%t/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0 \ +// RUN: --target=riscv32-esp-elf -march=rv32imac -mabi=ilp32 --rtlib=compiler-rt -fuse-ld= 2>&1 \ +// RUN: | FileCheck -check-prefix=C-RV32IMAC-RTLIB-COMPILERRT-RTTI %s + +// C-RV32IMAC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" +// C-RV32IMAC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib/rv32imac/ilp32{{/|\\\\}}crt0.o" +// C-RV32IMAC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/clang_rt.crtbegin.o" +// C-RV32IMAC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/libclang_rt.builtins.a" +// C-RV32IMAC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/clang_rt.crtend.o" + +// RUN: %t/multilib_riscv_esp_elf_sdk/bin/clang %s -### -no-canonical-prefixes \ +// RUN: --gcc-toolchain=%t/multilib_riscv_esp_elf_sdk \ +// RUN: -resource-dir=%t/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0 \ +// RUN: --target=riscv32-esp-elf -march=rv32imafc -mabi=ilp32f --rtlib=compiler-rt -fuse-ld= -fno-rtti 2>&1 \ +// RUN: 
| FileCheck -check-prefix=C-RV32IMAFC-RTLIB-COMPILERRT-NORTTI %s + +// C-RV32IMAFC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" +// C-RV32IMAFC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib/rv32imafc/ilp32f/no-rtti{{/|\\\\}}crt0.o" +// C-RV32IMAFC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/clang_rt.crtbegin.o" +// C-RV32IMAFC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/libclang_rt.builtins.a" +// C-RV32IMAFC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/clang_rt.crtend.o" + +// RUN: %t/multilib_riscv_esp_elf_sdk/bin/clang %s -### -no-canonical-prefixes \ +// RUN: --gcc-toolchain=%t/multilib_riscv_esp_elf_sdk \ +// RUN: -resource-dir=%t/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0 \ +// RUN: --target=riscv32-esp-elf -march=rv32imafc -mabi=ilp32f --rtlib=compiler-rt -fuse-ld= 2>&1 \ +// RUN: | FileCheck -check-prefix=C-RV32IMAFC-RTLIB-COMPILERRT-RTTI %s + +// C-RV32IMAFC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" +// C-RV32IMAFC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib/rv32imafc/ilp32f{{/|\\\\}}crt0.o" +// C-RV32IMAFC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/clang_rt.crtbegin.o" +// C-RV32IMAFC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/libclang_rt.builtins.a" +// C-RV32IMAFC-RTLIB-COMPILERRT-RTTI: 
"{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/clang_rt.crtend.o" diff --git a/clang/test/Driver/xtensa-esp-toolchain-extra.c b/clang/test/Driver/xtensa-esp-toolchain-extra.c new file mode 100644 index 0000000000000..5e195019820d4 --- /dev/null +++ b/clang/test/Driver/xtensa-esp-toolchain-extra.c @@ -0,0 +1,111 @@ +// A basic clang -cc1 command-line, and simple environment check. + +// The tests here are similar to those in xtensa-toolchain.c, however +// these tests need to create symlinks to test directory trees in order to +// set up the environment and therefore shell support is required. +// REQUIRES: shell, xtensa-registered-target +// UNSUPPORTED: system-windows + +// Compiler-rt multilibs are located at '$INSTALLDIR/lib/clang/15.0.0//mcpu/'. +// At this moment multilib feature for compiler-rt is supported only when GCC installation with the same multilib structure is found. +// It is safe because ESP toolchain still depends on libstdc++ which is part of GCC installation. +// When libc++ wil be supported by toolchain the dependency on GCC multilibs will be removed. 
+ +// RUN: rm -rf %t +// RUN: mkdir -p %t/multilib_xtensa_tree/bin +// RUN: ln -s %clang %t/multilib_xtensa_tree/bin/clang +// RUN: ln -s %S/Inputs/multilib_xtensa_tree/bin/xtensa-esp32-elf-ld %t/multilib_xtensa_tree/bin/xtensa-esp32-elf-ld +// RUN: ln -s %S/Inputs/multilib_xtensa_tree/bin/xtensa-esp32s2-elf-ld %t/multilib_xtensa_tree/bin/xtensa-esp32s2-elf-ld +// RUN: ln -s %S/Inputs/multilib_xtensa_tree/bin/xtensa-esp32s3-elf-ld %t/multilib_xtensa_tree/bin/xtensa-esp32s3-elf-ld +// RUN: ln -s %S/Inputs/multilib_xtensa_tree/lib %t/multilib_xtensa_tree/lib +// RUN: ln -s %S/Inputs/multilib_xtensa_tree/xtensa-esp32-elf %t/multilib_xtensa_tree/xtensa-esp32-elf +// RUN: ln -s %S/Inputs/multilib_xtensa_tree/xtensa-esp32s2-elf %t/multilib_xtensa_tree/xtensa-esp32s2-elf +// RUN: ln -s %S/Inputs/multilib_xtensa_tree/xtensa-esp32s3-elf %t/multilib_xtensa_tree/xtensa-esp32s3-elf + +// RUN: %t/multilib_xtensa_tree/bin/clang %s -### -no-canonical-prefixes \ +// RUN: --gcc-toolchain=%t/multilib_xtensa_tree \ +// RUN: -resource-dir=%t/multilib_xtensa_tree/lib/clang/15.0.0 \ +// RUN: --target=xtensa-esp-elf -mcpu=esp32 --rtlib=compiler-rt -fuse-ld= -fno-rtti 2>&1 \ +// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-RTLIB-COMPILERRT-NORTTI %s + +// C-XTENSA-ESP32-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" +// C-XTENSA-ESP32-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/clang_rt.crtbegin.o" +// C-XTENSA-ESP32-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/libclang_rt.builtins.a" +// C-XTENSA-ESP32-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/clang_rt.crtend.o" + +// RUN: %t/multilib_xtensa_tree/bin/clang %s -### -no-canonical-prefixes \ +// RUN: --gcc-toolchain=%t/multilib_xtensa_tree \ +// RUN: 
-resource-dir=%t/multilib_xtensa_tree/lib/clang/15.0.0 \ +// RUN: --target=xtensa-esp-elf -mcpu=esp32 --rtlib=compiler-rt -fuse-ld= 2>&1 \ +// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-RTLIB-COMPILERRT-RTTI %s + +// C-XTENSA-ESP32-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" +// C-XTENSA-ESP32-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/clang_rt.crtbegin.o" +// C-XTENSA-ESP32-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/libclang_rt.builtins.a" +// C-XTENSA-ESP32-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/clang_rt.crtend.o" + +// RUN: %t/multilib_xtensa_tree/bin/clang %s -### -no-canonical-prefixes \ +// RUN: --gcc-toolchain=%t/multilib_xtensa_tree \ +// RUN: -resource-dir=%t/multilib_xtensa_tree/lib/clang/15.0.0 \ +// RUN: --target=xtensa-esp-elf -mcpu=esp32 --rtlib=compiler-rt -fuse-ld= -mfix-esp32-psram-cache-issue -fno-rtti 2>&1 \ +// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-RTLIB-COMPILERRT-PSRAM-NORTTI %s + +// C-XTENSA-ESP32-RTLIB-COMPILERRT-PSRAM-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" +// C-XTENSA-ESP32-RTLIB-COMPILERRT-PSRAM-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/clang_rt.crtbegin.o" +// C-XTENSA-ESP32-RTLIB-COMPILERRT-PSRAM-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/libclang_rt.builtins.a" +// C-XTENSA-ESP32-RTLIB-COMPILERRT-PSRAM-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/clang_rt.crtend.o" + +// RUN: %t/multilib_xtensa_tree/bin/clang %s -### -no-canonical-prefixes \ +// RUN: --gcc-toolchain=%t/multilib_xtensa_tree \ +// RUN: 
-resource-dir=%t/multilib_xtensa_tree/lib/clang/15.0.0 \ +// RUN: --target=xtensa-esp-elf -mcpu=esp32 --rtlib=compiler-rt -fuse-ld= -mfix-esp32-psram-cache-issue 2>&1 \ +// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-RTLIB-COMPILERRT-PSRAM-RTTI %s + +// C-XTENSA-ESP32-RTLIB-COMPILERRT-PSRAM-RTTI: "{{.*}}/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" +// C-XTENSA-ESP32-RTLIB-COMPILERRT-PSRAM-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/clang_rt.crtbegin.o" +// C-XTENSA-ESP32-RTLIB-COMPILERRT-PSRAM-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/libclang_rt.builtins.a" +// C-XTENSA-ESP32-RTLIB-COMPILERRT-PSRAM-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/clang_rt.crtend.o" + +// RUN: %t/multilib_xtensa_tree/bin/clang %s -### -no-canonical-prefixes \ +// RUN: --gcc-toolchain=%t/multilib_xtensa_tree \ +// RUN: -resource-dir=%t/multilib_xtensa_tree/lib/clang/15.0.0 \ +// RUN: --target=xtensa-esp-elf -mcpu=esp32s2 --rtlib=compiler-rt -fuse-ld= -fno-rtti 2>&1 \ +// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32S2-RTLIB-COMPILERRT-NORTTI %s + +// C-XTENSA-ESP32S2-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s2-elf-ld" +// C-XTENSA-ESP32S2-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/clang_rt.crtbegin.o" +// C-XTENSA-ESP32S2-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/libclang_rt.builtins.a" +// C-XTENSA-ESP32S2-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/clang_rt.crtend.o" + +// RUN: %t/multilib_xtensa_tree/bin/clang %s -### -no-canonical-prefixes \ +// RUN: --gcc-toolchain=%t/multilib_xtensa_tree \ 
+// RUN: -resource-dir=%t/multilib_xtensa_tree/lib/clang/15.0.0 \ +// RUN: --target=xtensa-esp-elf -mcpu=esp32s2 --rtlib=compiler-rt -fuse-ld= 2>&1 \ +// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32S2-RTLIB-COMPILERRT-RTTI %s + +// C-XTENSA-ESP32S2-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s2-elf-ld" +// C-XTENSA-ESP32S2-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/clang_rt.crtbegin.o" +// C-XTENSA-ESP32S2-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/libclang_rt.builtins.a" +// C-XTENSA-ESP32S2-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/clang_rt.crtend.o" + +// RUN: %t/multilib_xtensa_tree/bin/clang %s -### -no-canonical-prefixes \ +// RUN: --gcc-toolchain=%t/multilib_xtensa_tree \ +// RUN: -resource-dir=%t/multilib_xtensa_tree/lib/clang/15.0.0 \ +// RUN: --target=xtensa-esp-elf -mcpu=esp32s3 --rtlib=compiler-rt -fuse-ld= -fno-rtti 2>&1 \ +// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32S3-RTLIB-COMPILERRT-NORTTI %s + +// C-XTENSA-ESP32S3-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s3-elf-ld" +// C-XTENSA-ESP32S3-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/clang_rt.crtbegin.o" +// C-XTENSA-ESP32S3-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/libclang_rt.builtins.a" +// C-XTENSA-ESP32S3-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/clang_rt.crtend.o" + +// RUN: %t/multilib_xtensa_tree/bin/clang %s -### -no-canonical-prefixes \ +// RUN: --gcc-toolchain=%t/multilib_xtensa_tree \ +// RUN: -resource-dir=%t/multilib_xtensa_tree/lib/clang/15.0.0 \ 
+// RUN: --target=xtensa-esp-elf -mcpu=esp32s3 --rtlib=compiler-rt -fuse-ld= 2>&1 \ +// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32S3-RTLIB-COMPILERRT-RTTI %s + +// C-XTENSA-ESP32S3-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s3-elf-ld" +// C-XTENSA-ESP32S3-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/clang_rt.crtbegin.o" +// C-XTENSA-ESP32S3-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/libclang_rt.builtins.a" +// C-XTENSA-ESP32S3-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/clang_rt.crtend.o" From 0f3f8d0e2276c2a8ad503b2b32dce373bed8d850 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 25 Mar 2024 15:02:22 +0300 Subject: [PATCH 132/289] toolchain/xtensa: Use GNU linker when no GCC installation is detected --- clang/lib/Driver/ToolChains/Xtensa.cpp | 35 ++++++++++++++++++++------ clang/lib/Driver/ToolChains/Xtensa.h | 2 +- 2 files changed, 28 insertions(+), 9 deletions(-) diff --git a/clang/lib/Driver/ToolChains/Xtensa.cpp b/clang/lib/Driver/ToolChains/Xtensa.cpp index 2a00dc6e9ef40..2ca09f89ff82c 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.cpp +++ b/clang/lib/Driver/ToolChains/Xtensa.cpp @@ -30,8 +30,6 @@ using namespace clang::driver::toolchains; using namespace clang; using namespace llvm::opt; -using tools::addMultilibFlag; - /// Xtensa Toolchain XtensaToolChain::XtensaToolChain(const Driver &D, const llvm::Triple &Triple, const ArgList &Args) @@ -112,6 +110,16 @@ XtensaToolChain::XtensaToolChain(const Driver &D, const llvm::Triple &Triple, llvm::sys::path::append(SysRoot, "lib"); getFilePaths().push_back(SysRoot.c_str()); } + + if (getTriple().getVendor() == llvm::Triple::Espressif) { + StringRef CpuName = GetTargetCPUVersion(Args, Triple); + + // TODO: need to detect multilibs when GCC installation 
is not available + addEspMultilibsPaths(D, Multilibs, SelectedMultilibs.back(), CpuName, + D.Dir, getLibraryPaths()); + addEspMultilibsPaths(D, Multilibs, SelectedMultilibs.back(), CpuName, + D.Dir, getFilePaths()); + } } Tool *XtensaToolChain::buildLinker() const { @@ -201,12 +209,17 @@ XtensaToolChain::GetUnwindLibType(const llvm::opt::ArgList &Args) const { return ToolChain::UNW_None; } -const StringRef XtensaToolChain::GetTargetCPUVersion(const ArgList &Args) { +const StringRef XtensaToolChain::GetTargetCPUVersion(const ArgList &Args, const llvm::Triple &Triple) { + StringRef CPUName; if (Arg *A = Args.getLastArg(clang::driver::options::OPT_mcpu_EQ)) { - StringRef CPUName = A->getValue(); - return CPUName; + CPUName = A->getValue(); + } else if (Triple.getVendor() == llvm::Triple::Espressif) { + // 'esp32' is default for 'xtensa-esp-xxx' targets, + // for generic 'xtensa' target CPU should be always specified explicitly with '-mcpu' + CPUName = "esp32"; + } - return "esp32"; + return CPUName; } void tools::xtensa::Assembler::ConstructJob(Compilation &C, const JobAction &JA, @@ -268,11 +281,17 @@ void xtensa::Linker::ConstructJob(Compilation &C, const JobAction &JA, bool LinkerIsLLD; std::string LinkerPath = ToolChain.GetLinkerPath(&LinkerIsLLD); - if (ToolChain.GCCToolchainName != "") { - if (!LinkerIsLLD) { + if (!LinkerIsLLD) { + if (ToolChain.GCCToolchainName != "") { Linker.assign(ToolChain.GCCToolchainDir); llvm::sys::path::append( Linker, "bin", ToolChain.GCCToolchainName + "-" + getShortName()); + } else if (ToolChain.getTriple().getVendor() == llvm::Triple::Espressif) { + // ESP workaround, if there is no GCC installation we need to use xtensa-espXX-elf prefix for ld. 
+ // so guess it basing on selected mcpu + Linker.assign(ToolChain.getDriver().Dir); + llvm::sys::path::append( + Linker, "xtensa-" + ToolChain.GetTargetCPUVersion(Args, ToolChain.getTriple()) + "-elf-" + getShortName()); } else { Linker.assign(LinkerPath); } diff --git a/clang/lib/Driver/ToolChains/Xtensa.h b/clang/lib/Driver/ToolChains/Xtensa.h index bef3883742db5..d7b68e4c10782 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.h +++ b/clang/lib/Driver/ToolChains/Xtensa.h @@ -41,7 +41,7 @@ class LLVM_LIBRARY_VISIBILITY XtensaToolChain : public Generic_ELF { return (IsIntegratedAsm || (GCCToolchainName == "")); } - static const StringRef GetTargetCPUVersion(const llvm::opt::ArgList &Args); + static const StringRef GetTargetCPUVersion(const llvm::opt::ArgList &Args, const llvm::Triple &Triple); bool IsIntegratedAsm = true; std::string GCCLibAndIncVersion = ""; From cc4282f11f9db9a57ad71dbe60f062f0484d0e52 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:27 +0300 Subject: [PATCH 133/289] tooclahin/xtensa: Add crt0.o to linker command line automatically --- clang/lib/Driver/ToolChains/Xtensa.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clang/lib/Driver/ToolChains/Xtensa.cpp b/clang/lib/Driver/ToolChains/Xtensa.cpp index 2ca09f89ff82c..0b40155eb2e77 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.cpp +++ b/clang/lib/Driver/ToolChains/Xtensa.cpp @@ -315,7 +315,8 @@ void xtensa::Linker::ConstructJob(Compilation &C, const JobAction &JA, if (WantCRTs) { // TODO: The crt0.o is not used for esp targets, but maybe used in // future for other vendors - // CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o"))); + if (ToolChain.getTriple().getVendor() != llvm::Triple::Espressif) + CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o"))); CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin))); } From 36afc3bfac15f5dab0fffc45778d6c9fb4790f6c Mon Sep 17 00:00:00 2001 From: Andrei 
Safronov Date: Thu, 1 Jun 2023 00:43:27 +0300 Subject: [PATCH 134/289] [Xtensa] Implement __ieee754_sqrtf builtin --- compiler-rt/lib/builtins/CMakeLists.txt | 6 +- .../lib/builtins/xtensa/ieee754_sqrtf.S | 53 ++++++++++++ .../builtins/Unit/xtensa/ieee754_sqrtf_test.c | 81 +++++++++++++++++++ 3 files changed, 139 insertions(+), 1 deletion(-) create mode 100644 compiler-rt/lib/builtins/xtensa/ieee754_sqrtf.S create mode 100644 compiler-rt/test/builtins/Unit/xtensa/ieee754_sqrtf_test.c diff --git a/compiler-rt/lib/builtins/CMakeLists.txt b/compiler-rt/lib/builtins/CMakeLists.txt index df6ccb1ad4007..8dda01105ec8c 100644 --- a/compiler-rt/lib/builtins/CMakeLists.txt +++ b/compiler-rt/lib/builtins/CMakeLists.txt @@ -757,7 +757,11 @@ set(riscv64_SOURCES set(sparc_SOURCES ${GENERIC_SOURCES} ${GENERIC_TF_SOURCES}) set(sparcv9_SOURCES ${GENERIC_SOURCES} ${GENERIC_TF_SOURCES}) -set(xtensa_SOURCES ${GENERIC_SOURCES} ${GENERIC_TF_SOURCES}) +set(xtensa_SOURCES + xtensa/ieee754_sqrtf.S + ${GENERIC_SOURCES} + ${GENERIC_TF_SOURCES} +) set(wasm32_SOURCES ${GENERIC_TF_SOURCES} diff --git a/compiler-rt/lib/builtins/xtensa/ieee754_sqrtf.S b/compiler-rt/lib/builtins/xtensa/ieee754_sqrtf.S new file mode 100644 index 0000000000000..3e5f2a615e67f --- /dev/null +++ b/compiler-rt/lib/builtins/xtensa/ieee754_sqrtf.S @@ -0,0 +1,53 @@ +//===-- ieee754_sqrtf.S - single precision square root --------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + + .text + +#ifndef __XTENSA_SOFT_FLOAT__ + .align 4 + .global __ieee754_sqrtf + .type __ieee754_sqrtf, @function +__ieee754_sqrtf: +#ifdef __XTENSA_WINDOWED_ABI__ + entry sp, 16 +#endif + wfr f0, a2 + sqrt0.s f1, f0 + const.s f2, 0 + maddn.s f2, f1, f1 + nexp01.s f3, f0 + const.s f7, 3 + addexp.s f3, f7 + maddn.s f7, f2, f3 + nexp01.s f2, f0 + neg.s f4, f2 + maddn.s f1, f7, f1 + const.s f7, 0 + const.s f5, 0 + const.s f6, 0 + maddn.s f7, f4, f1 + maddn.s f5, f1, f3 + const.s f3, 3 + maddn.s f6, f3, f1 + maddn.s f2, f7, f7 + maddn.s f3, f5, f1 + neg.s f1, f6 + maddn.s f7, f2, f1 + maddn.s f6, f3, f6 + mksadj.s f1, f0 + nexp01.s f0, f0 + maddn.s f0, f7, f7 + neg.s f2, f6 + addexpm.s f7, f1 + addexp.s f2, f1 + divn.s f7, f0, f2 + rfr a2, f7 +#ifdef __XTENSA_WINDOWED_ABI__ + retw +#endif +#endif \ No newline at end of file diff --git a/compiler-rt/test/builtins/Unit/xtensa/ieee754_sqrtf_test.c b/compiler-rt/test/builtins/Unit/xtensa/ieee754_sqrtf_test.c new file mode 100644 index 0000000000000..29a2c168db5ac --- /dev/null +++ b/compiler-rt/test/builtins/Unit/xtensa/ieee754_sqrtf_test.c @@ -0,0 +1,81 @@ +// REQUIRES: xtensa-target-arch +// RUN: %clang_builtins %s %librt -o %t && %run %t +#define SINGLE_PRECISION +#include "fp_lib.h" +#include "int_lib.h" +#include +#include + +#if __xtensa__ +extern float __ieee754_sqrtf(float a); + +uint32_t test_data[32] = { + 0x1f411656, + 0x1f088887, + 0x7fc00000, + 0x7fc00000, + 0x4fb5d274, + 0x7fc00000, + 0x4bb53869, + 0x7fc00000, + 0x4ab511d2, + 0x35350b63, + 0x2a800246, + 0x7fc00000, + 0x426f77ec, + 0x7fc00000, + 0x7fc00000, + 0x541460cd, + 0x39ea0f5a, + 0x2cd13a2c, + 0x7fc00000, + 0x43054444, + 0x3160c8cb, + 0x7fc00000, + 0x4423b1e1, + 0x31f40eb0, + 0x28d6dcdd, + 0x2446a9bc, + 0x22066202, + 0x20e172a9, + 0x204c71ae, + 0x20088887, + 0x1fc11656, + 0x1f888887 +}; + +int 
test__ieee754_sqrtf(){ + if (__ieee754_sqrtf(-0.00000000) != -0.00000000) + return 1; + if (__ieee754_sqrtf(0.00000000) != 0.00000000) + return 1; + if (!isnan(__ieee754_sqrtf(NAN))) + return 1; + if (__ieee754_sqrtf(INFINITY) != INFINITY) + return 1; + if (__ieee754_sqrtf(4.0) != 2.0) + return 1; + + //Check some simple tests usign precalculated data + uint32_t x = 0x123456; + for (int i = 0; i < 32; i++) { + if (toRep(__ieee754_sqrtf(fromRep(x))) != test_data[i]) + return 1; + x = (x >> 1) | ((x & 1) << 31) ; + } + return 0; +} +#endif + + +int main() +{ +#if __xtensa__ + if (test__ieee754_sqrtf()) + return 1; +#else + printf("skipped\n"); +#endif + + return 0; +} From e9c6c5cc1b2c91161b8103a1fb505ff02f3de4db Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 27 Sep 2023 13:39:24 +0300 Subject: [PATCH 135/289] compiler-rt/tests: Adds specific build options for Espressif targets --- compiler-rt/test/builtins/Unit/lit.cfg.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/compiler-rt/test/builtins/Unit/lit.cfg.py b/compiler-rt/test/builtins/Unit/lit.cfg.py index f63d15919888e..0b6b971bca0d6 100644 --- a/compiler-rt/test/builtins/Unit/lit.cfg.py +++ b/compiler-rt/test/builtins/Unit/lit.cfg.py @@ -104,7 +104,10 @@ def get_libgcc_file_name(): if sys.platform in ["win32"] and execute_external: # Don't pass dosish path separator to msys bash.exe. 
base_lib = base_lib.replace("\\", "/") - config.substitutions.append(("%librt ", base_lib + " -lc -lm ")) + if config.target_triple in ['xtensa-esp-elf', 'riscv32-esp-elf']: + config.substitutions.append( ("%librt ", "-Wl,--start-group," + base_lib + ',-lm,-lc,--whole-archive,-lgloss,--no-whole-archive,-lc,--whole-archive,-lsys_qemu,--no-whole-archive,--end-group ') ) + else: + config.substitutions.append( ("%librt ", base_lib + ' -lc -lm ') ) builtins_build_crt = get_required_attr(config, "builtins_build_crt") if builtins_build_crt: From b783a7e3a43d976a7b7da6972caa66dc2af7e527 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 25 Mar 2024 17:31:45 +0300 Subject: [PATCH 136/289] compiler-rt/tests: Disable tests failing for Espressif targets --- .../test/builtins/Unit/compiler_rt_fmaxl_test.c | 2 +- compiler-rt/test/builtins/Unit/compiler_rt_logb_test.c | 6 +++++- .../test/builtins/Unit/compiler_rt_logbl_test.c | 2 +- .../test/builtins/Unit/compiler_rt_scalbnf_test.c | 7 +++++++ .../test/builtins/Unit/compiler_rt_scalbnl_test.c | 2 +- compiler-rt/test/builtins/Unit/ctor_dtor.c | 2 ++ compiler-rt/test/builtins/Unit/divsc3_test.c | 9 ++++++++- compiler-rt/test/builtins/Unit/dso_handle.cpp | 2 +- compiler-rt/test/builtins/Unit/extendhfsf2_test.c | 8 ++++++++ compiler-rt/test/builtins/Unit/mulsc3_test.c | 10 +++++++++- compiler-rt/test/builtins/Unit/udivmoddi4_test.c | 3 +++ 11 files changed, 46 insertions(+), 7 deletions(-) diff --git a/compiler-rt/test/builtins/Unit/compiler_rt_fmaxl_test.c b/compiler-rt/test/builtins/Unit/compiler_rt_fmaxl_test.c index 44f75cf0c3c2a..7ab76845a5241 100644 --- a/compiler-rt/test/builtins/Unit/compiler_rt_fmaxl_test.c +++ b/compiler-rt/test/builtins/Unit/compiler_rt_fmaxl_test.c @@ -9,7 +9,7 @@ // Since we are comparing the compiler-rt IEEE implementation against libc's // long double implementation, this test can only succeed if long double // is an IEEE 128-bit floating point number. 
-#if defined(CRT_HAS_TF_MODE) && defined(CRT_LDBL_IEEE_F128) +#if defined(CRT_HAS_TF_MODE) && defined(CRT_LDBL_IEEE_F128) && !defined(__riscv) int test__compiler_rt_fmaxl(fp_t x, fp_t y) { fp_t crt_value = __compiler_rt_fmaxl(x, y); diff --git a/compiler-rt/test/builtins/Unit/compiler_rt_logb_test.c b/compiler-rt/test/builtins/Unit/compiler_rt_logb_test.c index 2c1be875b0b88..c5610b5413a47 100644 --- a/compiler-rt/test/builtins/Unit/compiler_rt_logb_test.c +++ b/compiler-rt/test/builtins/Unit/compiler_rt_logb_test.c @@ -5,6 +5,8 @@ #include #include "fp_lib.h" +#if !defined(__riscv) + int test__compiler_rt_logb(fp_t x) { #if defined(__ve__) if (fpclassify(x) == FP_SUBNORMAL) @@ -29,6 +31,8 @@ double cases[] = { -0.0, 0.0, 1, -2, 2, -0.5, 0.5, }; +#endif + #ifndef __GLIBC_PREREQ #define __GLIBC_PREREQ(x, y) 0 #endif @@ -37,7 +41,7 @@ int main() { // Do not the run the compiler-rt logb test case if using GLIBC version // < 2.23. Older versions might not compute to the same value as the // compiler-rt value. 
-#if __GLIBC_PREREQ(2, 23) +#if __GLIBC_PREREQ(2, 23) && !defined(__riscv) const unsigned N = sizeof(cases) / sizeof(cases[0]); unsigned i; for (i = 0; i < N; ++i) { diff --git a/compiler-rt/test/builtins/Unit/compiler_rt_logbl_test.c b/compiler-rt/test/builtins/Unit/compiler_rt_logbl_test.c index f49ce710b0443..53c66f0ca6414 100644 --- a/compiler-rt/test/builtins/Unit/compiler_rt_logbl_test.c +++ b/compiler-rt/test/builtins/Unit/compiler_rt_logbl_test.c @@ -6,7 +6,7 @@ #include #include -#if defined(CRT_HAS_TF_MODE) +#if defined(CRT_HAS_TF_MODE) && !defined(__riscv) int test__compiler_rt_logbl(fp_t x) { # if defined(__ve__) diff --git a/compiler-rt/test/builtins/Unit/compiler_rt_scalbnf_test.c b/compiler-rt/test/builtins/Unit/compiler_rt_scalbnf_test.c index 3ffdde6aed3f2..0a4694864e2df 100644 --- a/compiler-rt/test/builtins/Unit/compiler_rt_scalbnf_test.c +++ b/compiler-rt/test/builtins/Unit/compiler_rt_scalbnf_test.c @@ -8,6 +8,8 @@ #include #include "fp_lib.h" +#if !defined(__xtensa__) && !defined(__riscv) + int test__compiler_rt_scalbnf(const char *mode, fp_t x, int y) { #if defined(__ve__) if (fpclassify(x) == FP_SUBNORMAL) @@ -52,8 +54,10 @@ int iterate_cases(const char *mode) { } return 0; } +#endif int main() { +#if !defined(__xtensa__) && !defined(__riscv) if (iterate_cases("default")) return 1; // Rounding mode tests on supported architectures. 
__compiler_rt_scalbnf @@ -80,6 +84,9 @@ int main() { fesetround(FE_TONEAREST); if (iterate_cases("FE_TONEAREST")) return 1; #endif +#else + printf("skipped\n"); +#endif return 0; } diff --git a/compiler-rt/test/builtins/Unit/compiler_rt_scalbnl_test.c b/compiler-rt/test/builtins/Unit/compiler_rt_scalbnl_test.c index 0d9bbdfd68e4a..ea8cbc5bbe8bd 100644 --- a/compiler-rt/test/builtins/Unit/compiler_rt_scalbnl_test.c +++ b/compiler-rt/test/builtins/Unit/compiler_rt_scalbnl_test.c @@ -8,7 +8,7 @@ #include #include -#if defined(CRT_HAS_TF_MODE) +#if defined(CRT_HAS_TF_MODE) && !defined(__riscv) int test__compiler_rt_scalbnl(const char *mode, fp_t x, int y) { #if defined(__ve__) diff --git a/compiler-rt/test/builtins/Unit/ctor_dtor.c b/compiler-rt/test/builtins/Unit/ctor_dtor.c index 47560722a9f75..7fa3c8d66d843 100644 --- a/compiler-rt/test/builtins/Unit/ctor_dtor.c +++ b/compiler-rt/test/builtins/Unit/ctor_dtor.c @@ -4,6 +4,8 @@ // RUN: %clang -o %t -no-pie -nostdlib %crt1 %crti %crtbegin %t.o -lc %libgcc %crtend %crtn // RUN: %run %t 2>&1 | FileCheck %s +// UNSUPPORTED: target={{.*-esp-elf.*}} + #include #include diff --git a/compiler-rt/test/builtins/Unit/divsc3_test.c b/compiler-rt/test/builtins/Unit/divsc3_test.c index 870ab6e845385..76e24ea877571 100644 --- a/compiler-rt/test/builtins/Unit/divsc3_test.c +++ b/compiler-rt/test/builtins/Unit/divsc3_test.c @@ -7,6 +7,8 @@ #include #include +// __divsc3 generates LoadStorePIFAddrErrorCause under QEMU +#if !__xtensa__ // Returns: the quotient of (a + ib) / (c + id) @@ -345,9 +347,12 @@ float x[][2] = {INFINITY, INFINITY} }; +#endif int main() { +// __divsc3 generates LoadStorePIFAddrErrorCause under QEMU +#if !__xtensa__ const unsigned N = sizeof(x) / sizeof(x[0]); unsigned i, j; for (i = 0; i < N; ++i) @@ -358,6 +363,8 @@ int main() return 1; } } - +#else + printf("skipped\n"); +#endif return 0; } diff --git a/compiler-rt/test/builtins/Unit/dso_handle.cpp b/compiler-rt/test/builtins/Unit/dso_handle.cpp index 
796746992af94..807fd27cfba79 100644 --- a/compiler-rt/test/builtins/Unit/dso_handle.cpp +++ b/compiler-rt/test/builtins/Unit/dso_handle.cpp @@ -6,7 +6,7 @@ // RUN: %clangxx -g -o %t -fno-pic -no-pie -nostdlib %crt1 %crti %crtbegin %t.o %libstdcxx -lc -lm %libgcc %t.so %crtend %crtn // RUN: %run %t 2>&1 | FileCheck %s -// UNSUPPORTED: target={{(arm|aarch64).*}} +// UNSUPPORTED: target={{(arm|aarch64).*}} || target={{.*-esp-elf.*}} #include diff --git a/compiler-rt/test/builtins/Unit/extendhfsf2_test.c b/compiler-rt/test/builtins/Unit/extendhfsf2_test.c index 86150e8fb0d77..dc3cd980e4899 100644 --- a/compiler-rt/test/builtins/Unit/extendhfsf2_test.c +++ b/compiler-rt/test/builtins/Unit/extendhfsf2_test.c @@ -5,6 +5,8 @@ #include "fp_test.h" +#if !defined(__riscv_float_abi_single) + float __extendhfsf2(TYPE_FP16 a); int test__extendhfsf2(TYPE_FP16 a, uint32_t expected) @@ -20,9 +22,11 @@ int test__extendhfsf2(TYPE_FP16 a, uint32_t expected) } char assumption_1[sizeof(TYPE_FP16) * CHAR_BIT == 16] = {0}; +#endif int main() { +#if !defined(__riscv_float_abi_single) // qNaN if (test__extendhfsf2(fromRep16(0x7e00), UINT32_C(0x7fc00000))) @@ -83,5 +87,9 @@ int main() if (test__extendhfsf2(fromRep16(0x7bff), UINT32_C(0x477fe000))) return 1; +#else + printf("skipped\n"); +#endif + return 0; } diff --git a/compiler-rt/test/builtins/Unit/mulsc3_test.c b/compiler-rt/test/builtins/Unit/mulsc3_test.c index e7cac4616a68b..1d405b8922699 100644 --- a/compiler-rt/test/builtins/Unit/mulsc3_test.c +++ b/compiler-rt/test/builtins/Unit/mulsc3_test.c @@ -8,6 +8,9 @@ #include +// __mulsc3 generates LoadStorePIFAddrErrorCause under QEMU +#if !__xtensa__ + // Returns: the product of a + ib and c + id COMPILER_RT_ABI float _Complex @@ -345,9 +348,12 @@ float x[][2] = {INFINITY, INFINITY} }; +#endif int main() { +// __mulsc3 generates LoadStorePIFAddrErrorCause under QEMU +#if !__xtensa__ const unsigned N = sizeof(x) / sizeof(x[0]); unsigned i, j; for (i = 0; i < N; ++i) @@ -358,6 +364,8 @@ int 
main() return 1; } } - +#else + printf("skipped\n"); +#endif return 0; } diff --git a/compiler-rt/test/builtins/Unit/udivmoddi4_test.c b/compiler-rt/test/builtins/Unit/udivmoddi4_test.c index ed6fea3cd4016..f04a05a3c38c7 100644 --- a/compiler-rt/test/builtins/Unit/udivmoddi4_test.c +++ b/compiler-rt/test/builtins/Unit/udivmoddi4_test.c @@ -213,6 +213,8 @@ du_int tests[][4] = {0x0000000000000001uLL, 0x00000003FFFFFFFDuLL, 0x0000000000000000uLL, 0x0000000000000001uLL}, {0x0000000000000001uLL, 0x00000003FFFFFFFEuLL, 0x0000000000000000uLL, 0x0000000000000001uLL}, {0x0000000000000001uLL, 0x00000003FFFFFFFFuLL, 0x0000000000000000uLL, 0x0000000000000001uLL}, +// huge tests array does not fit into ESP32 memory, so either build fails or qemu crashes +#if !defined(__xtensa__) && !defined(__riscv) {0x0000000000000001uLL, 0x0000001000000000uLL, 0x0000000000000000uLL, 0x0000000000000001uLL}, {0x0000000000000001uLL, 0x0000001000000001uLL, 0x0000000000000000uLL, 0x0000000000000001uLL}, {0x0000000000000001uLL, 0x0000001000000002uLL, 0x0000000000000000uLL, 0x0000000000000001uLL}, @@ -20615,6 +20617,7 @@ du_int tests[][4] = {0xFFFFFFFFFFFFFFFFuLL, 0xFFFFFFFFFFFFFFFDuLL, 0x0000000000000001uLL, 0x0000000000000002uLL}, {0xFFFFFFFFFFFFFFFFuLL, 0xFFFFFFFFFFFFFFFEuLL, 0x0000000000000001uLL, 0x0000000000000001uLL}, {0xFFFFFFFFFFFFFFFFuLL, 0xFFFFFFFFFFFFFFFFuLL, 0x0000000000000001uLL, 0x0000000000000000uLL} +#endif }; int main() From 7fa5100573870040d3b1a6917cbdd0af0d46d2a7 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:28 +0300 Subject: [PATCH 137/289] esp/ci: Update newlib branch --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 7323f4fcb7b1c..8156bf468ad5b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -15,7 +15,7 @@ variables: CLANG_VER: "16" GCC_REL_NAME: "esp-2022r1" GCC_REL_VER: "gcc11_2_0" - NEWLIB_REF: "esp-2022r1" + NEWLIB_REF: "esp-4.1.0_20230425" BINUTILS_REF: 
"esp-2022r1-binutils" XTENSA_OVERLAYS_REF: "master" LLVM_GCC_TESTSUITE_REF: "esp-16.0.0-20230419" From db966e0e06785dd695347a33732703f09feb8d2c Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:29 +0300 Subject: [PATCH 138/289] esp/ci: Saves log for 'test_x86_64-linux-gnu' --- .universal-toolchain-release.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.universal-toolchain-release.yml b/.universal-toolchain-release.yml index 1f51bcd9d9b39..7f5f0cdfe8a00 100644 --- a/.universal-toolchain-release.yml +++ b/.universal-toolchain-release.yml @@ -389,12 +389,19 @@ test_x86_64-linux-gnu: tags: [ "amd64", "build" ] needs: - job: pack_x86_64-linux-gnu + artifacts: + paths: + - ${BUILD_DIR}/tests.log + when: always + expire_in: 1 day variables: PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" script: + - BUILD_PATH=$PWD/$BUILD_DIR + - mkdir -p ${BUILD_PATH} - *get_release_name - ${UNARCHIVE_TOOL} ${DIST_DIR}/${ARCHIVE_NAME} # getting testsuite @@ -405,7 +412,7 @@ test_x86_64-linux-gnu: # qemu - ./qemu_esp32_install.sh # run testsuite for esp32 - - ./run_esp32_tests.sh + - ./run_esp32_tests.sh 2>&1 > ${BUILD_PATH}/tests.log .macos_codesign: &macos_codesign stage: sign From 43f8b2280eb0a7ada47536bc041dd10c4c017cda Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:29 +0300 Subject: [PATCH 139/289] esp/ci: Update 'llvm-xtensa-testsuite' ref --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8156bf468ad5b..be587f94940d5 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -18,7 +18,7 @@ variables: NEWLIB_REF: "esp-4.1.0_20230425" BINUTILS_REF: "esp-2022r1-binutils" XTENSA_OVERLAYS_REF: "master" - LLVM_GCC_TESTSUITE_REF: "esp-16.0.0-20230419" + LLVM_GCC_TESTSUITE_REF: "esp-16.0.0-20230425" XTENSA_CLANG_TOOLCHAIN_REF: 
"esp-15.0.0-20221201" CROSS_ARM_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-arm:1 From 592ac63f1769ce552a339ad184f8d29c79d591d5 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:30 +0300 Subject: [PATCH 140/289] esp/ci: Update 'xtensa-clang-toolchain' ref --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index be587f94940d5..c12ca71ffe244 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -19,7 +19,7 @@ variables: BINUTILS_REF: "esp-2022r1-binutils" XTENSA_OVERLAYS_REF: "master" LLVM_GCC_TESTSUITE_REF: "esp-16.0.0-20230425" - XTENSA_CLANG_TOOLCHAIN_REF: "esp-15.0.0-20221201" + XTENSA_CLANG_TOOLCHAIN_REF: "esp-16.0.0-20230502" CROSS_ARM_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-arm:1 PLATFORM_NAME_LINUX: "linux-amd64" From 05093230b76fb4b62626d78e2155e67141cc5022 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:30 +0300 Subject: [PATCH 141/289] esp/ci: Upload only x86_64-linux-gnu distro to HTTP server --- .universal-toolchain-release.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.universal-toolchain-release.yml b/.universal-toolchain-release.yml index 7f5f0cdfe8a00..4628e486d6e4c 100644 --- a/.universal-toolchain-release.yml +++ b/.universal-toolchain-release.yml @@ -475,11 +475,6 @@ upload_to_http: GIT_STRATEGY: fetch needs: - job: pack_x86_64-linux-gnu - - job: pack_arm-linux-gnueabihf - - job: pack_aarch64-linux-gnu - - job: pack_x86_64-w64-mingw32 - - job: sign_x86_64-apple-darwin - - job: sign_aarch64-apple-darwin before_script: - !reference [.use_ci_tools, script] script: From 464d6c6163619e41d0f4f43cb7ab732ba415c346 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:31 +0300 Subject: [PATCH 142/289] esp/ci: stick to binutils 2.35 --- .gitlab-ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index c12ca71ffe244..8e7756f7bae90 100644 --- a/.gitlab-ci.yml +++ 
b/.gitlab-ci.yml @@ -16,6 +16,9 @@ variables: GCC_REL_NAME: "esp-2022r1" GCC_REL_VER: "gcc11_2_0" NEWLIB_REF: "esp-4.1.0_20230425" + # TODO: LLVM-248. Upgrade binutils above 2.36 when Clang will be upgraded to >=17.x + # which supports 'zicsr' or 'zifencei' RISCV extensions via '-march=' + # https://www.spinics.net/lists/stable/msg645015.html BINUTILS_REF: "esp-2022r1-binutils" XTENSA_OVERLAYS_REF: "master" LLVM_GCC_TESTSUITE_REF: "esp-16.0.0-20230425" From 83705979820c598d027a3c68d8ff470a678c621f Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 19 Sep 2024 02:18:38 +0300 Subject: [PATCH 143/289] [Xtensa] Fix i8/i16 ABI alignment. Set 32-bit alignmnet for i8 and i16 types. --- clang/lib/Basic/Targets/Xtensa.h | 2 +- llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/clang/lib/Basic/Targets/Xtensa.h b/clang/lib/Basic/Targets/Xtensa.h index a23b28684e709..0e7d054ff47a8 100644 --- a/clang/lib/Basic/Targets/Xtensa.h +++ b/clang/lib/Basic/Targets/Xtensa.h @@ -49,7 +49,7 @@ class LLVM_LIBRARY_VISIBILITY XtensaTargetInfo : public TargetInfo { WIntType = UnsignedInt; UseZeroLengthBitfieldAlignment = true; MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32; - resetDataLayout("e-m:e-p:32:32-i8:8:32-i16:16:32-i64:64-i128:128-n32"); + resetDataLayout("e-m:e-p:32:32-i64:64-i128:128-n32"); } void getTargetDefines(const LangOptions &Opts, diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp index 020ae4a997572..3cb1812c96685 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp +++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp @@ -34,7 +34,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeXtensaTarget() { static std::string computeDataLayout(const Triple &TT, StringRef CPU, const TargetOptions &Options, bool IsLittle) { - std::string Ret = "e-m:e-p:32:32-i8:8:32-i16:16:32-i64:64-i128:128-n32"; + std::string Ret = 
"e-m:e-p:32:32-i64:64-i128:128-n32"; return Ret; } From c0338af88638d3d0a93c361e7641391f04742e69 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:31 +0300 Subject: [PATCH 144/289] esp/toolchain: Adds '-fdata-sections' to newlib target CFLAGS --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8e7756f7bae90..c6253679a43bc 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -22,7 +22,7 @@ variables: BINUTILS_REF: "esp-2022r1-binutils" XTENSA_OVERLAYS_REF: "master" LLVM_GCC_TESTSUITE_REF: "esp-16.0.0-20230425" - XTENSA_CLANG_TOOLCHAIN_REF: "esp-16.0.0-20230502" + XTENSA_CLANG_TOOLCHAIN_REF: "esp-16.0.0-20230511" CROSS_ARM_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-arm:1 PLATFORM_NAME_LINUX: "linux-amd64" From 9aeb4f8e1d79a8e62f4afaa927a43fc7ee32635d Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 1 Jun 2023 00:43:32 +0300 Subject: [PATCH 145/289] toolchain/esp: Bring 'libgcc' back to the toolchain --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index c6253679a43bc..8601675f3b7fd 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -22,7 +22,7 @@ variables: BINUTILS_REF: "esp-2022r1-binutils" XTENSA_OVERLAYS_REF: "master" LLVM_GCC_TESTSUITE_REF: "esp-16.0.0-20230425" - XTENSA_CLANG_TOOLCHAIN_REF: "esp-16.0.0-20230511" + XTENSA_CLANG_TOOLCHAIN_REF: "esp-16.0.0-20230516" CROSS_ARM_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-arm:1 PLATFORM_NAME_LINUX: "linux-amd64" From 48cdf98e88163b5131f26f8731d71d0dcd22ef7e Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 20:04:29 +0300 Subject: [PATCH 146/289] [Xtensa] Respect srli assembler semantics. 
--- .../Target/Xtensa/AsmParser/XtensaAsmParser.cpp | 14 ++++++++++++++ llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 11 ++++++++++- llvm/test/MC/Xtensa/Core/invalid.s | 8 ++++++-- llvm/test/MC/Xtensa/Core/shift.s | 11 ++++++++--- 4 files changed, 38 insertions(+), 6 deletions(-) diff --git a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp index 080dd0521c060..8c1d0d3e4c5e7 100644 --- a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp +++ b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp @@ -574,6 +574,20 @@ bool XtensaAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc, } break; } + case Xtensa::SRLI: { + uint32_t ImmOp32 = static_cast(Inst.getOperand(2).getImm()); + int64_t Imm = ImmOp32; + if (Imm >= 16 && Imm <= 31) { + MCInst TmpInst; + TmpInst.setLoc(IDLoc); + TmpInst.setOpcode(Xtensa::EXTUI); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(1)); + TmpInst.addOperand(MCOperand::createImm(Imm)); + TmpInst.addOperand(MCOperand::createImm(32 - Imm)); + Inst = TmpInst; + } + } break; default: break; } diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index e8a458d25f2f1..bafbe4e68ac7f 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -157,7 +157,7 @@ def SRAI : RRR_Inst<0x00, 0x01, 0x02, (outs AR:$r), (ins AR:$t, uimm5:$sa), let s = sa{3-0}; } -def SRLI : RRR_Inst<0x00, 0x01, 0x04, (outs AR:$r), (ins AR:$t, uimm4:$sa), +def SRLI : RRR_Inst<0x00, 0x01, 0x04, (outs AR:$r), (ins AR:$t, uimm5:$sa), "srli\t$r, $t, $sa", [(set AR:$r, (srl AR:$t, uimm4:$sa))]> { bits<4> sa; @@ -165,6 +165,15 @@ def SRLI : RRR_Inst<0x00, 0x01, 0x04, (outs AR:$r), (ins AR:$t, uimm4:$sa), let s = sa; } +def _SRLI : RRR_Inst<0x00, 0x01, 0x04, (outs AR:$r), (ins AR:$t, uimm4:$sa), + "_srli\t$r, $t, $sa", + [(set AR:$r, (srl AR:$t, uimm4:$sa))]> { + let DecoderNamespace = "Fallback"; + 
bits<4> sa; + + let s = sa; +} + def SLLI : RRR_Inst<0x00, 0x01, 0x00, (outs AR:$r), (ins AR:$s, shimm1_31:$sa), "slli\t$r, $s, $sa", [(set AR:$r, (shl AR:$s, shimm1_31:$sa))]> { diff --git a/llvm/test/MC/Xtensa/Core/invalid.s b/llvm/test/MC/Xtensa/Core/invalid.s index 7fc7b47db1337..b36f3509ea9bc 100644 --- a/llvm/test/MC/Xtensa/Core/invalid.s +++ b/llvm/test/MC/Xtensa/Core/invalid.s @@ -21,8 +21,12 @@ slli a1, a2, 0 # CHECK: :[[#@LINE-1]]:14: error: expected immediate in range [1, 31] # uimm4 -srli a1, a2, 16 -# CHECK: :[[#@LINE-1]]:14: error: expected immediate in range [0, 15] +_srli a1, a2, 16 +# CHECK: :[[#@LINE-1]]:15: error: expected immediate in range [0, 15] + +# uimm5 +srli a1, a2, 32 +# CHECK: :[[#@LINE-1]]:14: error: expected immediate in range [0, 31] # uimm5 srai a2, a3, 32 diff --git a/llvm/test/MC/Xtensa/Core/shift.s b/llvm/test/MC/Xtensa/Core/shift.s index 3f9c980ff5554..fbe00dc107d80 100644 --- a/llvm/test/MC/Xtensa/Core/shift.s +++ b/llvm/test/MC/Xtensa/Core/shift.s @@ -41,9 +41,14 @@ src a3, a4, a5 srl a6, a7 # Instruction format RRR -# CHECK-INST: srli a3, a4, 8 -# CHECK: encoding: [0x40,0x38,0x41] -srli a3, a4, 8 +# CHECK-INST: extui a3, a4, 18, 14 +# CHECK: encoding: [0x40,0x32,0xd5] +srli a3, a4, 18 + +# Instruction format RRR +# CHECK-INST: srli a3, a4, 14 +# CHECK: encoding: [0x40,0x3e,0x41] +_srli a3, a4, 14 # Instruction format RRR # CHECK-INST: ssa8l a14 From b57b54cd074f5236086ecb4b2aece513429bfd44 Mon Sep 17 00:00:00 2001 From: Stefan Stipanovic Date: Wed, 28 Jun 2023 10:56:49 +0200 Subject: [PATCH 147/289] [LLD][Xtensa] Recognize bt instruction in lld --- lld/ELF/Arch/Xtensa.cpp | 20 ++++++++++---------- lld/test/ELF/xtensa-reloc.s | 2 ++ 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/lld/ELF/Arch/Xtensa.cpp b/lld/ELF/Arch/Xtensa.cpp index 10d6c3b52d1dc..fef4d2c06b899 100644 --- a/lld/ELF/Arch/Xtensa.cpp +++ b/lld/ELF/Arch/Xtensa.cpp @@ -95,19 +95,19 @@ RelExpr Xtensa::getRelExpr(RelType type, const Symbol &s, } 
static inline bool isRRI8Branch(uint8_t *loc) { - if ((loc[0] & 0x0f) == 0b0111) { - // instructions: ball, bany, bbc, bbci, bbs, bbsi, beq, bge, bgeu, blt, - // bltu, bnall, bne, bnone + // instructions: ball, bany, bbc, bbci, bbs, bbsi, beq, bge, bgeu, blt, + // bltu, bnall, bne, bnone + if ((loc[0] & 0x0f) == 0b0111) return true; - } - if ((loc[0] & 0b11'1111) == 0b10'0110) { - // instructions: beqi, bgei, bnei, blti + // instructions: beqi, bgei, bnei, blti + if ((loc[0] & 0b11'1111) == 0b10'0110) return true; - } - if ((loc[0] & 0b1011'1111) == 0b1011'0110) { - // instructions: bgeui, bltui + // instructions: bgeui, bltui + if ((loc[0] & 0b1011'1111) == 0b1011'0110) + return true; + // instruction: bt + if ((loc[0] & 0b0111'1111) == 0b0111'0110) return true; - } // some other instruction return false; } diff --git a/lld/test/ELF/xtensa-reloc.s b/lld/test/ELF/xtensa-reloc.s index e14151ae4a814..3e3c4d4508a59 100644 --- a/lld/test/ELF/xtensa-reloc.s +++ b/lld/test/ELF/xtensa-reloc.s @@ -52,6 +52,8 @@ # CHECK: beq a3, a4, . +16 # CHECK-NEXT: ball a3, a4, . +13 # CHECK-NEXT: blt a3, a4, . +10 +# CHECK-NEXT: bt b0, . +7 beq a3, a4, d ball a3, a4, d blt a3, a4, d + bt b0, d From f86c623e536f080047ee23a95fecb86d80959da3 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 20:16:39 +0300 Subject: [PATCH 148/289] [Xtensa] Fix asm parsing of special registers. Fix parsing of the interrupt feature registers. The "interrupt" register mnemonic is used only with rsr instruction, "intset" and "intclear" register mnemonics are used only with wsr instruction. Also fixed "debugcause" and "prid" registers parsing. Fix tryParseRegister function. 
--- .../Xtensa/AsmParser/XtensaAsmParser.cpp | 84 ++++++++++++++----- .../Disassembler/XtensaDisassembler.cpp | 4 +- .../Xtensa/MCTargetDesc/XtensaInstPrinter.cpp | 18 ++++ llvm/lib/Target/Xtensa/XtensaRegisterInfo.td | 6 +- llvm/test/MC/Xtensa/xtensa-invalid-int.s | 19 +++++ llvm/test/MC/Xtensa/xtensa-valid-int.s | 29 ++++++- 6 files changed, 129 insertions(+), 31 deletions(-) create mode 100644 llvm/test/MC/Xtensa/xtensa-invalid-int.s diff --git a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp index 8c1d0d3e4c5e7..0da7430b0cd22 100644 --- a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp +++ b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp @@ -78,6 +78,7 @@ class XtensaAsmParser : public MCTargetAsmParser { ParseStatus parseImmediate(OperandVector &Operands); ParseStatus parseRegister(OperandVector &Operands, + StringRef Mnemonic, bool AllowParens = false, bool SR = false, bool UR = false); ParseStatus parseOperandWithModifier(OperandVector &Operands); @@ -86,10 +87,9 @@ class XtensaAsmParser : public MCTargetAsmParser { bool ParseInstructionWithSR(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, OperandVector &Operands); ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc, - SMLoc &EndLoc) override { - return ParseStatus::NoMatch; - } + SMLoc &EndLoc) override; ParseStatus parsePCRelTarget(OperandVector &Operands); + bool checkRegister(StringRef Mnemonic, StringRef RegName, MCRegister RegNo); bool parseLiteralDirective(SMLoc L); bool parseBeginDirective(SMLoc L); bool parseEndDirective(SMLoc L); @@ -721,6 +721,29 @@ ParseStatus XtensaAsmParser::parsePCRelTarget(OperandVector &Operands) { return ParseStatus::Success; } +// Attempts to match Name as a register (either using the default name or +// alternative ABI names), setting RegNo to the matching register. 
Upon +// failure, returns true and sets RegNo to 0 +static bool matchRegisterNameHelper(MCRegister &RegNo, StringRef Name) { + RegNo = MatchRegisterName(Name); + + if (RegNo == Xtensa::NoRegister) + RegNo = MatchRegisterAltName(Name.lower()); + + if (RegNo == Xtensa::NoRegister) + RegNo = MatchRegisterAltName(Name.upper()); + + return RegNo == Xtensa::NoRegister; +} + +ParseStatus XtensaAsmParser::tryParseRegister(MCRegister &RegNo, + SMLoc &StartLoc, + SMLoc &EndLoc) { + if (parseRegister(RegNo, StartLoc, EndLoc)) + return ParseStatus::NoMatch; + return ParseStatus::Success; +} + bool XtensaAsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) { const AsmToken &Tok = getParser().getTok(); @@ -738,12 +761,14 @@ bool XtensaAsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc, } ParseStatus XtensaAsmParser::parseRegister(OperandVector &Operands, + StringRef Mnemonic, bool AllowParens, bool SR, bool UR) { SMLoc FirstS = getLoc(); bool HadParens = false; AsmToken Buf[2]; std::string RegName = ""; + MCRegister RegNo = 0; int64_t Num; bool IsIdentifier = false; @@ -758,8 +783,6 @@ ParseStatus XtensaAsmParser::parseRegister(OperandVector &Operands, } } - unsigned RegNo = 0; - switch (getLexer().getKind()) { default: return ParseStatus::NoMatch; @@ -797,16 +820,13 @@ ParseStatus XtensaAsmParser::parseRegister(OperandVector &Operands, RegName = "F64S"; } else RegName = std::to_string(Num); - RegNo = MatchRegisterName(RegName); - if (RegNo == 0) - RegNo = MatchRegisterAltName(RegName); + + matchRegisterNameHelper(RegNo, RegName); break; case AsmToken::Identifier: IsIdentifier = true; RegName = getLexer().getTok().getIdentifier().str(); - RegNo = MatchRegisterName(RegName); - if (RegNo == 0) - RegNo = MatchRegisterAltName(RegName); + matchRegisterNameHelper(RegNo, RegName); break; } @@ -816,7 +836,7 @@ ParseStatus XtensaAsmParser::parseRegister(OperandVector &Operands, return ParseStatus::NoMatch; } - if (!checkRegister(RegNo)) { + if 
(!checkRegister(Mnemonic.lower(), RegName, RegNo)) { return ParseStatus::NoMatch; } @@ -891,7 +911,7 @@ bool XtensaAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic, return true; // Attempt to parse token as register - if (parseRegister(Operands, true, SR, UR).isSuccess()) + if (parseRegister(Operands, Mnemonic, true, SR, UR) == MatchOperand_Success) return false; // Attempt to parse token as an immediate @@ -920,15 +940,11 @@ bool XtensaAsmParser::ParseInstructionWithSR(ParseInstructionInfo &Info, Operands.push_back(XtensaOperand::createToken(Name.take_front(3), NameLoc)); StringRef RegName = Name.drop_front(4); - unsigned RegNo = MatchRegisterName(RegName); - - if (RegNo == 0) - RegNo = MatchRegisterAltName(RegName); + MCRegister RegNo = 0; - if (RegNo == 0) - return Error(NameLoc, "invalid register name"); + matchRegisterNameHelper(RegNo, RegName); - if (!checkRegister(RegNo)) { + if (!checkRegister(Name.lower(), RegName, RegNo)) { Error(NameLoc, "invalid register name"); return true; } @@ -1144,7 +1160,8 @@ ParseStatus XtensaAsmParser::parseDirective(AsmToken DirectiveID) { } // Verify SR and UR -bool XtensaAsmParser::checkRegister(unsigned RegNo) { +bool XtensaAsmParser::checkRegister(StringRef Mnemonic, StringRef RegName, + MCRegister RegNo) { StringRef CPU = getSTI().getCPU(); unsigned NumIntLevels = 0; unsigned NumTimers = 0; @@ -1153,6 +1170,8 @@ bool XtensaAsmParser::checkRegister(unsigned RegNo) { bool IsESP32S2 = false; bool IsESP32S3 = false; bool Res = true; + bool IsWSR = Mnemonic.starts_with("wsr"); + bool IsRSR = Mnemonic.starts_with("rsr"); // Assume that CPU is esp32 by default if ((CPU == "esp32") || (CPU == "")) { @@ -1209,11 +1228,14 @@ bool XtensaAsmParser::checkRegister(unsigned RegNo) { case Xtensa::DBREAKA1: case Xtensa::DBREAKC0: case Xtensa::DBREAKC1: - case Xtensa::DEBUGCAUSE: case Xtensa::ICOUNT: case Xtensa::ICOUNTLEVEL: Res = hasDebug(); break; + case Xtensa::DEBUGCAUSE: + Res = hasDebug(); + Res = Res & IsRSR; + 
break; case Xtensa::ATOMCTL: Res = hasATOMCTL(); break; @@ -1276,9 +1298,23 @@ bool XtensaAsmParser::checkRegister(unsigned RegNo) { break; case Xtensa::PRID: Res = hasPRID(); + Res = Res & IsRSR; + break; + case Xtensa::INTERRUPT: + // INTSET mnemonic is wrtite-only + // INTERRUPT mnemonic is read-only + if (RegName.starts_with("intset")) { + if (!IsWSR) + Res = false; + } else if (!IsRSR) { + Res = false; + } + Res = Res & hasInterrupt(); break; - case Xtensa::INTSET: case Xtensa::INTCLEAR: + Res = hasInterrupt(); + Res = Res & IsWSR; + break; case Xtensa::INTENABLE: Res = hasInterrupt(); break; @@ -1307,6 +1343,8 @@ bool XtensaAsmParser::checkRegister(unsigned RegNo) { case Xtensa::F64S: Res = hasDFPAccel(); break; + case Xtensa::NoRegister: + Res = false; } return Res; diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index a55a13340075b..e2974da88a441 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -275,7 +275,7 @@ bool CheckRegister(unsigned RegNo, MCSubtargetInfo STI) { case Xtensa::PRID: Res = STI.getFeatureBits()[Xtensa::FeaturePRID]; break; - case Xtensa::INTSET: + case Xtensa::INTERRUPT: case Xtensa::INTCLEAR: case Xtensa::INTENABLE: Res = STI.getFeatureBits()[Xtensa::FeatureInterrupt]; @@ -335,7 +335,7 @@ static const unsigned SRDecoderTable[] = { Xtensa::EXCSAVE2, 210, Xtensa::EXCSAVE3, 211, Xtensa::EXCSAVE4, 212, Xtensa::EXCSAVE5, 213, Xtensa::EXCSAVE6, 214, Xtensa::EXCSAVE7, 215, - Xtensa::CPENABLE, 224, Xtensa::INTSET, 226, + Xtensa::CPENABLE, 224, Xtensa::INTERRUPT, 226, Xtensa::INTCLEAR, 227, Xtensa::INTENABLE, 228, Xtensa::PS, 230, Xtensa::VECBASE, 231, Xtensa::EXCCAUSE, 232, Xtensa::DEBUGCAUSE, 233, diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp index 0a0d298ad267a..85db697c724d6 100644 --- 
a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp @@ -12,6 +12,7 @@ // //===----------------------------------------------------------------------===// +#include "XtensaInstrInfo.h" #include "XtensaInstPrinter.h" #include "llvm/CodeGen/MachineOperand.h" #include "llvm/MC/MCExpr.h" @@ -70,6 +71,23 @@ void XtensaInstPrinter::printOperand(const MCOperand &MC, raw_ostream &O) { void XtensaInstPrinter::printInst(const MCInst *MI, uint64_t Address, StringRef Annot, const MCSubtargetInfo &STI, raw_ostream &O) { + unsigned Opcode = MI->getOpcode(); + + switch (Opcode) { + case Xtensa::WSR: { + // INTERRUPT mnemonic is read-only, so use INTSET mnemonic instead + Register SR = MI->getOperand(0).getReg(); + if (SR == Xtensa::INTERRUPT) { + Register Reg = MI->getOperand(1).getReg(); + O << '\t' << "wsr" << '\t'; + printRegName(O, Reg); + O << ", " + << "intset"; + printAnnotation(O, Annot); + return; + } + } + } printInstruction(MI, Address, O); printAnnotation(O, Annot); } diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td index 93e67af82fc86..73b4a3cd00b58 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td @@ -163,7 +163,7 @@ def EXCSAVE7 : SRReg<215, "excsave7", ["EXCSAVE7", "215"]>; def CPENABLE : SRReg<224, "cpenable", ["CPENABLE", "224"]>; // Interrupt enable mask register -def INTSET : SRReg<226, "interrupt", ["INTERRUPT", "226"]>; +def INTERRUPT : SRReg<226, "interrupt", ["INTERRUPT", "INTSET", "226"]>; def INTCLEAR : SRReg<227, "intclear", ["INTCLEAR", "227"]>; @@ -213,8 +213,8 @@ def SR : RegisterClass<"Xtensa", [i32], 32, (add WINDOWBASE, WINDOWSTART, IBREAKENABLE, MEMCTL, ATOMCTL, DDR, IBREAKA0, IBREAKA1, DBREAKA0, DBREAKA1, DBREAKC0, DBREAKC1, CONFIGID0, EPC1, EPC2, EPC3, EPC4, EPC5, EPC6, EPC7, DEPC, EPS2, EPS3, EPS4, EPS5, EPS6, EPS7, CONFIGID1, EXCSAVE1, EXCSAVE2, - EXCSAVE3, 
EXCSAVE4, EXCSAVE5, EXCSAVE6, EXCSAVE7, CPENABLE, INTSET, INTCLEAR, INTENABLE, PS, - VECBASE, EXCCAUSE, DEBUGCAUSE, CCOUNT, PRID, ICOUNT, ICOUNTLEVEL, EXCVADDR, CCOMPARE0, + EXCSAVE3, EXCSAVE4, EXCSAVE5, EXCSAVE6, EXCSAVE7, CPENABLE, INTERRUPT, INTCLEAR, INTENABLE, + PS, VECBASE, EXCCAUSE, DEBUGCAUSE, CCOUNT, PRID, ICOUNT, ICOUNTLEVEL, EXCVADDR, CCOMPARE0, CCOMPARE1, CCOMPARE2, MISC0, MISC1, MISC2, MISC3)>; //===----------------------------------------------------------------------===// diff --git a/llvm/test/MC/Xtensa/xtensa-invalid-int.s b/llvm/test/MC/Xtensa/xtensa-invalid-int.s new file mode 100644 index 0000000000000..2a95c9ccd6810 --- /dev/null +++ b/llvm/test/MC/Xtensa/xtensa-invalid-int.s @@ -0,0 +1,19 @@ +# RUN: not llvm-mc -triple xtensa %s 2>&1 | FileCheck %s + +.align 4 +LBL0: + +rsr a0, intclear +# CHECK: :[[#@LINE-1]]:9: error: invalid operand for instruction + +rsr a0, intset +# CHECK: :[[#@LINE-1]]:9: error: invalid operand for instruction + +wsr a1, interrupt +# CHECK: :[[#@LINE-1]]:9: error: invalid operand for instruction + +xsr a1, intset +# CHECK: :[[#@LINE-1]]:9: error: invalid operand for instruction + +xsr a1, interrupt +# CHECK: :[[#@LINE-1]]:9: error: invalid operand for instruction diff --git a/llvm/test/MC/Xtensa/xtensa-valid-int.s b/llvm/test/MC/Xtensa/xtensa-valid-int.s index a24191ef4aa5a..68d1f24dc4ea8 100644 --- a/llvm/test/MC/Xtensa/xtensa-valid-int.s +++ b/llvm/test/MC/Xtensa/xtensa-valid-int.s @@ -1,7 +1,6 @@ # RUN: llvm-mc %s -triple=xtensa -mattr=+interrupt -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s - .align 4 LBL0: @@ -12,7 +11,31 @@ LBL0: # CHECK-INST: rsil a3, 1 # CHECK: encoding: [0x30,0x61,0x00] rsil a3, 1 - + +# CHECK-INST: rsr a8, interrupt +# CHECK: encoding: [0x80,0xe2,0x03] +rsr a8, interrupt + # CHECK-INST: waiti 1 # CHECK: encoding: [0x00,0x71,0x00] - waiti 1 \ No newline at end of file + waiti 1 + +# CHECK-INST: wsr a0, intclear +# CHECK: encoding: [0x00,0xe3,0x13] + wsr a0, intclear + 
+# CHECK-INST: wsr a0, intclear +# CHECK: encoding: [0x00,0xe3,0x13] + wsr.intclear a0 + +# CHECK-INST: wsr a0, intset +# CHECK: encoding: [0x00,0xe2,0x13] + wsr a0, intset + +# CHECK-INST: wsr a0, intset +# CHECK: encoding: [0x00,0xe2,0x13] + wsr.intset a0 + +# CHECK-INST: wsr a0, intset +# CHECK: encoding: [0x00,0xe2,0x13] + wsr.INTSET a0 From 52f1d86e30cd63f1db8084a9d04a36c56c58219d Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 23:33:20 +0300 Subject: [PATCH 149/289] [Xtensa] Implement ESP32 S3 DSP instructions. --- clang/include/clang/Basic/BuiltinsXtensa.def | 3 + .../clang/Basic/BuiltinsXtensaESP32S3.def | 269 + clang/lib/Sema/SemaXtensa.cpp | 228 + .../CodeGen/Xtensa/xtensa-ee-intrinsics.c | 514 ++ llvm/include/llvm/IR/IntrinsicsXtensa.td | 4 + .../llvm/IR/IntrinsicsXtensaESP32S3.td | 767 +++ .../Xtensa/AsmParser/XtensaAsmParser.cpp | 83 +- llvm/lib/Target/Xtensa/CMakeLists.txt | 1 + .../Disassembler/XtensaDisassembler.cpp | 121 + .../Xtensa/MCTargetDesc/XtensaInstPrinter.cpp | 129 + .../Xtensa/MCTargetDesc/XtensaInstPrinter.h | 11 + .../MCTargetDesc/XtensaMCCodeEmitter.cpp | 187 +- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 9 +- llvm/lib/Target/Xtensa/XtensaISelLowering.h | 6 +- llvm/lib/Target/Xtensa/XtensaInstrFormats.td | 12 + llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 2 + llvm/lib/Target/Xtensa/XtensaOperands.td | 83 + llvm/lib/Target/Xtensa/XtensaRegisterInfo.td | 27 +- .../lib/Target/Xtensa/XtensaS3DSPInstrInfo.td | 5659 +++++++++++++++++ .../Target/Xtensa/XtensaS3ISelLowering.cpp | 5262 +++++++++++++++ .../test/CodeGen/Xtensa/ee-intrinsics-loop.ll | 148 + llvm/test/CodeGen/Xtensa/xtensa-s3-dsp.ll | 764 +++ llvm/test/MC/Xtensa/xtensa-esp32s3-valid.s | 508 +- 23 files changed, 14785 insertions(+), 12 deletions(-) create mode 100644 clang/include/clang/Basic/BuiltinsXtensaESP32S3.def create mode 100644 clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c create mode 100644 llvm/include/llvm/IR/IntrinsicsXtensaESP32S3.td create 
mode 100644 llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td create mode 100644 llvm/lib/Target/Xtensa/XtensaS3ISelLowering.cpp create mode 100644 llvm/test/CodeGen/Xtensa/ee-intrinsics-loop.ll create mode 100644 llvm/test/CodeGen/Xtensa/xtensa-s3-dsp.ll diff --git a/clang/include/clang/Basic/BuiltinsXtensa.def b/clang/include/clang/Basic/BuiltinsXtensa.def index b00c7bd112611..97366c76e97d8 100644 --- a/clang/include/clang/Basic/BuiltinsXtensa.def +++ b/clang/include/clang/Basic/BuiltinsXtensa.def @@ -124,4 +124,7 @@ BUILTIN(__builtin_xtensa_wsr_m3, "vUi", "n") BUILTIN(__builtin_xtensa_rsr_m3, "Ui", "n") BUILTIN(__builtin_xtensa_xsr_m3, "vUi*", "n") +// generated code +#include "clang/Basic/BuiltinsXtensaESP32S3.def" + #undef BUILTIN diff --git a/clang/include/clang/Basic/BuiltinsXtensaESP32S3.def b/clang/include/clang/Basic/BuiltinsXtensaESP32S3.def new file mode 100644 index 0000000000000..efe6ee3404d86 --- /dev/null +++ b/clang/include/clang/Basic/BuiltinsXtensaESP32S3.def @@ -0,0 +1,269 @@ +//=== BuiltinsXtensaESP32S3.def - Xtensa Builtin function database -*- C++ -*-// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the Xtensa-specific builtin function database. Users of +// this file must define the BUILTIN macro to make use of this information. +// +// Automatically generated file, do not edit! +// +//===----------------------------------------------------------------------===// + +// The format of this database matches clang/Basic/Builtins.def. 
+ +BUILTIN(__builtin_xtensa_ee_andq, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_bitrev, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_cmul_s16, "vUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_cmul_s16_ld_incp, "vUiUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_cmul_s16_st_incp, "vUiUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_fft_ams_s16_ld_incp, "vUiUiUiUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_fft_ams_s16_ld_incp_uaup, "vUiUiUiUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_fft_ams_s16_ld_r32_decp, "vUiUiUiUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_fft_ams_s16_st_incp, "vUiUiUiUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_fft_cmul_s16_ld_xp, "vUiUiUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_fft_cmul_s16_st_xp, "vUiUiUiUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_fft_r2bf_s16, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_fft_r2bf_s16_st_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_fft_vst_r32_decp, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_ldf_128_ip, "vffffUiIi", "n") +BUILTIN(__builtin_xtensa_ee_ldf_128_xp, "vffffUiUi", "n") +BUILTIN(__builtin_xtensa_ee_ldf_64_ip, "vffUiIi", "n") +BUILTIN(__builtin_xtensa_ee_ldf_64_xp, "vffUiUi", "n") +BUILTIN(__builtin_xtensa_ee_ldqa_s16_128_ip, "vUiIi", "n") +BUILTIN(__builtin_xtensa_ee_ldqa_s16_128_xp, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_ldqa_s8_128_ip, "vUiIi", "n") +BUILTIN(__builtin_xtensa_ee_ldqa_s8_128_xp, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_ldqa_u16_128_ip, "vUiIi", "n") +BUILTIN(__builtin_xtensa_ee_ldqa_u16_128_xp, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_ldqa_u8_128_ip, "vUiIi", "n") +BUILTIN(__builtin_xtensa_ee_ldqa_u8_128_xp, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_ldxq_32, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_ld_128_usar_ip, "vUiUiIi", "n") +BUILTIN(__builtin_xtensa_ee_ld_128_usar_xp, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_ld_accx_ip, "vUiIi", "n") +BUILTIN(__builtin_xtensa_ee_ld_qacc_h_h_32_ip, "vUiIi", "n") 
+BUILTIN(__builtin_xtensa_ee_ld_qacc_h_l_128_ip, "vUiIi", "n") +BUILTIN(__builtin_xtensa_ee_ld_qacc_l_h_32_ip, "vUiIi", "n") +BUILTIN(__builtin_xtensa_ee_ld_qacc_l_l_128_ip, "vUiIi", "n") +BUILTIN(__builtin_xtensa_ee_ld_ua_state_ip, "vUiIi", "n") +BUILTIN(__builtin_xtensa_ee_movi_32_a, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_movi_32_q, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_mov_s16_qacc, "vUi", "n") +BUILTIN(__builtin_xtensa_ee_mov_s8_qacc, "vUi", "n") +BUILTIN(__builtin_xtensa_ee_mov_u16_qacc, "vUi", "n") +BUILTIN(__builtin_xtensa_ee_mov_u8_qacc, "vUi", "n") +BUILTIN(__builtin_xtensa_ee_notq, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_orq, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_slci_2q, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_slcxxp_2q, "vUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_srci_2q, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_srcmb_s16_qacc, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_srcmb_s8_qacc, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_srcq_128_st_incp, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_srcxxp_2q, "vUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_src_q, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_src_q_ld_ip, "vUiUiIiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_src_q_ld_xp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_src_q_qup, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_srs_accx, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_stf_128_ip, "vffffUiIi", "n") +BUILTIN(__builtin_xtensa_ee_stf_128_xp, "vffffUiUi", "n") +BUILTIN(__builtin_xtensa_ee_stf_64_ip, "vffUiIi", "n") +BUILTIN(__builtin_xtensa_ee_stf_64_xp, "vffUiUi", "n") +BUILTIN(__builtin_xtensa_ee_stxq_32, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_st_accx_ip, "vUiIi", "n") +BUILTIN(__builtin_xtensa_ee_st_qacc_h_h_32_ip, "vUiIi", "n") +BUILTIN(__builtin_xtensa_ee_st_qacc_h_l_128_ip, "vUiIi", "n") +BUILTIN(__builtin_xtensa_ee_st_qacc_l_h_32_ip, "vUiIi", "n") +BUILTIN(__builtin_xtensa_ee_st_qacc_l_l_128_ip, "vUiIi", "n") 
+BUILTIN(__builtin_xtensa_ee_st_ua_state_ip, "vUiIi", "n") +BUILTIN(__builtin_xtensa_ee_vadds_s16, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vadds_s16_ld_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vadds_s16_st_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vadds_s32, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vadds_s32_ld_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vadds_s32_st_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vadds_s8, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vadds_s8_ld_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vadds_s8_st_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vcmp_eq_s16, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vcmp_eq_s32, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vcmp_eq_s8, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vcmp_gt_s16, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vcmp_gt_s32, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vcmp_gt_s8, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vcmp_lt_s16, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vcmp_lt_s32, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vcmp_lt_s8, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vldbc_16, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vldbc_16_ip, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vldbc_16_xp, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vldbc_32, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vldbc_32_ip, "vUiUiIi", "n") +BUILTIN(__builtin_xtensa_ee_vldbc_32_xp, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vldbc_8, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vldbc_8_ip, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vldbc_8_xp, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vldhbc_16_incp, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vld_128_ip, "vUiUiIi", "n") +BUILTIN(__builtin_xtensa_ee_vld_128_xp, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vld_h_64_ip, "vUiUiIi", "n") +BUILTIN(__builtin_xtensa_ee_vld_h_64_xp, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vld_l_64_ip, "vUiUiIi", 
"n") +BUILTIN(__builtin_xtensa_ee_vld_l_64_xp, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmax_s16, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmax_s16_ld_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmax_s16_st_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmax_s32, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmax_s32_ld_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmax_s32_st_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmax_s8, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmax_s8_ld_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmax_s8_st_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmin_s16, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmin_s16_ld_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmin_s16_st_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmin_s32, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmin_s32_ld_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmin_s32_st_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmin_s8, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmin_s8_ld_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmin_s8_st_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s16_accx, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s16_accx_ld_ip, "vUiUiIiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s16_accx_ld_ip_qup, "vUiUiIiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s16_accx_ld_xp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s16_accx_ld_xp_qup, "vUiUiUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s16_qacc, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s16_qacc_ldbc_incp, "vUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s16_qacc_ldbc_incp_qup, "vUiUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s16_qacc_ld_ip, "vUiUiIiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s16_qacc_ld_ip_qup, "vUiUiIiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s16_qacc_ld_xp, "vUiUiUiUiUi", "n") 
+BUILTIN(__builtin_xtensa_ee_vmulas_s16_qacc_ld_xp_qup, "vUiUiUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s8_accx, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s8_accx_ld_ip, "vUiUiIiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s8_accx_ld_ip_qup, "vUiUiIiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s8_accx_ld_xp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s8_accx_ld_xp_qup, "vUiUiUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s8_qacc, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s8_qacc_ldbc_incp, "vUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s8_qacc_ldbc_incp_qup, "vUiUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s8_qacc_ld_ip, "vUiUiIiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s8_qacc_ld_ip_qup, "vUiUiIiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s8_qacc_ld_xp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_s8_qacc_ld_xp_qup, "vUiUiUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u16_accx, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u16_accx_ld_ip, "vUiUiIiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u16_accx_ld_ip_qup, "vUiUiIiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u16_accx_ld_xp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u16_accx_ld_xp_qup, "vUiUiUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u16_qacc, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u16_qacc_ldbc_incp, "vUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u16_qacc_ldbc_incp_qup, "vUiUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u16_qacc_ld_ip, "vUiUiIiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u16_qacc_ld_ip_qup, "vUiUiIiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u16_qacc_ld_xp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u16_qacc_ld_xp_qup, "vUiUiUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u8_accx, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u8_accx_ld_ip, "vUiUiIiUiUi", "n") 
+BUILTIN(__builtin_xtensa_ee_vmulas_u8_accx_ld_ip_qup, "vUiUiIiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u8_accx_ld_xp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u8_accx_ld_xp_qup, "vUiUiUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u8_qacc, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u8_qacc_ldbc_incp, "vUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u8_qacc_ldbc_incp_qup, "vUiUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u8_qacc_ld_ip, "vUiUiIiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u8_qacc_ld_ip_qup, "vUiUiIiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u8_qacc_ld_xp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmulas_u8_qacc_ld_xp_qup, "vUiUiUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmul_s16, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmul_s16_ld_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmul_s16_st_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmul_s8, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmul_s8_ld_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmul_s8_st_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmul_u16, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmul_u16_ld_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmul_u16_st_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmul_u8, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmul_u8_ld_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vmul_u8_st_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vprelu_s16, "vUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vprelu_s8, "vUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vrelu_s16, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vrelu_s8, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vsl_32, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vsmulas_s16_qacc, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vsmulas_s16_qacc_ld_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vsmulas_s8_qacc, "vUiUiUi", "n") 
+BUILTIN(__builtin_xtensa_ee_vsmulas_s8_qacc_ld_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vsr_32, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vst_128_ip, "vUiUiIi", "n") +BUILTIN(__builtin_xtensa_ee_vst_128_xp, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vst_h_64_ip, "vUiUiIi", "n") +BUILTIN(__builtin_xtensa_ee_vst_h_64_xp, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vst_l_64_ip, "vUiUiIi", "n") +BUILTIN(__builtin_xtensa_ee_vst_l_64_xp, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vsubs_s16, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vsubs_s16_ld_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vsubs_s16_st_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vsubs_s32, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vsubs_s32_ld_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vsubs_s32_st_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vsubs_s8, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vsubs_s8_ld_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vsubs_s8_st_incp, "vUiUiUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vunzip_16, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vunzip_32, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vunzip_8, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vzip_16, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vzip_32, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_vzip_8, "vUiUi", "n") +BUILTIN(__builtin_xtensa_ee_xorq, "vUiUiUi", "n") +BUILTIN(__builtin_xtensa_ee_zero_accx, "v", "n") +BUILTIN(__builtin_xtensa_ee_zero_q, "vUi", "n") +BUILTIN(__builtin_xtensa_ee_zero_qacc, "v", "n") +BUILTIN(__builtin_xtensa_rur_accx_0, "i", "n") +BUILTIN(__builtin_xtensa_rur_accx_1, "i", "n") +BUILTIN(__builtin_xtensa_rur_fft_bit_width, "i", "n") +BUILTIN(__builtin_xtensa_rur_gpio_out, "i", "n") +BUILTIN(__builtin_xtensa_rur_qacc_h_0, "i", "n") +BUILTIN(__builtin_xtensa_rur_qacc_h_1, "i", "n") +BUILTIN(__builtin_xtensa_rur_qacc_h_2, "i", "n") +BUILTIN(__builtin_xtensa_rur_qacc_h_3, "i", "n") 
+BUILTIN(__builtin_xtensa_rur_qacc_h_4, "i", "n") +BUILTIN(__builtin_xtensa_rur_qacc_l_0, "i", "n") +BUILTIN(__builtin_xtensa_rur_qacc_l_1, "i", "n") +BUILTIN(__builtin_xtensa_rur_qacc_l_2, "i", "n") +BUILTIN(__builtin_xtensa_rur_qacc_l_3, "i", "n") +BUILTIN(__builtin_xtensa_rur_qacc_l_4, "i", "n") +BUILTIN(__builtin_xtensa_rur_sar_byte, "i", "n") +BUILTIN(__builtin_xtensa_rur_ua_state_0, "i", "n") +BUILTIN(__builtin_xtensa_rur_ua_state_1, "i", "n") +BUILTIN(__builtin_xtensa_rur_ua_state_2, "i", "n") +BUILTIN(__builtin_xtensa_rur_ua_state_3, "i", "n") +BUILTIN(__builtin_xtensa_wur_accx_0, "vUi", "n") +BUILTIN(__builtin_xtensa_wur_accx_1, "vUi", "n") +BUILTIN(__builtin_xtensa_wur_fft_bit_width, "vUi", "n") +BUILTIN(__builtin_xtensa_wur_gpio_out, "vUi", "n") +BUILTIN(__builtin_xtensa_wur_qacc_h_0, "vUi", "n") +BUILTIN(__builtin_xtensa_wur_qacc_h_1, "vUi", "n") +BUILTIN(__builtin_xtensa_wur_qacc_h_2, "vUi", "n") +BUILTIN(__builtin_xtensa_wur_qacc_h_3, "vUi", "n") +BUILTIN(__builtin_xtensa_wur_qacc_h_4, "vUi", "n") +BUILTIN(__builtin_xtensa_wur_qacc_l_0, "vUi", "n") +BUILTIN(__builtin_xtensa_wur_qacc_l_1, "vUi", "n") +BUILTIN(__builtin_xtensa_wur_qacc_l_2, "vUi", "n") +BUILTIN(__builtin_xtensa_wur_qacc_l_3, "vUi", "n") +BUILTIN(__builtin_xtensa_wur_qacc_l_4, "vUi", "n") +BUILTIN(__builtin_xtensa_wur_sar_byte, "vUi", "n") +BUILTIN(__builtin_xtensa_wur_ua_state_0, "vUi", "n") +BUILTIN(__builtin_xtensa_wur_ua_state_1, "vUi", "n") +BUILTIN(__builtin_xtensa_wur_ua_state_2, "vUi", "n") +BUILTIN(__builtin_xtensa_wur_ua_state_3, "vUi", "n") +BUILTIN(__builtin_xtensa_mv_qr, "vUiUi", "n") diff --git a/clang/lib/Sema/SemaXtensa.cpp b/clang/lib/Sema/SemaXtensa.cpp index b81e4381fc44f..c4b51c8b6fb9e 100644 --- a/clang/lib/Sema/SemaXtensa.cpp +++ b/clang/lib/Sema/SemaXtensa.cpp @@ -93,6 +93,234 @@ bool SemaXtensa::CheckXtensaBuiltinFunctionCall(const TargetInfo &TI, return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) || SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 1) || 
SemaRef.BuiltinConstantArgRange(TheCall, 3, 2, 3); + case Xtensa::BI__builtin_xtensa_ee_andq: + case Xtensa::BI__builtin_xtensa_ee_cmul_s16: + case Xtensa::BI__builtin_xtensa_ee_fft_cmul_s16_st_xp: + case Xtensa::BI__builtin_xtensa_ee_fft_r2bf_s16_st_incp: + case Xtensa::BI__builtin_xtensa_ee_orq: + case Xtensa::BI__builtin_xtensa_ee_src_q: + case Xtensa::BI__builtin_xtensa_ee_src_q_qup: + case Xtensa::BI__builtin_xtensa_ee_vadds_s16: + case Xtensa::BI__builtin_xtensa_ee_vadds_s32: + case Xtensa::BI__builtin_xtensa_ee_vadds_s8: + case Xtensa::BI__builtin_xtensa_ee_vcmp_eq_s16: + case Xtensa::BI__builtin_xtensa_ee_vcmp_eq_s32: + case Xtensa::BI__builtin_xtensa_ee_vcmp_eq_s8: + case Xtensa::BI__builtin_xtensa_ee_vcmp_gt_s16: + case Xtensa::BI__builtin_xtensa_ee_vcmp_gt_s32: + case Xtensa::BI__builtin_xtensa_ee_vcmp_gt_s8: + case Xtensa::BI__builtin_xtensa_ee_vcmp_lt_s16: + case Xtensa::BI__builtin_xtensa_ee_vcmp_lt_s32: + case Xtensa::BI__builtin_xtensa_ee_vcmp_lt_s8: + case Xtensa::BI__builtin_xtensa_ee_vmax_s16: + case Xtensa::BI__builtin_xtensa_ee_vmax_s32: + case Xtensa::BI__builtin_xtensa_ee_vmax_s8: + case Xtensa::BI__builtin_xtensa_ee_vmin_s16: + case Xtensa::BI__builtin_xtensa_ee_vmin_s32: + case Xtensa::BI__builtin_xtensa_ee_vmin_s8: + case Xtensa::BI__builtin_xtensa_ee_vmul_s16: + case Xtensa::BI__builtin_xtensa_ee_vmul_s8: + case Xtensa::BI__builtin_xtensa_ee_vmul_u16: + case Xtensa::BI__builtin_xtensa_ee_vmul_u8: + case Xtensa::BI__builtin_xtensa_ee_vprelu_s16: + case Xtensa::BI__builtin_xtensa_ee_vprelu_s8: + case Xtensa::BI__builtin_xtensa_ee_vsubs_s16: + case Xtensa::BI__builtin_xtensa_ee_vsubs_s32: + case Xtensa::BI__builtin_xtensa_ee_vsubs_s8: + case Xtensa::BI__builtin_xtensa_ee_xorq: + return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 7); + case Xtensa::BI__builtin_xtensa_ee_bitrev: + case Xtensa::BI__builtin_xtensa_ee_fft_vst_r32_decp: 
+ case Xtensa::BI__builtin_xtensa_ee_ld_128_usar_ip: + case Xtensa::BI__builtin_xtensa_ee_ld_128_usar_xp: + case Xtensa::BI__builtin_xtensa_ee_movi_32_a: + case Xtensa::BI__builtin_xtensa_ee_movi_32_q: + case Xtensa::BI__builtin_xtensa_ee_mov_s16_qacc: + case Xtensa::BI__builtin_xtensa_ee_mov_s8_qacc: + case Xtensa::BI__builtin_xtensa_ee_mov_u16_qacc: + case Xtensa::BI__builtin_xtensa_ee_mov_u8_qacc: + case Xtensa::BI__builtin_xtensa_ee_srcmb_s16_qacc: + case Xtensa::BI__builtin_xtensa_ee_srcmb_s8_qacc: + case Xtensa::BI__builtin_xtensa_ee_vldbc_16: + case Xtensa::BI__builtin_xtensa_ee_vldbc_16_ip: + case Xtensa::BI__builtin_xtensa_ee_vldbc_16_xp: + case Xtensa::BI__builtin_xtensa_ee_vldbc_32: + case Xtensa::BI__builtin_xtensa_ee_vldbc_32_ip: + case Xtensa::BI__builtin_xtensa_ee_vldbc_32_xp: + case Xtensa::BI__builtin_xtensa_ee_vldbc_8: + case Xtensa::BI__builtin_xtensa_ee_vldbc_8_ip: + case Xtensa::BI__builtin_xtensa_ee_vldbc_8_xp: + case Xtensa::BI__builtin_xtensa_ee_vld_128_ip: + case Xtensa::BI__builtin_xtensa_ee_vld_128_xp: + case Xtensa::BI__builtin_xtensa_ee_vld_h_64_ip: + case Xtensa::BI__builtin_xtensa_ee_vld_h_64_xp: + case Xtensa::BI__builtin_xtensa_ee_vld_l_64_ip: + case Xtensa::BI__builtin_xtensa_ee_vld_l_64_xp: + case Xtensa::BI__builtin_xtensa_ee_vrelu_s16: + case Xtensa::BI__builtin_xtensa_ee_vrelu_s8: + case Xtensa::BI__builtin_xtensa_ee_vst_128_ip: + case Xtensa::BI__builtin_xtensa_ee_vst_128_xp: + case Xtensa::BI__builtin_xtensa_ee_vst_h_64_ip: + case Xtensa::BI__builtin_xtensa_ee_vst_h_64_xp: + case Xtensa::BI__builtin_xtensa_ee_vst_l_64_ip: + case Xtensa::BI__builtin_xtensa_ee_vst_l_64_xp: + case Xtensa::BI__builtin_xtensa_ee_zero_q: + return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 7); + case Xtensa::BI__builtin_xtensa_ee_cmul_s16_ld_incp: + case Xtensa::BI__builtin_xtensa_ee_cmul_s16_st_incp: + case Xtensa::BI__builtin_xtensa_ee_vadds_s16_ld_incp: + case Xtensa::BI__builtin_xtensa_ee_vadds_s16_st_incp: + case 
Xtensa::BI__builtin_xtensa_ee_vadds_s32_ld_incp: + case Xtensa::BI__builtin_xtensa_ee_vadds_s32_st_incp: + case Xtensa::BI__builtin_xtensa_ee_vadds_s8_ld_incp: + case Xtensa::BI__builtin_xtensa_ee_vadds_s8_st_incp: + case Xtensa::BI__builtin_xtensa_ee_vmax_s16_ld_incp: + case Xtensa::BI__builtin_xtensa_ee_vmax_s16_st_incp: + case Xtensa::BI__builtin_xtensa_ee_vmax_s32_ld_incp: + case Xtensa::BI__builtin_xtensa_ee_vmax_s32_st_incp: + case Xtensa::BI__builtin_xtensa_ee_vmax_s8_ld_incp: + case Xtensa::BI__builtin_xtensa_ee_vmax_s8_st_incp: + case Xtensa::BI__builtin_xtensa_ee_vmin_s16_ld_incp: + case Xtensa::BI__builtin_xtensa_ee_vmin_s16_st_incp: + case Xtensa::BI__builtin_xtensa_ee_vmin_s32_ld_incp: + case Xtensa::BI__builtin_xtensa_ee_vmin_s32_st_incp: + case Xtensa::BI__builtin_xtensa_ee_vmin_s8_ld_incp: + case Xtensa::BI__builtin_xtensa_ee_vmin_s8_st_incp: + case Xtensa::BI__builtin_xtensa_ee_vmul_s16_ld_incp: + case Xtensa::BI__builtin_xtensa_ee_vmul_s16_st_incp: + case Xtensa::BI__builtin_xtensa_ee_vmul_s8_ld_incp: + case Xtensa::BI__builtin_xtensa_ee_vmul_s8_st_incp: + case Xtensa::BI__builtin_xtensa_ee_vmul_u16_ld_incp: + case Xtensa::BI__builtin_xtensa_ee_vmul_u16_st_incp: + case Xtensa::BI__builtin_xtensa_ee_vmul_u8_ld_incp: + case Xtensa::BI__builtin_xtensa_ee_vmul_u8_st_incp: + case Xtensa::BI__builtin_xtensa_ee_vsubs_s16_ld_incp: + case Xtensa::BI__builtin_xtensa_ee_vsubs_s16_st_incp: + case Xtensa::BI__builtin_xtensa_ee_vsubs_s32_ld_incp: + case Xtensa::BI__builtin_xtensa_ee_vsubs_s32_st_incp: + case Xtensa::BI__builtin_xtensa_ee_vsubs_s8_ld_incp: + case Xtensa::BI__builtin_xtensa_ee_vsubs_s8_st_incp: + return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 4, 0, 7); + case Xtensa::BI__builtin_xtensa_ee_fft_ams_s16_ld_incp: + case Xtensa::BI__builtin_xtensa_ee_fft_ams_s16_ld_incp_uaup: + case 
Xtensa::BI__builtin_xtensa_ee_fft_ams_s16_ld_r32_decp: + return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 4, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 5, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 6, 0, 7); + case Xtensa::BI__builtin_xtensa_ee_fft_ams_s16_st_incp: + return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 4, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 5, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 6, 0, 7); + case Xtensa::BI__builtin_xtensa_ee_fft_cmul_s16_ld_xp: + return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 4, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 5, 0, 7); + case Xtensa::BI__builtin_xtensa_ee_fft_r2bf_s16: + return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 7); + case Xtensa::BI__builtin_xtensa_ee_ldxq_32: + case Xtensa::BI__builtin_xtensa_ee_notq: + case Xtensa::BI__builtin_xtensa_ee_slci_2q: + case Xtensa::BI__builtin_xtensa_ee_slcxxp_2q: + case Xtensa::BI__builtin_xtensa_ee_srci_2q: + case Xtensa::BI__builtin_xtensa_ee_srcq_128_st_incp: + case Xtensa::BI__builtin_xtensa_ee_srcxxp_2q: + case Xtensa::BI__builtin_xtensa_ee_stxq_32: + case Xtensa::BI__builtin_xtensa_ee_vldhbc_16_incp: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s16_accx: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s16_qacc: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s8_accx: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s8_qacc: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u16_accx: + case 
Xtensa::BI__builtin_xtensa_ee_vmulas_u16_qacc: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u8_accx: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u8_qacc: + case Xtensa::BI__builtin_xtensa_ee_vsl_32: + case Xtensa::BI__builtin_xtensa_ee_vsmulas_s16_qacc: + case Xtensa::BI__builtin_xtensa_ee_vsmulas_s8_qacc: + case Xtensa::BI__builtin_xtensa_ee_vsr_32: + case Xtensa::BI__builtin_xtensa_ee_vunzip_16: + case Xtensa::BI__builtin_xtensa_ee_vunzip_32: + case Xtensa::BI__builtin_xtensa_ee_vunzip_8: + case Xtensa::BI__builtin_xtensa_ee_vzip_16: + case Xtensa::BI__builtin_xtensa_ee_vzip_32: + case Xtensa::BI__builtin_xtensa_ee_vzip_8: + case Xtensa::BI__builtin_xtensa_mv_qr: + return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 7); + case Xtensa::BI__builtin_xtensa_ee_src_q_ld_ip: + case Xtensa::BI__builtin_xtensa_ee_src_q_ld_xp: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s16_accx_ld_ip: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s16_accx_ld_xp: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s16_qacc_ld_ip: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s16_qacc_ld_xp: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s8_accx_ld_ip: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s8_accx_ld_xp: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s8_qacc_ld_ip: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s8_qacc_ld_xp: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u16_accx_ld_ip: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u16_accx_ld_xp: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u16_qacc_ld_ip: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u16_qacc_ld_xp: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u8_accx_ld_ip: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u8_accx_ld_xp: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u8_qacc_ld_ip: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u8_qacc_ld_xp: + return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 
4, 0, 7); + case Xtensa::BI__builtin_xtensa_ee_vmulas_s16_accx_ld_ip_qup: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s16_accx_ld_xp_qup: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s16_qacc_ld_ip_qup: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s16_qacc_ld_xp_qup: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s8_accx_ld_ip_qup: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s8_accx_ld_xp_qup: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s8_qacc_ld_ip_qup: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s8_qacc_ld_xp_qup: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u16_accx_ld_ip_qup: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u16_accx_ld_xp_qup: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u16_qacc_ld_ip_qup: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u16_qacc_ld_xp_qup: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u8_accx_ld_ip_qup: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u8_accx_ld_xp_qup: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u8_qacc_ld_ip_qup: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u8_qacc_ld_xp_qup: + return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 4, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 5, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 6, 0, 7); + case Xtensa::BI__builtin_xtensa_ee_vmulas_s16_qacc_ldbc_incp: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s8_qacc_ldbc_incp: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u16_qacc_ldbc_incp: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u8_qacc_ldbc_incp: + case Xtensa::BI__builtin_xtensa_ee_vsmulas_s16_qacc_ld_incp: + case Xtensa::BI__builtin_xtensa_ee_vsmulas_s8_qacc_ld_incp: + return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 7); + case Xtensa::BI__builtin_xtensa_ee_vmulas_s16_qacc_ldbc_incp_qup: + case Xtensa::BI__builtin_xtensa_ee_vmulas_s8_qacc_ldbc_incp_qup: + case 
Xtensa::BI__builtin_xtensa_ee_vmulas_u16_qacc_ldbc_incp_qup: + case Xtensa::BI__builtin_xtensa_ee_vmulas_u8_qacc_ldbc_incp_qup: + return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 4, 0, 7) && + SemaRef.BuiltinConstantArgRange(TheCall, 5, 0, 7); } return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u); } diff --git a/clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c b/clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c new file mode 100644 index 0000000000000..c3ce2e107d886 --- /dev/null +++ b/clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c @@ -0,0 +1,514 @@ +// RUN: %clang_cc1 -no-opaque-pointers -triple xtensa -S -emit-llvm -O0 -o - %s \ +// RUN: | FileCheck %s + +#include + +void test() { + // CHECK: void @test() + uint32_t data = 10; + float a, b, c, d; + __builtin_xtensa_ee_andq(0, 4, 3); + // CHECK: call void @llvm.xtensa.ee.andq + __builtin_xtensa_ee_bitrev(5, data); + // CHECK: call void @llvm.xtensa.ee.bitrev + __builtin_xtensa_ee_cmul_s16(6, 1, 1, 1); + // CHECK: call void @llvm.xtensa.ee.cmul.s16 + __builtin_xtensa_ee_cmul_s16_ld_incp(7, data, 3, 4, 7, 1); + // CHECK: call void @llvm.xtensa.ee.cmul.s16.ld.incp + __builtin_xtensa_ee_cmul_s16_st_incp(0, data, 1, 7, 3, 3); + // CHECK: call void @llvm.xtensa.ee.cmul.s16.st.incp + __builtin_xtensa_ee_fft_ams_s16_ld_incp(0, data, 4, 7, 7, 5, 5, 1); + // CHECK: call void @llvm.xtensa.ee.fft.ams.s16.ld.incp + __builtin_xtensa_ee_fft_ams_s16_ld_incp_uaup(1, data, 5, 2, 7, 3, 4, 0); + // CHECK: call void @llvm.xtensa.ee.fft.ams.s16.ld.incp.uaup + __builtin_xtensa_ee_fft_ams_s16_ld_r32_decp(4, data, 2, 4, 5, 0, 0, 1); + // CHECK: call void @llvm.xtensa.ee.fft.ams.s16.ld.r32.decp + __builtin_xtensa_ee_fft_ams_s16_st_incp(4, 5, data, data, 5, 6, 1, 1); + // CHECK: call void @llvm.xtensa.ee.fft.ams.s16.st.incp + __builtin_xtensa_ee_fft_cmul_s16_ld_xp(0, data, 
data, 4, 4, 0, 2); + // CHECK: call void @llvm.xtensa.ee.fft.cmul.s16.ld.xp + __builtin_xtensa_ee_fft_cmul_s16_st_xp(1, 1, 5, data, data, 6, 0, 0); + // CHECK: call void @llvm.xtensa.ee.fft.cmul.s16.st.xp + __builtin_xtensa_ee_fft_r2bf_s16(6, 0, 7, 1, 1); + // CHECK: call void @llvm.xtensa.ee.fft.r2bf.s16 + __builtin_xtensa_ee_fft_r2bf_s16_st_incp(4, 6, 3, data, 3); + // CHECK: call void @llvm.xtensa.ee.fft.r2bf.s16.st.incp + __builtin_xtensa_ee_fft_vst_r32_decp(6, data, 0); + // CHECK: call void @llvm.xtensa.ee.fft.vst.r32.decp + __builtin_xtensa_ee_ldf_128_ip(a, b, d, d, data, 46); + // CHECK: call void @llvm.xtensa.ee.ldf.128.ip + __builtin_xtensa_ee_ldf_128_xp(b, b, d, b, data, data); + // CHECK: call void @llvm.xtensa.ee.ldf.128.xp + __builtin_xtensa_ee_ldf_64_ip(b, d, data, -962); + // CHECK: call void @llvm.xtensa.ee.ldf.64.ip + __builtin_xtensa_ee_ldf_64_xp(d, c, data, data); + // CHECK: call void @llvm.xtensa.ee.ldf.64.xp + __builtin_xtensa_ee_ldqa_s16_128_ip(data, 1790); + // CHECK: call void @llvm.xtensa.ee.ldqa.s16.128.ip + __builtin_xtensa_ee_ldqa_s16_128_xp(data, data); + // CHECK: call void @llvm.xtensa.ee.ldqa.s16.128.xp + __builtin_xtensa_ee_ldqa_s8_128_ip(data, -331); + // CHECK: call void @llvm.xtensa.ee.ldqa.s8.128.ip + __builtin_xtensa_ee_ldqa_s8_128_xp(data, data); + // CHECK: call void @llvm.xtensa.ee.ldqa.s8.128.xp + __builtin_xtensa_ee_ldqa_u16_128_ip(data, -1341); + // CHECK: call void @llvm.xtensa.ee.ldqa.u16.128.ip + __builtin_xtensa_ee_ldqa_u16_128_xp(data, data); + // CHECK: call void @llvm.xtensa.ee.ldqa.u16.128.xp + __builtin_xtensa_ee_ldqa_u8_128_ip(data, -1372); + // CHECK: call void @llvm.xtensa.ee.ldqa.u8.128.ip + __builtin_xtensa_ee_ldqa_u8_128_xp(data, data); + // CHECK: call void @llvm.xtensa.ee.ldqa.u8.128.xp + __builtin_xtensa_ee_ldxq_32(3, 3, data, 2, 2); + // CHECK: call void @llvm.xtensa.ee.ldxq.32 + __builtin_xtensa_ee_ld_128_usar_ip(0, data, 344); + // CHECK: call void @llvm.xtensa.ee.ld.128.usar.ip + 
__builtin_xtensa_ee_ld_128_usar_xp(7, data, data); + // CHECK: call void @llvm.xtensa.ee.ld.128.usar.xp + __builtin_xtensa_ee_ld_accx_ip(data, 144); + // CHECK: call void @llvm.xtensa.ee.ld.accx.ip + __builtin_xtensa_ee_ld_qacc_h_h_32_ip(data, 355); + // CHECK: call void @llvm.xtensa.ee.ld.qacc.h.h.32.ip + __builtin_xtensa_ee_ld_qacc_h_l_128_ip(data, 1267); + // CHECK: call void @llvm.xtensa.ee.ld.qacc.h.l.128.ip + __builtin_xtensa_ee_ld_qacc_l_h_32_ip(data, 220); + // CHECK: call void @llvm.xtensa.ee.ld.qacc.l.h.32.ip + __builtin_xtensa_ee_ld_qacc_l_l_128_ip(data, 1453); + // CHECK: call void @llvm.xtensa.ee.ld.qacc.l.l.128.ip + __builtin_xtensa_ee_ld_ua_state_ip(data, -1314); + // CHECK: call void @llvm.xtensa.ee.ld.ua.state.ip + __builtin_xtensa_ee_movi_32_a(1, data, 0); + // CHECK: call void @llvm.xtensa.ee.movi.32.a + __builtin_xtensa_ee_movi_32_q(4, data, 3); + // CHECK: call void @llvm.xtensa.ee.movi.32.q + __builtin_xtensa_ee_mov_s16_qacc(0); + // CHECK: call void @llvm.xtensa.ee.mov.s16.qacc + __builtin_xtensa_ee_mov_s8_qacc(7); + // CHECK: call void @llvm.xtensa.ee.mov.s8.qacc + __builtin_xtensa_ee_mov_u16_qacc(0); + // CHECK: call void @llvm.xtensa.ee.mov.u16.qacc + __builtin_xtensa_ee_mov_u8_qacc(1); + // CHECK: call void @llvm.xtensa.ee.mov.u8.qacc + __builtin_xtensa_ee_notq(0, 2); + // CHECK: call void @llvm.xtensa.ee.notq + __builtin_xtensa_ee_orq(6, 2, 7); + // CHECK: call void @llvm.xtensa.ee.orq + __builtin_xtensa_ee_slci_2q(4, 3, 8); + // CHECK: call void @llvm.xtensa.ee.slci.2q + __builtin_xtensa_ee_slcxxp_2q(2, 5, data, data); + // CHECK: call void @llvm.xtensa.ee.slcxxp.2q + __builtin_xtensa_ee_srci_2q(0, 6, 13); + // CHECK: call void @llvm.xtensa.ee.srci.2q + __builtin_xtensa_ee_srcmb_s16_qacc(3, data, 1); + // CHECK: call void @llvm.xtensa.ee.srcmb.s16.qacc + __builtin_xtensa_ee_srcmb_s8_qacc(3, data, 1); + // CHECK: call void @llvm.xtensa.ee.srcmb.s8.qacc + __builtin_xtensa_ee_srcq_128_st_incp(0, 7, data); + // CHECK: call void 
@llvm.xtensa.ee.srcq.128.st.incp + __builtin_xtensa_ee_srcxxp_2q(1, 3, data, data); + // CHECK: call void @llvm.xtensa.ee.srcxxp.2q + __builtin_xtensa_ee_src_q(3, 4, 7); + // CHECK: call void @llvm.xtensa.ee.src.q + __builtin_xtensa_ee_src_q_ld_ip(4, data, -1149, 4, 7); + // CHECK: call void @llvm.xtensa.ee.src.q.ld.ip + __builtin_xtensa_ee_src_q_ld_xp(4, data, data, 7, 2); + // CHECK: call void @llvm.xtensa.ee.src.q.ld.xp + __builtin_xtensa_ee_src_q_qup(4, 5, 7); + // CHECK: call void @llvm.xtensa.ee.src.q.qup + __builtin_xtensa_ee_srs_accx(data, data, 0); + // CHECK: call void @llvm.xtensa.ee.srs.accx + __builtin_xtensa_ee_stf_128_ip(c, a, b, c, data, 14); + // CHECK: call void @llvm.xtensa.ee.stf.128.ip + __builtin_xtensa_ee_stf_128_xp(b, b, d, d, data, data); + // CHECK: call void @llvm.xtensa.ee.stf.128.xp + __builtin_xtensa_ee_stf_64_ip(d, c, data, 984); + // CHECK: call void @llvm.xtensa.ee.stf.64.ip + __builtin_xtensa_ee_stf_64_xp(d, b, data, data); + // CHECK: call void @llvm.xtensa.ee.stf.64.xp + __builtin_xtensa_ee_stxq_32(1, 0, data, 2, 6); + // CHECK: call void @llvm.xtensa.ee.stxq.32 + __builtin_xtensa_ee_st_accx_ip(data, 842); + // CHECK: call void @llvm.xtensa.ee.st.accx.ip + __builtin_xtensa_ee_st_qacc_h_h_32_ip(data, -243); + // CHECK: call void @llvm.xtensa.ee.st.qacc.h.h.32.ip + __builtin_xtensa_ee_st_qacc_h_l_128_ip(data, 663); + // CHECK: call void @llvm.xtensa.ee.st.qacc.h.l.128.ip + __builtin_xtensa_ee_st_qacc_l_h_32_ip(data, 97); + // CHECK: call void @llvm.xtensa.ee.st.qacc.l.h.32.ip + __builtin_xtensa_ee_st_qacc_l_l_128_ip(data, -864); + // CHECK: call void @llvm.xtensa.ee.st.qacc.l.l.128.ip + __builtin_xtensa_ee_st_ua_state_ip(data, -709); + // CHECK: call void @llvm.xtensa.ee.st.ua.state.ip + __builtin_xtensa_ee_vadds_s16(7, 5, 2); + // CHECK: call void @llvm.xtensa.ee.vadds.s16 + __builtin_xtensa_ee_vadds_s16_ld_incp(4, data, 6, 1, 6); + // CHECK: call void @llvm.xtensa.ee.vadds.s16.ld.incp + __builtin_xtensa_ee_vadds_s16_st_incp(4, 
data, 1, 3, 2); + // CHECK: call void @llvm.xtensa.ee.vadds.s16.st.incp + __builtin_xtensa_ee_vadds_s32(7, 0, 6); + // CHECK: call void @llvm.xtensa.ee.vadds.s32 + __builtin_xtensa_ee_vadds_s32_ld_incp(4, data, 4, 7, 1); + // CHECK: call void @llvm.xtensa.ee.vadds.s32.ld.incp + __builtin_xtensa_ee_vadds_s32_st_incp(1, data, 7, 4, 4); + // CHECK: call void @llvm.xtensa.ee.vadds.s32.st.incp + __builtin_xtensa_ee_vadds_s8(0, 3, 6); + // CHECK: call void @llvm.xtensa.ee.vadds.s8 + __builtin_xtensa_ee_vadds_s8_ld_incp(3, data, 2, 4, 2); + // CHECK: call void @llvm.xtensa.ee.vadds.s8.ld.incp + __builtin_xtensa_ee_vadds_s8_st_incp(2, data, 1, 7, 2); + // CHECK: call void @llvm.xtensa.ee.vadds.s8.st.incp + __builtin_xtensa_ee_vcmp_eq_s16(2, 1, 0); + // CHECK: call void @llvm.xtensa.ee.vcmp.eq.s16 + __builtin_xtensa_ee_vcmp_eq_s32(6, 1, 2); + // CHECK: call void @llvm.xtensa.ee.vcmp.eq.s32 + __builtin_xtensa_ee_vcmp_eq_s8(0, 2, 1); + // CHECK: call void @llvm.xtensa.ee.vcmp.eq.s8 + __builtin_xtensa_ee_vcmp_gt_s16(1, 2, 5); + // CHECK: call void @llvm.xtensa.ee.vcmp.gt.s16 + __builtin_xtensa_ee_vcmp_gt_s32(0, 0, 0); + // CHECK: call void @llvm.xtensa.ee.vcmp.gt.s32 + __builtin_xtensa_ee_vcmp_gt_s8(4, 1, 4); + // CHECK: call void @llvm.xtensa.ee.vcmp.gt.s8 + __builtin_xtensa_ee_vcmp_lt_s16(7, 4, 0); + // CHECK: call void @llvm.xtensa.ee.vcmp.lt.s16 + __builtin_xtensa_ee_vcmp_lt_s32(0, 1, 0); + // CHECK: call void @llvm.xtensa.ee.vcmp.lt.s32 + __builtin_xtensa_ee_vcmp_lt_s8(3, 3, 3); + // CHECK: call void @llvm.xtensa.ee.vcmp.lt.s8 + __builtin_xtensa_ee_vldbc_16(6, data); + // CHECK: call void @llvm.xtensa.ee.vldbc.16 + __builtin_xtensa_ee_vldbc_16_ip(3, data, 115); + // CHECK: call void @llvm.xtensa.ee.vldbc.16.ip + __builtin_xtensa_ee_vldbc_16_xp(0, data, data); + // CHECK: call void @llvm.xtensa.ee.vldbc.16.xp + __builtin_xtensa_ee_vldbc_32(4, data); + // CHECK: call void @llvm.xtensa.ee.vldbc.32 + __builtin_xtensa_ee_vldbc_32_ip(5, data, -213); + // CHECK: call void 
@llvm.xtensa.ee.vldbc.32.ip + __builtin_xtensa_ee_vldbc_32_xp(1, data, data); + // CHECK: call void @llvm.xtensa.ee.vldbc.32.xp + __builtin_xtensa_ee_vldbc_8(5, data); + // CHECK: call void @llvm.xtensa.ee.vldbc.8 + __builtin_xtensa_ee_vldbc_8_ip(7, data, 32); + // CHECK: call void @llvm.xtensa.ee.vldbc.8.ip + __builtin_xtensa_ee_vldbc_8_xp(6, data, data); + // CHECK: call void @llvm.xtensa.ee.vldbc.8.xp + __builtin_xtensa_ee_vldhbc_16_incp(1, 1, data); + // CHECK: call void @llvm.xtensa.ee.vldhbc.16.incp + __builtin_xtensa_ee_vld_128_ip(6, data, 1284); + // CHECK: call void @llvm.xtensa.ee.vld.128.ip + __builtin_xtensa_ee_vld_128_xp(4, data, data); + // CHECK: call void @llvm.xtensa.ee.vld.128.xp + __builtin_xtensa_ee_vld_h_64_ip(1, data, 400); + // CHECK: call void @llvm.xtensa.ee.vld.h.64.ip + __builtin_xtensa_ee_vld_h_64_xp(1, data, data); + // CHECK: call void @llvm.xtensa.ee.vld.h.64.xp + __builtin_xtensa_ee_vld_l_64_ip(0, data, 907); + // CHECK: call void @llvm.xtensa.ee.vld.l.64.ip + __builtin_xtensa_ee_vld_l_64_xp(0, data, data); + // CHECK: call void @llvm.xtensa.ee.vld.l.64.xp + __builtin_xtensa_ee_vmax_s16(4, 1, 0); + // CHECK: call void @llvm.xtensa.ee.vmax.s16 + __builtin_xtensa_ee_vmax_s16_ld_incp(3, data, 0, 3, 6); + // CHECK: call void @llvm.xtensa.ee.vmax.s16.ld.incp + __builtin_xtensa_ee_vmax_s16_st_incp(7, data, 2, 0, 2); + // CHECK: call void @llvm.xtensa.ee.vmax.s16.st.incp + __builtin_xtensa_ee_vmax_s32(1, 4, 0); + // CHECK: call void @llvm.xtensa.ee.vmax.s32 + __builtin_xtensa_ee_vmax_s32_ld_incp(6, data, 1, 7, 5); + // CHECK: call void @llvm.xtensa.ee.vmax.s32.ld.incp + __builtin_xtensa_ee_vmax_s32_st_incp(5, data, 4, 6, 4); + // CHECK: call void @llvm.xtensa.ee.vmax.s32.st.incp + __builtin_xtensa_ee_vmax_s8(2, 4, 6); + // CHECK: call void @llvm.xtensa.ee.vmax.s8 + __builtin_xtensa_ee_vmax_s8_ld_incp(7, data, 7, 3, 3); + // CHECK: call void @llvm.xtensa.ee.vmax.s8.ld.incp + __builtin_xtensa_ee_vmax_s8_st_incp(0, data, 3, 5, 1); + // CHECK: 
call void @llvm.xtensa.ee.vmax.s8.st.incp + __builtin_xtensa_ee_vmin_s16(7, 4, 3); + // CHECK: call void @llvm.xtensa.ee.vmin.s16 + __builtin_xtensa_ee_vmin_s16_ld_incp(2, data, 5, 2, 1); + // CHECK: call void @llvm.xtensa.ee.vmin.s16.ld.incp + __builtin_xtensa_ee_vmin_s16_st_incp(4, data, 4, 2, 0); + // CHECK: call void @llvm.xtensa.ee.vmin.s16.st.incp + __builtin_xtensa_ee_vmin_s32(4, 6, 5); + // CHECK: call void @llvm.xtensa.ee.vmin.s32 + __builtin_xtensa_ee_vmin_s32_ld_incp(1, data, 3, 3, 5); + // CHECK: call void @llvm.xtensa.ee.vmin.s32.ld.incp + __builtin_xtensa_ee_vmin_s32_st_incp(3, data, 1, 0, 6); + // CHECK: call void @llvm.xtensa.ee.vmin.s32.st.incp + __builtin_xtensa_ee_vmin_s8(5, 1, 3); + // CHECK: call void @llvm.xtensa.ee.vmin.s8 + __builtin_xtensa_ee_vmin_s8_ld_incp(0, data, 7, 2, 5); + // CHECK: call void @llvm.xtensa.ee.vmin.s8.ld.incp + __builtin_xtensa_ee_vmin_s8_st_incp(0, data, 5, 3, 7); + // CHECK: call void @llvm.xtensa.ee.vmin.s8.st.incp + __builtin_xtensa_ee_vmulas_s16_accx(4, 1); + // CHECK: call void @llvm.xtensa.ee.vmulas.s16.accx + __builtin_xtensa_ee_vmulas_s16_accx_ld_ip(7, data, 26, 1, 0); + // CHECK: call void @llvm.xtensa.ee.vmulas.s16.accx.ld.ip + __builtin_xtensa_ee_vmulas_s16_accx_ld_ip_qup(0, data, 7, 2, 3, 3, 7); + // CHECK: call void @llvm.xtensa.ee.vmulas.s16.accx.ld.ip.qup + __builtin_xtensa_ee_vmulas_s16_accx_ld_xp(6, data, data, 0, 3); + // CHECK: call void @llvm.xtensa.ee.vmulas.s16.accx.ld.xp + __builtin_xtensa_ee_vmulas_s16_accx_ld_xp_qup(7, data, data, 2, 4, 7, 2); + // CHECK: call void @llvm.xtensa.ee.vmulas.s16.accx.ld.xp.qup + __builtin_xtensa_ee_vmulas_s16_qacc(7, 7); + // CHECK: call void @llvm.xtensa.ee.vmulas.s16.qacc + __builtin_xtensa_ee_vmulas_s16_qacc_ldbc_incp(6, data, 1, 0); + // CHECK: call void @llvm.xtensa.ee.vmulas.s16.qacc.ldbc.incp + __builtin_xtensa_ee_vmulas_s16_qacc_ldbc_incp_qup(5, data, 4, 5, 3, 1); + // CHECK: call void @llvm.xtensa.ee.vmulas.s16.qacc.ldbc.incp.qup + 
__builtin_xtensa_ee_vmulas_s16_qacc_ld_ip(1, data, 35, 2, 2); + // CHECK: call void @llvm.xtensa.ee.vmulas.s16.qacc.ld.ip + __builtin_xtensa_ee_vmulas_s16_qacc_ld_ip_qup(1, data, 24, 0, 1, 1, 7); + // CHECK: call void @llvm.xtensa.ee.vmulas.s16.qacc.ld.ip.qup + __builtin_xtensa_ee_vmulas_s16_qacc_ld_xp(4, data, data, 5, 3); + // CHECK: call void @llvm.xtensa.ee.vmulas.s16.qacc.ld.xp + __builtin_xtensa_ee_vmulas_s16_qacc_ld_xp_qup(2, data, data, 3, 3, 1, 2); + // CHECK: call void @llvm.xtensa.ee.vmulas.s16.qacc.ld.xp.qup + __builtin_xtensa_ee_vmulas_s8_accx(5, 5); + // CHECK: call void @llvm.xtensa.ee.vmulas.s8.accx + __builtin_xtensa_ee_vmulas_s8_accx_ld_ip(0, data, -30, 1, 5); + // CHECK: call void @llvm.xtensa.ee.vmulas.s8.accx.ld.ip + __builtin_xtensa_ee_vmulas_s8_accx_ld_ip_qup(2, data, -1, 5, 0, 4, 0); + // CHECK: call void @llvm.xtensa.ee.vmulas.s8.accx.ld.ip.qup + __builtin_xtensa_ee_vmulas_s8_accx_ld_xp(1, data, data, 4, 5); + // CHECK: call void @llvm.xtensa.ee.vmulas.s8.accx.ld.xp + __builtin_xtensa_ee_vmulas_s8_accx_ld_xp_qup(4, data, data, 5, 7, 7, 0); + // CHECK: call void @llvm.xtensa.ee.vmulas.s8.accx.ld.xp.qup + __builtin_xtensa_ee_vmulas_s8_qacc(6, 7); + // CHECK: call void @llvm.xtensa.ee.vmulas.s8.qacc + __builtin_xtensa_ee_vmulas_s8_qacc_ldbc_incp(3, data, 6, 4); + // CHECK: call void @llvm.xtensa.ee.vmulas.s8.qacc.ldbc.incp + __builtin_xtensa_ee_vmulas_s8_qacc_ldbc_incp_qup(7, data, 2, 3, 4, 1); + // CHECK: call void @llvm.xtensa.ee.vmulas.s8.qacc.ldbc.incp.qup + __builtin_xtensa_ee_vmulas_s8_qacc_ld_ip(4, data, -99, 2, 4); + // CHECK: call void @llvm.xtensa.ee.vmulas.s8.qacc.ld.ip + __builtin_xtensa_ee_vmulas_s8_qacc_ld_ip_qup(4, data, -87, 5, 4, 2, 3); + // CHECK: call void @llvm.xtensa.ee.vmulas.s8.qacc.ld.ip.qup + __builtin_xtensa_ee_vmulas_s8_qacc_ld_xp(7, data, data, 1, 1); + // CHECK: call void @llvm.xtensa.ee.vmulas.s8.qacc.ld.xp + __builtin_xtensa_ee_vmulas_s8_qacc_ld_xp_qup(2, data, data, 1, 6, 3, 2); + // CHECK: call void 
@llvm.xtensa.ee.vmulas.s8.qacc.ld.xp.qup + __builtin_xtensa_ee_vmulas_u16_accx(2, 5); + // CHECK: call void @llvm.xtensa.ee.vmulas.u16.accx + __builtin_xtensa_ee_vmulas_u16_accx_ld_ip(0, data, -28, 6, 7); + // CHECK: call void @llvm.xtensa.ee.vmulas.u16.accx.ld.ip + __builtin_xtensa_ee_vmulas_u16_accx_ld_ip_qup(5, data, -41, 2, 1, 6, 6); + // CHECK: call void @llvm.xtensa.ee.vmulas.u16.accx.ld.ip.qup + __builtin_xtensa_ee_vmulas_u16_accx_ld_xp(1, data, data, 6, 7); + // CHECK: call void @llvm.xtensa.ee.vmulas.u16.accx.ld.xp + __builtin_xtensa_ee_vmulas_u16_accx_ld_xp_qup(7, data, data, 0, 0, 7, 5); + // CHECK: call void @llvm.xtensa.ee.vmulas.u16.accx.ld.xp.qup + __builtin_xtensa_ee_vmulas_u16_qacc(4, 3); + // CHECK: call void @llvm.xtensa.ee.vmulas.u16.qacc + __builtin_xtensa_ee_vmulas_u16_qacc_ldbc_incp(3, data, 5, 6); + // CHECK: call void @llvm.xtensa.ee.vmulas.u16.qacc.ldbc.incp + __builtin_xtensa_ee_vmulas_u16_qacc_ldbc_incp_qup(3, data, 4, 5, 4, 0); + // CHECK: call void @llvm.xtensa.ee.vmulas.u16.qacc.ldbc.incp.qup + __builtin_xtensa_ee_vmulas_u16_qacc_ld_ip(4, data, -94, 5, 3); + // CHECK: call void @llvm.xtensa.ee.vmulas.u16.qacc.ld.ip + __builtin_xtensa_ee_vmulas_u16_qacc_ld_ip_qup(7, data, -116, 5, 6, 1, 4); + // CHECK: call void @llvm.xtensa.ee.vmulas.u16.qacc.ld.ip.qup + __builtin_xtensa_ee_vmulas_u16_qacc_ld_xp(3, data, data, 0, 6); + // CHECK: call void @llvm.xtensa.ee.vmulas.u16.qacc.ld.xp + __builtin_xtensa_ee_vmulas_u16_qacc_ld_xp_qup(0, data, data, 5, 3, 3, 7); + // CHECK: call void @llvm.xtensa.ee.vmulas.u16.qacc.ld.xp.qup + __builtin_xtensa_ee_vmulas_u8_accx(3, 0); + // CHECK: call void @llvm.xtensa.ee.vmulas.u8.accx + __builtin_xtensa_ee_vmulas_u8_accx_ld_ip(1, data, -112, 1, 5); + // CHECK: call void @llvm.xtensa.ee.vmulas.u8.accx.ld.ip + __builtin_xtensa_ee_vmulas_u8_accx_ld_ip_qup(4, data, -68, 3, 1, 5, 4); + // CHECK: call void @llvm.xtensa.ee.vmulas.u8.accx.ld.ip.qup + __builtin_xtensa_ee_vmulas_u8_accx_ld_xp(2, data, data, 0, 5); + // 
CHECK: call void @llvm.xtensa.ee.vmulas.u8.accx.ld.xp + __builtin_xtensa_ee_vmulas_u8_accx_ld_xp_qup(0, data, data, 3, 5, 3, 3); + // CHECK: call void @llvm.xtensa.ee.vmulas.u8.accx.ld.xp.qup + __builtin_xtensa_ee_vmulas_u8_qacc(4, 2); + // CHECK: call void @llvm.xtensa.ee.vmulas.u8.qacc + __builtin_xtensa_ee_vmulas_u8_qacc_ldbc_incp(6, data, 4, 7); + // CHECK: call void @llvm.xtensa.ee.vmulas.u8.qacc.ldbc.incp + __builtin_xtensa_ee_vmulas_u8_qacc_ldbc_incp_qup(3, data, 3, 4, 4, 3); + // CHECK: call void @llvm.xtensa.ee.vmulas.u8.qacc.ldbc.incp.qup + __builtin_xtensa_ee_vmulas_u8_qacc_ld_ip(4, data, 46, 2, 0); + // CHECK: call void @llvm.xtensa.ee.vmulas.u8.qacc.ld.ip + __builtin_xtensa_ee_vmulas_u8_qacc_ld_ip_qup(4, data, -51, 6, 2, 2, 0); + // CHECK: call void @llvm.xtensa.ee.vmulas.u8.qacc.ld.ip.qup + __builtin_xtensa_ee_vmulas_u8_qacc_ld_xp(7, data, data, 1, 2); + // CHECK: call void @llvm.xtensa.ee.vmulas.u8.qacc.ld.xp + __builtin_xtensa_ee_vmulas_u8_qacc_ld_xp_qup(5, data, data, 0, 4, 5, 0); + // CHECK: call void @llvm.xtensa.ee.vmulas.u8.qacc.ld.xp.qup + __builtin_xtensa_ee_vmul_s16(4, 2, 5); + // CHECK: call void @llvm.xtensa.ee.vmul.s16 + __builtin_xtensa_ee_vmul_s16_ld_incp(2, data, 7, 0, 3); + // CHECK: call void @llvm.xtensa.ee.vmul.s16.ld.incp + __builtin_xtensa_ee_vmul_s16_st_incp(3, data, 5, 7, 6); + // CHECK: call void @llvm.xtensa.ee.vmul.s16.st.incp + __builtin_xtensa_ee_vmul_s8(1, 3, 6); + // CHECK: call void @llvm.xtensa.ee.vmul.s8 + __builtin_xtensa_ee_vmul_s8_ld_incp(3, data, 2, 2, 6); + // CHECK: call void @llvm.xtensa.ee.vmul.s8.ld.incp + __builtin_xtensa_ee_vmul_s8_st_incp(1, data, 2, 1, 4); + // CHECK: call void @llvm.xtensa.ee.vmul.s8.st.incp + __builtin_xtensa_ee_vmul_u16(6, 6, 2); + // CHECK: call void @llvm.xtensa.ee.vmul.u16 + __builtin_xtensa_ee_vmul_u16_ld_incp(0, data, 7, 0, 5); + // CHECK: call void @llvm.xtensa.ee.vmul.u16.ld.incp + __builtin_xtensa_ee_vmul_u16_st_incp(7, data, 4, 0, 2); + // CHECK: call void 
@llvm.xtensa.ee.vmul.u16.st.incp + __builtin_xtensa_ee_vmul_u8(1, 5, 7); + // CHECK: call void @llvm.xtensa.ee.vmul.u8 + __builtin_xtensa_ee_vmul_u8_ld_incp(1, data, 7, 4, 0); + // CHECK: call void @llvm.xtensa.ee.vmul.u8.ld.incp + __builtin_xtensa_ee_vmul_u8_st_incp(7, data, 0, 3, 4); + // CHECK: call void @llvm.xtensa.ee.vmul.u8.st.incp + __builtin_xtensa_ee_vprelu_s16(0, 0, 6, data); + // CHECK: call void @llvm.xtensa.ee.vprelu.s16 + __builtin_xtensa_ee_vprelu_s8(4, 7, 1, data); + // CHECK: call void @llvm.xtensa.ee.vprelu.s8 + __builtin_xtensa_ee_vrelu_s16(6, data, data); + // CHECK: call void @llvm.xtensa.ee.vrelu.s16 + __builtin_xtensa_ee_vrelu_s8(1, data, data); + // CHECK: call void @llvm.xtensa.ee.vrelu.s8 + __builtin_xtensa_ee_vsl_32(5, 1); + // CHECK: call void @llvm.xtensa.ee.vsl.32 + __builtin_xtensa_ee_vsmulas_s16_qacc(4, 6, 4); + // CHECK: call void @llvm.xtensa.ee.vsmulas.s16.qacc + __builtin_xtensa_ee_vsmulas_s16_qacc_ld_incp(4, data, 6, 6, 4); + // CHECK: call void @llvm.xtensa.ee.vsmulas.s16.qacc.ld.incp + __builtin_xtensa_ee_vsmulas_s8_qacc(3, 2, 9); + // CHECK: call void @llvm.xtensa.ee.vsmulas.s8.qacc + __builtin_xtensa_ee_vsmulas_s8_qacc_ld_incp(3, data, 2, 2, 5); + // CHECK: call void @llvm.xtensa.ee.vsmulas.s8.qacc.ld.incp + __builtin_xtensa_ee_vsr_32(3, 1); + // CHECK: call void @llvm.xtensa.ee.vsr.32 + __builtin_xtensa_ee_vst_128_ip(6, data, 68); + // CHECK: call void @llvm.xtensa.ee.vst.128.ip + __builtin_xtensa_ee_vst_128_xp(7, data, data); + // CHECK: call void @llvm.xtensa.ee.vst.128.xp + __builtin_xtensa_ee_vst_h_64_ip(2, data, 843); + // CHECK: call void @llvm.xtensa.ee.vst.h.64.ip + __builtin_xtensa_ee_vst_h_64_xp(5, data, data); + // CHECK: call void @llvm.xtensa.ee.vst.h.64.xp + __builtin_xtensa_ee_vst_l_64_ip(7, data, -658); + // CHECK: call void @llvm.xtensa.ee.vst.l.64.ip + __builtin_xtensa_ee_vst_l_64_xp(7, data, data); + // CHECK: call void @llvm.xtensa.ee.vst.l.64.xp + __builtin_xtensa_ee_vsubs_s16(7, 5, 3); + // CHECK: 
call void @llvm.xtensa.ee.vsubs.s16 + __builtin_xtensa_ee_vsubs_s16_ld_incp(7, data, 5, 1, 2); + // CHECK: call void @llvm.xtensa.ee.vsubs.s16.ld.incp + __builtin_xtensa_ee_vsubs_s16_st_incp(3, data, 5, 2, 3); + // CHECK: call void @llvm.xtensa.ee.vsubs.s16.st.incp + __builtin_xtensa_ee_vsubs_s32(0, 7, 3); + // CHECK: call void @llvm.xtensa.ee.vsubs.s32 + __builtin_xtensa_ee_vsubs_s32_ld_incp(0, data, 4, 1, 0); + // CHECK: call void @llvm.xtensa.ee.vsubs.s32.ld.incp + __builtin_xtensa_ee_vsubs_s32_st_incp(4, data, 6, 7, 6); + // CHECK: call void @llvm.xtensa.ee.vsubs.s32.st.incp + __builtin_xtensa_ee_vsubs_s8(4, 4, 0); + // CHECK: call void @llvm.xtensa.ee.vsubs.s8 + __builtin_xtensa_ee_vsubs_s8_ld_incp(1, data, 1, 4, 4); + // CHECK: call void @llvm.xtensa.ee.vsubs.s8.ld.incp + __builtin_xtensa_ee_vsubs_s8_st_incp(2, data, 4, 4, 1); + // CHECK: call void @llvm.xtensa.ee.vsubs.s8.st.incp + __builtin_xtensa_ee_vunzip_16(7, 6); + // CHECK: call void @llvm.xtensa.ee.vunzip.16 + __builtin_xtensa_ee_vunzip_32(7, 3); + // CHECK: call void @llvm.xtensa.ee.vunzip.32 + __builtin_xtensa_ee_vunzip_8(0, 2); + // CHECK: call void @llvm.xtensa.ee.vunzip.8 + __builtin_xtensa_ee_vzip_16(1, 1); + // CHECK: call void @llvm.xtensa.ee.vzip.16 + __builtin_xtensa_ee_vzip_32(0, 1); + // CHECK: call void @llvm.xtensa.ee.vzip.32 + __builtin_xtensa_ee_vzip_8(0, 5); + // CHECK: call void @llvm.xtensa.ee.vzip.8 + __builtin_xtensa_ee_xorq(4, 3, 3); + // CHECK: call void @llvm.xtensa.ee.xorq + __builtin_xtensa_ee_zero_accx(); + // CHECK: call void @llvm.xtensa.ee.zero.accx + __builtin_xtensa_ee_zero_q(0); + // CHECK: call void @llvm.xtensa.ee.zero.q + __builtin_xtensa_ee_zero_qacc(); + // CHECK: call void @llvm.xtensa.ee.zero.qacc + data = __builtin_xtensa_rur_accx_0(); + // CHECK: call i32 @llvm.xtensa.rur.accx.0 + data = __builtin_xtensa_rur_accx_1(); + // CHECK: call i32 @llvm.xtensa.rur.accx.1 + data = __builtin_xtensa_rur_fft_bit_width(); + // CHECK: call i32 @llvm.xtensa.rur.fft.bit.width 
+ data = __builtin_xtensa_rur_gpio_out(); + // CHECK: call i32 @llvm.xtensa.rur.gpio.out + data = __builtin_xtensa_rur_qacc_h_0(); + // CHECK: call i32 @llvm.xtensa.rur.qacc.h.0 + data = __builtin_xtensa_rur_qacc_h_1(); + // CHECK: call i32 @llvm.xtensa.rur.qacc.h.1 + data = __builtin_xtensa_rur_qacc_h_2(); + // CHECK: call i32 @llvm.xtensa.rur.qacc.h.2 + data = __builtin_xtensa_rur_qacc_h_3(); + // CHECK: call i32 @llvm.xtensa.rur.qacc.h.3 + data = __builtin_xtensa_rur_qacc_h_4(); + // CHECK: call i32 @llvm.xtensa.rur.qacc.h.4 + data = __builtin_xtensa_rur_qacc_l_0(); + // CHECK: call i32 @llvm.xtensa.rur.qacc.l.0 + data = __builtin_xtensa_rur_qacc_l_1(); + // CHECK: call i32 @llvm.xtensa.rur.qacc.l.1 + data = __builtin_xtensa_rur_qacc_l_2(); + // CHECK: call i32 @llvm.xtensa.rur.qacc.l.2 + data = __builtin_xtensa_rur_qacc_l_3(); + // CHECK: call i32 @llvm.xtensa.rur.qacc.l.3 + data = __builtin_xtensa_rur_qacc_l_4(); + // CHECK: call i32 @llvm.xtensa.rur.qacc.l.4 + data = __builtin_xtensa_rur_sar_byte(); + // CHECK: call i32 @llvm.xtensa.rur.sar.byte + data = __builtin_xtensa_rur_ua_state_0(); + // CHECK: call i32 @llvm.xtensa.rur.ua.state.0 + data = __builtin_xtensa_rur_ua_state_1(); + // CHECK: call i32 @llvm.xtensa.rur.ua.state.1 + data = __builtin_xtensa_rur_ua_state_2(); + // CHECK: call i32 @llvm.xtensa.rur.ua.state.2 + data = __builtin_xtensa_rur_ua_state_3(); + // CHECK: call i32 @llvm.xtensa.rur.ua.state.3 + __builtin_xtensa_wur_accx_0(data); + // CHECK: call void @llvm.xtensa.wur.accx.0 + __builtin_xtensa_wur_accx_1(data); + // CHECK: call void @llvm.xtensa.wur.accx.1 + __builtin_xtensa_wur_fft_bit_width(data); + // CHECK: call void @llvm.xtensa.wur.fft.bit.width + __builtin_xtensa_wur_gpio_out(data); + // CHECK: call void @llvm.xtensa.wur.gpio.out + __builtin_xtensa_wur_qacc_h_0(data); + // CHECK: call void @llvm.xtensa.wur.qacc.h.0 + __builtin_xtensa_wur_qacc_h_1(data); + // CHECK: call void @llvm.xtensa.wur.qacc.h.1 + 
__builtin_xtensa_wur_qacc_h_2(data); + // CHECK: call void @llvm.xtensa.wur.qacc.h.2 + __builtin_xtensa_wur_qacc_h_3(data); + // CHECK: call void @llvm.xtensa.wur.qacc.h.3 + __builtin_xtensa_wur_qacc_h_4(data); + // CHECK: call void @llvm.xtensa.wur.qacc.h.4 + __builtin_xtensa_wur_qacc_l_0(data); + // CHECK: call void @llvm.xtensa.wur.qacc.l.0 + __builtin_xtensa_wur_qacc_l_1(data); + // CHECK: call void @llvm.xtensa.wur.qacc.l.1 + __builtin_xtensa_wur_qacc_l_2(data); + // CHECK: call void @llvm.xtensa.wur.qacc.l.2 + __builtin_xtensa_wur_qacc_l_3(data); + // CHECK: call void @llvm.xtensa.wur.qacc.l.3 + __builtin_xtensa_wur_qacc_l_4(data); + // CHECK: call void @llvm.xtensa.wur.qacc.l.4 + __builtin_xtensa_wur_sar_byte(data); + // CHECK: call void @llvm.xtensa.wur.sar.byte + __builtin_xtensa_wur_ua_state_0(data); + // CHECK: call void @llvm.xtensa.wur.ua.state.0 + __builtin_xtensa_wur_ua_state_1(data); + // CHECK: call void @llvm.xtensa.wur.ua.state.1 + __builtin_xtensa_wur_ua_state_2(data); + // CHECK: call void @llvm.xtensa.wur.ua.state.2 + __builtin_xtensa_wur_ua_state_3(data); + // CHECK: call void @llvm.xtensa.wur.ua.state.3 + __builtin_xtensa_mv_qr(4, 5); + // CHECK: call void @llvm.xtensa.mv.qr +} diff --git a/llvm/include/llvm/IR/IntrinsicsXtensa.td b/llvm/include/llvm/IR/IntrinsicsXtensa.td index d7d25609b5d56..ab5d463277c37 100644 --- a/llvm/include/llvm/IR/IntrinsicsXtensa.td +++ b/llvm/include/llvm/IR/IntrinsicsXtensa.td @@ -248,4 +248,8 @@ def int_xtensa_rsr_m3: ClangBuiltin<"__builtin_xtensa_rsr_m3">, def int_xtensa_xsr_m3: ClangBuiltin<"__builtin_xtensa_xsr_m3">, Intrinsic<[], [llvm_ptr_ty], []>; + +// Generated code +// --------------- +include "llvm/IR/IntrinsicsXtensaESP32S3.td" } diff --git a/llvm/include/llvm/IR/IntrinsicsXtensaESP32S3.td b/llvm/include/llvm/IR/IntrinsicsXtensaESP32S3.td new file mode 100644 index 0000000000000..e656f362445a3 --- /dev/null +++ b/llvm/include/llvm/IR/IntrinsicsXtensaESP32S3.td @@ -0,0 +1,767 @@ +//===- 
IntrinsicsXtensa.td - Defines Xtensa intrinsics -----*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines all of the Xtensa-specific intrinsics. +// Automatically generated file, do not edit! +//===----------------------------------------------------------------------===// + +def int_xtensa_ee_andq: ClangBuiltin<"__builtin_xtensa_ee_andq">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_bitrev: ClangBuiltin<"__builtin_xtensa_ee_bitrev">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_cmul_s16: ClangBuiltin<"__builtin_xtensa_ee_cmul_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_cmul_s16_ld_incp: ClangBuiltin<"__builtin_xtensa_ee_cmul_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_cmul_s16_st_incp: ClangBuiltin<"__builtin_xtensa_ee_cmul_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_fft_ams_s16_ld_incp: ClangBuiltin<"__builtin_xtensa_ee_fft_ams_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_fft_ams_s16_ld_incp_uaup: ClangBuiltin<"__builtin_xtensa_ee_fft_ams_s16_ld_incp_uaup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_fft_ams_s16_ld_r32_decp: ClangBuiltin<"__builtin_xtensa_ee_fft_ams_s16_ld_r32_decp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_fft_ams_s16_st_incp: ClangBuiltin<"__builtin_xtensa_ee_fft_ams_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_fft_cmul_s16_ld_xp: ClangBuiltin<"__builtin_xtensa_ee_fft_cmul_s16_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_fft_cmul_s16_st_xp: ClangBuiltin<"__builtin_xtensa_ee_fft_cmul_s16_st_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_fft_r2bf_s16: ClangBuiltin<"__builtin_xtensa_ee_fft_r2bf_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_fft_r2bf_s16_st_incp: ClangBuiltin<"__builtin_xtensa_ee_fft_r2bf_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_fft_vst_r32_decp: ClangBuiltin<"__builtin_xtensa_ee_fft_vst_r32_decp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_ldf_128_ip: ClangBuiltin<"__builtin_xtensa_ee_ldf_128_ip">, + Intrinsic<[], [llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty], 
[ImmArg>]>; + +def int_xtensa_ee_ldf_128_xp: ClangBuiltin<"__builtin_xtensa_ee_ldf_128_xp">, + Intrinsic<[], [llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty], []>; + +def int_xtensa_ee_ldf_64_ip: ClangBuiltin<"__builtin_xtensa_ee_ldf_64_ip">, + Intrinsic<[], [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_ldf_64_xp: ClangBuiltin<"__builtin_xtensa_ee_ldf_64_xp">, + Intrinsic<[], [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty], []>; + +def int_xtensa_ee_ldqa_s16_128_ip: ClangBuiltin<"__builtin_xtensa_ee_ldqa_s16_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_ldqa_s16_128_xp: ClangBuiltin<"__builtin_xtensa_ee_ldqa_s16_128_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_xtensa_ee_ldqa_s8_128_ip: ClangBuiltin<"__builtin_xtensa_ee_ldqa_s8_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_ldqa_s8_128_xp: ClangBuiltin<"__builtin_xtensa_ee_ldqa_s8_128_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_xtensa_ee_ldqa_u16_128_ip: ClangBuiltin<"__builtin_xtensa_ee_ldqa_u16_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_ldqa_u16_128_xp: ClangBuiltin<"__builtin_xtensa_ee_ldqa_u16_128_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_xtensa_ee_ldqa_u8_128_ip: ClangBuiltin<"__builtin_xtensa_ee_ldqa_u8_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_ldqa_u8_128_xp: ClangBuiltin<"__builtin_xtensa_ee_ldqa_u8_128_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_xtensa_ee_ldxq_32: ClangBuiltin<"__builtin_xtensa_ee_ldxq_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_ld_128_usar_ip: ClangBuiltin<"__builtin_xtensa_ee_ld_128_usar_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, 
llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_ld_128_usar_xp: ClangBuiltin<"__builtin_xtensa_ee_ld_128_usar_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_ld_accx_ip: ClangBuiltin<"__builtin_xtensa_ee_ld_accx_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_ld_qacc_h_h_32_ip: ClangBuiltin<"__builtin_xtensa_ee_ld_qacc_h_h_32_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_ld_qacc_h_l_128_ip: ClangBuiltin<"__builtin_xtensa_ee_ld_qacc_h_l_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_ld_qacc_l_h_32_ip: ClangBuiltin<"__builtin_xtensa_ee_ld_qacc_l_h_32_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_ld_qacc_l_l_128_ip: ClangBuiltin<"__builtin_xtensa_ee_ld_qacc_l_l_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_ld_ua_state_ip: ClangBuiltin<"__builtin_xtensa_ee_ld_ua_state_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_movi_32_a: ClangBuiltin<"__builtin_xtensa_ee_movi_32_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_movi_32_q: ClangBuiltin<"__builtin_xtensa_ee_movi_32_q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_mov_s16_qacc: ClangBuiltin<"__builtin_xtensa_ee_mov_s16_qacc">, + Intrinsic<[], [llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_mov_s8_qacc: ClangBuiltin<"__builtin_xtensa_ee_mov_s8_qacc">, + Intrinsic<[], [llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_mov_u16_qacc: ClangBuiltin<"__builtin_xtensa_ee_mov_u16_qacc">, + Intrinsic<[], [llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_mov_u8_qacc: ClangBuiltin<"__builtin_xtensa_ee_mov_u8_qacc">, + Intrinsic<[], [llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_notq: ClangBuiltin<"__builtin_xtensa_ee_notq">, + Intrinsic<[], 
[llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_orq: ClangBuiltin<"__builtin_xtensa_ee_orq">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_slci_2q: ClangBuiltin<"__builtin_xtensa_ee_slci_2q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_slcxxp_2q: ClangBuiltin<"__builtin_xtensa_ee_slcxxp_2q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_srci_2q: ClangBuiltin<"__builtin_xtensa_ee_srci_2q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_srcmb_s16_qacc: ClangBuiltin<"__builtin_xtensa_ee_srcmb_s16_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_srcmb_s8_qacc: ClangBuiltin<"__builtin_xtensa_ee_srcmb_s8_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_srcq_128_st_incp: ClangBuiltin<"__builtin_xtensa_ee_srcq_128_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_srcxxp_2q: ClangBuiltin<"__builtin_xtensa_ee_srcxxp_2q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_src_q: ClangBuiltin<"__builtin_xtensa_ee_src_q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_src_q_ld_ip: ClangBuiltin<"__builtin_xtensa_ee_src_q_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_src_q_ld_xp: ClangBuiltin<"__builtin_xtensa_ee_src_q_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_src_q_qup: 
ClangBuiltin<"__builtin_xtensa_ee_src_q_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_srs_accx: ClangBuiltin<"__builtin_xtensa_ee_srs_accx">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_stf_128_ip: ClangBuiltin<"__builtin_xtensa_ee_stf_128_ip">, + Intrinsic<[], [llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_stf_128_xp: ClangBuiltin<"__builtin_xtensa_ee_stf_128_xp">, + Intrinsic<[], [llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty], []>; + +def int_xtensa_ee_stf_64_ip: ClangBuiltin<"__builtin_xtensa_ee_stf_64_ip">, + Intrinsic<[], [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_stf_64_xp: ClangBuiltin<"__builtin_xtensa_ee_stf_64_xp">, + Intrinsic<[], [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty], []>; + +def int_xtensa_ee_stxq_32: ClangBuiltin<"__builtin_xtensa_ee_stxq_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_st_accx_ip: ClangBuiltin<"__builtin_xtensa_ee_st_accx_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_st_qacc_h_h_32_ip: ClangBuiltin<"__builtin_xtensa_ee_st_qacc_h_h_32_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_st_qacc_h_l_128_ip: ClangBuiltin<"__builtin_xtensa_ee_st_qacc_h_l_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_st_qacc_l_h_32_ip: ClangBuiltin<"__builtin_xtensa_ee_st_qacc_l_h_32_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_st_qacc_l_l_128_ip: ClangBuiltin<"__builtin_xtensa_ee_st_qacc_l_l_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_st_ua_state_ip: 
ClangBuiltin<"__builtin_xtensa_ee_st_ua_state_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_vadds_s16: ClangBuiltin<"__builtin_xtensa_ee_vadds_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vadds_s16_ld_incp: ClangBuiltin<"__builtin_xtensa_ee_vadds_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vadds_s16_st_incp: ClangBuiltin<"__builtin_xtensa_ee_vadds_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vadds_s32: ClangBuiltin<"__builtin_xtensa_ee_vadds_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vadds_s32_ld_incp: ClangBuiltin<"__builtin_xtensa_ee_vadds_s32_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vadds_s32_st_incp: ClangBuiltin<"__builtin_xtensa_ee_vadds_s32_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vadds_s8: ClangBuiltin<"__builtin_xtensa_ee_vadds_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vadds_s8_ld_incp: ClangBuiltin<"__builtin_xtensa_ee_vadds_s8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vadds_s8_st_incp: ClangBuiltin<"__builtin_xtensa_ee_vadds_s8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vcmp_eq_s16: ClangBuiltin<"__builtin_xtensa_ee_vcmp_eq_s16">, + 
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vcmp_eq_s32: ClangBuiltin<"__builtin_xtensa_ee_vcmp_eq_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vcmp_eq_s8: ClangBuiltin<"__builtin_xtensa_ee_vcmp_eq_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vcmp_gt_s16: ClangBuiltin<"__builtin_xtensa_ee_vcmp_gt_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vcmp_gt_s32: ClangBuiltin<"__builtin_xtensa_ee_vcmp_gt_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vcmp_gt_s8: ClangBuiltin<"__builtin_xtensa_ee_vcmp_gt_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vcmp_lt_s16: ClangBuiltin<"__builtin_xtensa_ee_vcmp_lt_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vcmp_lt_s32: ClangBuiltin<"__builtin_xtensa_ee_vcmp_lt_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vcmp_lt_s8: ClangBuiltin<"__builtin_xtensa_ee_vcmp_lt_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vldbc_16: ClangBuiltin<"__builtin_xtensa_ee_vldbc_16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_vldbc_16_ip: ClangBuiltin<"__builtin_xtensa_ee_vldbc_16_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vldbc_16_xp: ClangBuiltin<"__builtin_xtensa_ee_vldbc_16_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_vldbc_32: ClangBuiltin<"__builtin_xtensa_ee_vldbc_32">, + Intrinsic<[], [llvm_i32_ty, 
llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_vldbc_32_ip: ClangBuiltin<"__builtin_xtensa_ee_vldbc_32_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vldbc_32_xp: ClangBuiltin<"__builtin_xtensa_ee_vldbc_32_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_vldbc_8: ClangBuiltin<"__builtin_xtensa_ee_vldbc_8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_vldbc_8_ip: ClangBuiltin<"__builtin_xtensa_ee_vldbc_8_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vldbc_8_xp: ClangBuiltin<"__builtin_xtensa_ee_vldbc_8_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_vldhbc_16_incp: ClangBuiltin<"__builtin_xtensa_ee_vldhbc_16_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vld_128_ip: ClangBuiltin<"__builtin_xtensa_ee_vld_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vld_128_xp: ClangBuiltin<"__builtin_xtensa_ee_vld_128_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_vld_h_64_ip: ClangBuiltin<"__builtin_xtensa_ee_vld_h_64_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vld_h_64_xp: ClangBuiltin<"__builtin_xtensa_ee_vld_h_64_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_vld_l_64_ip: ClangBuiltin<"__builtin_xtensa_ee_vld_l_64_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vld_l_64_xp: ClangBuiltin<"__builtin_xtensa_ee_vld_l_64_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_vmax_s16: ClangBuiltin<"__builtin_xtensa_ee_vmax_s16">, + Intrinsic<[], [llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmax_s16_ld_incp: ClangBuiltin<"__builtin_xtensa_ee_vmax_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmax_s16_st_incp: ClangBuiltin<"__builtin_xtensa_ee_vmax_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmax_s32: ClangBuiltin<"__builtin_xtensa_ee_vmax_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmax_s32_ld_incp: ClangBuiltin<"__builtin_xtensa_ee_vmax_s32_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmax_s32_st_incp: ClangBuiltin<"__builtin_xtensa_ee_vmax_s32_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmax_s8: ClangBuiltin<"__builtin_xtensa_ee_vmax_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmax_s8_ld_incp: ClangBuiltin<"__builtin_xtensa_ee_vmax_s8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmax_s8_st_incp: ClangBuiltin<"__builtin_xtensa_ee_vmax_s8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmin_s16: ClangBuiltin<"__builtin_xtensa_ee_vmin_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmin_s16_ld_incp: ClangBuiltin<"__builtin_xtensa_ee_vmin_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, 
llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmin_s16_st_incp: ClangBuiltin<"__builtin_xtensa_ee_vmin_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmin_s32: ClangBuiltin<"__builtin_xtensa_ee_vmin_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmin_s32_ld_incp: ClangBuiltin<"__builtin_xtensa_ee_vmin_s32_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmin_s32_st_incp: ClangBuiltin<"__builtin_xtensa_ee_vmin_s32_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmin_s8: ClangBuiltin<"__builtin_xtensa_ee_vmin_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmin_s8_ld_incp: ClangBuiltin<"__builtin_xtensa_ee_vmin_s8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmin_s8_st_incp: ClangBuiltin<"__builtin_xtensa_ee_vmin_s8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s16_accx: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s16_accx">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s16_accx_ld_ip: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s16_accx_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s16_accx_ld_ip_qup: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s16_accx_ld_ip_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s16_accx_ld_xp: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s16_accx_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s16_accx_ld_xp_qup: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s16_accx_ld_xp_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s16_qacc: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s16_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s16_qacc_ldbc_incp: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s16_qacc_ldbc_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s16_qacc_ldbc_incp_qup: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s16_qacc_ldbc_incp_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s16_qacc_ld_ip: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s16_qacc_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s16_qacc_ld_ip_qup: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s16_qacc_ld_ip_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s16_qacc_ld_xp: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s16_qacc_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s16_qacc_ld_xp_qup: 
ClangBuiltin<"__builtin_xtensa_ee_vmulas_s16_qacc_ld_xp_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s8_accx: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s8_accx">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s8_accx_ld_ip: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s8_accx_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s8_accx_ld_ip_qup: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s8_accx_ld_ip_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s8_accx_ld_xp: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s8_accx_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s8_accx_ld_xp_qup: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s8_accx_ld_xp_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s8_qacc: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s8_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s8_qacc_ldbc_incp: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s8_qacc_ldbc_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s8_qacc_ldbc_incp_qup: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s8_qacc_ldbc_incp_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def 
int_xtensa_ee_vmulas_s8_qacc_ld_ip: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s8_qacc_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s8_qacc_ld_ip_qup: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s8_qacc_ld_ip_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s8_qacc_ld_xp: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s8_qacc_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_s8_qacc_ld_xp_qup: ClangBuiltin<"__builtin_xtensa_ee_vmulas_s8_qacc_ld_xp_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u16_accx: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u16_accx">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u16_accx_ld_ip: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u16_accx_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u16_accx_ld_ip_qup: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u16_accx_ld_ip_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u16_accx_ld_xp: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u16_accx_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u16_accx_ld_xp_qup: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u16_accx_ld_xp_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u16_qacc: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u16_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u16_qacc_ldbc_incp: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u16_qacc_ldbc_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u16_qacc_ldbc_incp_qup: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u16_qacc_ldbc_incp_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u16_qacc_ld_ip: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u16_qacc_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u16_qacc_ld_ip_qup: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u16_qacc_ld_ip_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u16_qacc_ld_xp: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u16_qacc_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u16_qacc_ld_xp_qup: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u16_qacc_ld_xp_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u8_accx: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u8_accx">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u8_accx_ld_ip: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u8_accx_ld_ip">, + 
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u8_accx_ld_ip_qup: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u8_accx_ld_ip_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u8_accx_ld_xp: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u8_accx_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u8_accx_ld_xp_qup: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u8_accx_ld_xp_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u8_qacc: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u8_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u8_qacc_ldbc_incp: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u8_qacc_ldbc_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u8_qacc_ldbc_incp_qup: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u8_qacc_ldbc_incp_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u8_qacc_ld_ip: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u8_qacc_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u8_qacc_ld_ip_qup: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u8_qacc_ld_ip_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + 
+def int_xtensa_ee_vmulas_u8_qacc_ld_xp: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u8_qacc_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmulas_u8_qacc_ld_xp_qup: ClangBuiltin<"__builtin_xtensa_ee_vmulas_u8_qacc_ld_xp_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmul_s16: ClangBuiltin<"__builtin_xtensa_ee_vmul_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmul_s16_ld_incp: ClangBuiltin<"__builtin_xtensa_ee_vmul_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmul_s16_st_incp: ClangBuiltin<"__builtin_xtensa_ee_vmul_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmul_s8: ClangBuiltin<"__builtin_xtensa_ee_vmul_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmul_s8_ld_incp: ClangBuiltin<"__builtin_xtensa_ee_vmul_s8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmul_s8_st_incp: ClangBuiltin<"__builtin_xtensa_ee_vmul_s8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmul_u16: ClangBuiltin<"__builtin_xtensa_ee_vmul_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmul_u16_ld_incp: ClangBuiltin<"__builtin_xtensa_ee_vmul_u16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], 
[ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmul_u16_st_incp: ClangBuiltin<"__builtin_xtensa_ee_vmul_u16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmul_u8: ClangBuiltin<"__builtin_xtensa_ee_vmul_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmul_u8_ld_incp: ClangBuiltin<"__builtin_xtensa_ee_vmul_u8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vmul_u8_st_incp: ClangBuiltin<"__builtin_xtensa_ee_vmul_u8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vprelu_s16: ClangBuiltin<"__builtin_xtensa_ee_vprelu_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vprelu_s8: ClangBuiltin<"__builtin_xtensa_ee_vprelu_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vrelu_s16: ClangBuiltin<"__builtin_xtensa_ee_vrelu_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_vrelu_s8: ClangBuiltin<"__builtin_xtensa_ee_vrelu_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_vsl_32: ClangBuiltin<"__builtin_xtensa_ee_vsl_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vsmulas_s16_qacc: ClangBuiltin<"__builtin_xtensa_ee_vsmulas_s16_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vsmulas_s16_qacc_ld_incp: ClangBuiltin<"__builtin_xtensa_ee_vsmulas_s16_qacc_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, 
llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vsmulas_s8_qacc: ClangBuiltin<"__builtin_xtensa_ee_vsmulas_s8_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vsmulas_s8_qacc_ld_incp: ClangBuiltin<"__builtin_xtensa_ee_vsmulas_s8_qacc_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vsr_32: ClangBuiltin<"__builtin_xtensa_ee_vsr_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vst_128_ip: ClangBuiltin<"__builtin_xtensa_ee_vst_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vst_128_xp: ClangBuiltin<"__builtin_xtensa_ee_vst_128_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_vst_h_64_ip: ClangBuiltin<"__builtin_xtensa_ee_vst_h_64_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vst_h_64_xp: ClangBuiltin<"__builtin_xtensa_ee_vst_h_64_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_vst_l_64_ip: ClangBuiltin<"__builtin_xtensa_ee_vst_l_64_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vst_l_64_xp: ClangBuiltin<"__builtin_xtensa_ee_vst_l_64_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_vsubs_s16: ClangBuiltin<"__builtin_xtensa_ee_vsubs_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vsubs_s16_ld_incp: ClangBuiltin<"__builtin_xtensa_ee_vsubs_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vsubs_s16_st_incp: 
ClangBuiltin<"__builtin_xtensa_ee_vsubs_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vsubs_s32: ClangBuiltin<"__builtin_xtensa_ee_vsubs_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vsubs_s32_ld_incp: ClangBuiltin<"__builtin_xtensa_ee_vsubs_s32_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vsubs_s32_st_incp: ClangBuiltin<"__builtin_xtensa_ee_vsubs_s32_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vsubs_s8: ClangBuiltin<"__builtin_xtensa_ee_vsubs_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vsubs_s8_ld_incp: ClangBuiltin<"__builtin_xtensa_ee_vsubs_s8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vsubs_s8_st_incp: ClangBuiltin<"__builtin_xtensa_ee_vsubs_s8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vunzip_16: ClangBuiltin<"__builtin_xtensa_ee_vunzip_16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vunzip_32: ClangBuiltin<"__builtin_xtensa_ee_vunzip_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vunzip_8: ClangBuiltin<"__builtin_xtensa_ee_vunzip_8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vzip_16: ClangBuiltin<"__builtin_xtensa_ee_vzip_16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vzip_32: 
ClangBuiltin<"__builtin_xtensa_ee_vzip_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_vzip_8: ClangBuiltin<"__builtin_xtensa_ee_vzip_8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_xtensa_ee_xorq: ClangBuiltin<"__builtin_xtensa_ee_xorq">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_xtensa_ee_zero_accx: ClangBuiltin<"__builtin_xtensa_ee_zero_accx">, + Intrinsic<[], [], []>; + +def int_xtensa_ee_zero_q: ClangBuiltin<"__builtin_xtensa_ee_zero_q">, + Intrinsic<[], [llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ee_zero_qacc: ClangBuiltin<"__builtin_xtensa_ee_zero_qacc">, + Intrinsic<[], [], []>; + +def int_xtensa_rur_accx_0: ClangBuiltin<"__builtin_xtensa_rur_accx_0">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_rur_accx_1: ClangBuiltin<"__builtin_xtensa_rur_accx_1">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_rur_fft_bit_width: ClangBuiltin<"__builtin_xtensa_rur_fft_bit_width">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_rur_gpio_out: ClangBuiltin<"__builtin_xtensa_rur_gpio_out">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_rur_qacc_h_0: ClangBuiltin<"__builtin_xtensa_rur_qacc_h_0">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_rur_qacc_h_1: ClangBuiltin<"__builtin_xtensa_rur_qacc_h_1">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_rur_qacc_h_2: ClangBuiltin<"__builtin_xtensa_rur_qacc_h_2">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_rur_qacc_h_3: ClangBuiltin<"__builtin_xtensa_rur_qacc_h_3">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_rur_qacc_h_4: ClangBuiltin<"__builtin_xtensa_rur_qacc_h_4">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_rur_qacc_l_0: ClangBuiltin<"__builtin_xtensa_rur_qacc_l_0">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_rur_qacc_l_1: ClangBuiltin<"__builtin_xtensa_rur_qacc_l_1">, + Intrinsic<[llvm_i32_ty], [], []>; + 
+def int_xtensa_rur_qacc_l_2: ClangBuiltin<"__builtin_xtensa_rur_qacc_l_2">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_rur_qacc_l_3: ClangBuiltin<"__builtin_xtensa_rur_qacc_l_3">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_rur_qacc_l_4: ClangBuiltin<"__builtin_xtensa_rur_qacc_l_4">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_rur_sar_byte: ClangBuiltin<"__builtin_xtensa_rur_sar_byte">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_rur_ua_state_0: ClangBuiltin<"__builtin_xtensa_rur_ua_state_0">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_rur_ua_state_1: ClangBuiltin<"__builtin_xtensa_rur_ua_state_1">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_rur_ua_state_2: ClangBuiltin<"__builtin_xtensa_rur_ua_state_2">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_rur_ua_state_3: ClangBuiltin<"__builtin_xtensa_rur_ua_state_3">, + Intrinsic<[llvm_i32_ty], [], []>; + +def int_xtensa_wur_accx_0: ClangBuiltin<"__builtin_xtensa_wur_accx_0">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_accx_1: ClangBuiltin<"__builtin_xtensa_wur_accx_1">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_fft_bit_width: ClangBuiltin<"__builtin_xtensa_wur_fft_bit_width">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_gpio_out: ClangBuiltin<"__builtin_xtensa_wur_gpio_out">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_qacc_h_0: ClangBuiltin<"__builtin_xtensa_wur_qacc_h_0">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_qacc_h_1: ClangBuiltin<"__builtin_xtensa_wur_qacc_h_1">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_qacc_h_2: ClangBuiltin<"__builtin_xtensa_wur_qacc_h_2">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_qacc_h_3: ClangBuiltin<"__builtin_xtensa_wur_qacc_h_3">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_qacc_h_4: ClangBuiltin<"__builtin_xtensa_wur_qacc_h_4">, + Intrinsic<[], [llvm_i32_ty], []>; + +def 
int_xtensa_wur_qacc_l_0: ClangBuiltin<"__builtin_xtensa_wur_qacc_l_0">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_qacc_l_1: ClangBuiltin<"__builtin_xtensa_wur_qacc_l_1">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_qacc_l_2: ClangBuiltin<"__builtin_xtensa_wur_qacc_l_2">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_qacc_l_3: ClangBuiltin<"__builtin_xtensa_wur_qacc_l_3">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_qacc_l_4: ClangBuiltin<"__builtin_xtensa_wur_qacc_l_4">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_sar_byte: ClangBuiltin<"__builtin_xtensa_wur_sar_byte">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ua_state_0: ClangBuiltin<"__builtin_xtensa_wur_ua_state_0">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ua_state_1: ClangBuiltin<"__builtin_xtensa_wur_ua_state_1">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ua_state_2: ClangBuiltin<"__builtin_xtensa_wur_ua_state_2">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ua_state_3: ClangBuiltin<"__builtin_xtensa_wur_ua_state_3">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_mv_qr: ClangBuiltin<"__builtin_xtensa_mv_qr">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; diff --git a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp index 0da7430b0cd22..7e3f8d16678a0 100644 --- a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp +++ b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp @@ -373,8 +373,48 @@ struct XtensaOperand : public MCParsedAsmOperand { bool isseimm7_22() const { return isImm(7, 22); } + bool isSelect_2() const { return isImm(0, 1); } + + bool isSelect_4() const { return isImm(0, 3); } + + bool isSelect_8() const { return isImm(0, 7); } + + bool isSelect_16() const { return isImm(0, 16); } + bool isSelect_256() const { return isImm(0, 255); } + bool isOffset_16_16() const { + return 
isImm(-128, 112) && + ((cast(getImm())->getValue() & 0xf) == 0); + } + + bool isOffset_256_8() const { + return isImm(-1024, 1016) && + ((cast(getImm())->getValue() & 0x7) == 0); + } + + bool isOffset_256_16() const { + return isImm(-2048, 2032) && + ((cast(getImm())->getValue() & 0xf) == 0); + } + + bool isOffset_256_4() const { + return isImm(-512, 508) && + ((cast(getImm())->getValue() & 0x3) == 0); + } + + bool isOffset_128_2() const { + return isImm(0, 254) && + ((cast(getImm())->getValue() & 0x1) == 0); + } + + bool isOffset_128_1() const { return isImm(0, 127); } + + bool isOffset_64_16() const { + return isImm(-512, 496) && + ((cast(getImm())->getValue() & 0xf) == 0); + } + /// getStartLoc - Gets location of the first token of this operand SMLoc getStartLoc() const override { return StartLoc; } /// getEndLoc - Gets location of the last token of this operand @@ -692,9 +732,48 @@ bool XtensaAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, case Match_Invalidseimm7_22: return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), "expected immediate in range [7, 22]"); + case Match_InvalidSelect_2: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [0, 1]"); + case Match_InvalidSelect_4: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [0, 3]"); + case Match_InvalidSelect_8: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [0, 7]"); + case Match_InvalidSelect_16: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [0, 15]"); case Match_InvalidSelect_256: return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), "expected immediate in range [0, 255]"); + case Match_InvalidOffset_16_16: + return Error( + RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [-128, 112], first 4 bits should be zero"); + case Match_InvalidOffset_256_8: + return Error(RefineErrorLoc(IDLoc, 
Operands, ErrorInfo), + "expected immediate in range [-1024, 1016], first 3 bits " + "should be zero"); + case Match_InvalidOffset_256_16: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [-2048, 2032], first 4 bits " + "should be zero"); + case Match_InvalidOffset_256_4: + return Error( + RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [-512, 508], first 2 bits should be zero"); + case Match_InvalidOffset_128_2: + return Error( + RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [0, 254], first bit should be zero"); + case Match_InvalidOffset_128_1: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [0, 127]"); + case Match_InvalidOffset_64_16: + return Error( + RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [-512, 496], first 4 bits should be zero"); } report_fatal_error("Unknown match type detected!"); @@ -836,9 +915,9 @@ ParseStatus XtensaAsmParser::parseRegister(OperandVector &Operands, return ParseStatus::NoMatch; } - if (!checkRegister(Mnemonic.lower(), RegName, RegNo)) { + if (!checkRegister(Mnemonic.lower(), RegName, RegNo)) return ParseStatus::NoMatch; - } + if (HadParens) Operands.push_back(XtensaOperand::createToken("(", FirstS)); diff --git a/llvm/lib/Target/Xtensa/CMakeLists.txt b/llvm/lib/Target/Xtensa/CMakeLists.txt index 1cdb4fbac450b..985e18b1cc4c1 100644 --- a/llvm/lib/Target/Xtensa/CMakeLists.txt +++ b/llvm/lib/Target/Xtensa/CMakeLists.txt @@ -25,6 +25,7 @@ add_llvm_target(XtensaCodeGen XtensaInstrInfo.cpp XtensaISelDAGToDAG.cpp XtensaISelLowering.cpp + XtensaS3ISelLowering.cpp XtensaMachineFunctionInfo.cpp XtensaRegisterInfo.cpp XtensaSizeReductionPass.cpp diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index e2974da88a441..0c78d30ea1ab8 100644 --- 
a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -75,6 +75,21 @@ static DecodeStatus DecodeARRegisterClass(MCInst &Inst, uint64_t RegNo, return MCDisassembler::Success; } +static const unsigned QRDecoderTable[] = { + Xtensa::Q0, Xtensa::Q1, Xtensa::Q2, Xtensa::Q3, Xtensa::Q4, Xtensa::Q5, + Xtensa::Q6, Xtensa::Q7}; + +static DecodeStatus DecodeQRRegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo >= std::size(QRDecoderTable)) + return MCDisassembler::Fail; + + unsigned Reg = QRDecoderTable[RegNo]; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + static const unsigned FPRDecoderTable[] = { Xtensa::F0, Xtensa::F1, Xtensa::F2, Xtensa::F3, Xtensa::F4, Xtensa::F5, Xtensa::F6, Xtensa::F7, Xtensa::F8, Xtensa::F9, Xtensa::F10, Xtensa::F11, @@ -569,6 +584,38 @@ static DecodeStatus decodeSeimm7_22Operand(MCInst &Inst, uint64_t Imm, return MCDisassembler::Success; } +static DecodeStatus decodeSelect_2Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeSelect_4Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeSelect_8Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeSelect_16Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm)); + return 
MCDisassembler::Success; +} + static DecodeStatus decodeSelect_256Operand(MCInst &Inst, uint64_t Imm, int64_t Address, const void *Decoder) { @@ -577,6 +624,80 @@ static DecodeStatus decodeSelect_256Operand(MCInst &Inst, uint64_t Imm, return MCDisassembler::Success; } +static DecodeStatus decodeOffset_16_16Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isInt<8>(Imm) && "Invalid immediate"); + if ((Imm & 0xf) != 0) + Inst.addOperand(MCOperand::createImm(Imm << 4)); + else + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeOffset_256_8Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isInt<16>(Imm) && "Invalid immediate"); + if ((Imm & 0x7) != 0) + Inst.addOperand(MCOperand::createImm(Imm << 3)); + else + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeOffset_256_16Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isInt<16>(Imm) && "Invalid immediate"); + if ((Imm & 0xf) != 0) + Inst.addOperand(MCOperand::createImm(Imm << 4)); + else + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeOffset_256_4Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isInt<16>(Imm) && "Invalid immediate"); + if ((Imm & 0x2) != 0) + Inst.addOperand(MCOperand::createImm(Imm << 2)); + else + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeOffset_128_2Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + if ((Imm & 0x1) != 0) + Inst.addOperand(MCOperand::createImm(Imm << 1)); + else + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeOffset_128_1Operand(MCInst 
&Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeOffset_64_16Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isInt<16>(Imm) && "Invalid immediate"); + if ((Imm & 0xf) != 0) + Inst.addOperand(MCOperand::createImm(Imm << 4)); + else + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + static int64_t TableB4const[16] = {-1, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256}; static DecodeStatus decodeB4constOperand(MCInst &Inst, uint64_t Imm, diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp index 85db697c724d6..5919ef5401431 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp @@ -449,6 +449,50 @@ void XtensaInstPrinter::printSeimm7_22_AsmOperand(const MCInst *MI, int OpNum, printOperand(MI, OpNum, O); } +void XtensaInstPrinter::printSelect_2_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 1) && + "Invalid argument, value must be in range [0,1]"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printSelect_4_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 3) && + "Invalid argument, value must be in range [0,3]"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printSelect_8_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + 
assert((Value >= 0 && Value <= 7) && + "Invalid argument, value must be in range [0,7]"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printSelect_16_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 15) && + "Invalid argument, value must be in range [0,15]"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + void XtensaInstPrinter::printSelect_256_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O) { if (MI->getOperand(OpNum).isImm()) { @@ -459,3 +503,88 @@ void XtensaInstPrinter::printSelect_256_AsmOperand(const MCInst *MI, int OpNum, } else printOperand(MI, OpNum, O); } + +void XtensaInstPrinter::printOffset_16_16_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -128 && Value <= 112 && (Value & 0xf) == 0) && + "Invalid argument, value must be in range [-128,112], first 4 bits " + "should be zero"); + O << Value; + } else{ + printOperand(MI, OpNum, O); + } +} + +void XtensaInstPrinter::printOffset_256_8_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -1024 && Value <= 1016 && (Value & 0x7) == 0) && + "Invalid argument, value must be in range [-1024,1016], first 3 " + "bits should be zero"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printOffset_256_16_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -2048 && Value <= 2032 && (Value & 0xf) == 0) && + "Invalid argument, value must be in range [-2048,2032], first 4 " + "bits should be zero"); + O << Value; + } else{ + printOperand(MI, OpNum, O); + 
} +} + +void XtensaInstPrinter::printOffset_256_4_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -512 && Value <= 508 && (Value & 0x3) == 0) && + "Invalid argument, value must be in range [-512,508], first 2 bits " + "should be zero"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printOffset_128_2_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 254 && (Value & 0x1) == 0) && + "Invalid argument, value must be in range [0,254], first bit should " + "be zero"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printOffset_128_1_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 127) && + "Invalid argument, value must be in range [0,127]"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + +void XtensaInstPrinter::printOffset_64_16_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -512 && Value <= 496 && (Value & 0xf) == 0) && + "Invalid argument, value must be in range [-512,496], first 4 bits " + "should be zero"); + O << Value; + } else + printOperand(MI, OpNum, O); +} diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h index 3e8c752bc4426..a8416279d6ae0 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h @@ -70,7 +70,18 @@ class XtensaInstPrinter : public MCInstPrinter { void printB4const_AsmOperand(const MCInst *MI, int OpNum, raw_ostream 
&O); void printB4constu_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printSeimm7_22_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printSelect_2_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printSelect_4_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printSelect_8_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printSelect_16_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printSelect_256_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printOffset_16_16_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printOffset_256_8_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printOffset_256_16_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printOffset_256_4_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printOffset_128_2_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printOffset_128_1_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printOffset_64_16_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); }; } // end namespace llvm diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp index 73039c2a44480..1e960cdce26fd 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp @@ -143,10 +143,53 @@ class XtensaMCCodeEmitter : public MCCodeEmitter { SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; - uint32_t getSelect_256OpValue(const MCInst &MI, unsigned OpNo, + uint8_t getSelect_2OpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; + uint8_t getSelect_4OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint8_t getSelect_8OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + 
const MCSubtargetInfo &STI) const; + + uint8_t getSelect_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint8_t getSelect_256OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + int8_t getOffset_16_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + int16_t getOffset_256_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + int16_t getOffset_256_8OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + int16_t getOffset_256_4OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint8_t getOffset_128_2OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint8_t getOffset_128_1OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + int16_t getOffset_64_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; }; } // namespace @@ -598,16 +641,154 @@ XtensaMCCodeEmitter::getSeimm7_22OpValue(const MCInst &MI, unsigned OpNo, return res; } -uint32_t +uint8_t +XtensaMCCodeEmitter::getSelect_2OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + uint8_t Res = static_cast(MO.getImm()); + + assert(((Res >= 0) && (Res <= 1)) && "Unexpected operand value!"); + + return Res; +} + +uint8_t +XtensaMCCodeEmitter::getSelect_4OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + uint8_t Res = static_cast(MO.getImm()); + + assert(((Res >= 0) && (Res <= 3)) && "Unexpected operand value!"); + + 
return Res; +} + +uint8_t +XtensaMCCodeEmitter::getSelect_8OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + uint8_t Res = static_cast(MO.getImm()); + + assert(((Res >= 0) && (Res <= 7)) && "Unexpected operand value!"); + + return Res; +} + +uint8_t +XtensaMCCodeEmitter::getSelect_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + uint8_t Res = static_cast(MO.getImm()); + + assert(((Res >= 0) && (Res <= 15)) && "Unexpected operand value!"); + + return Res; +} + +uint8_t XtensaMCCodeEmitter::getSelect_256OpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const { const MCOperand &MO = MI.getOperand(OpNo); - uint32_t Res = static_cast(MO.getImm()); + uint8_t Res = static_cast(MO.getImm()); assert(((Res >= 0) && (Res <= 255)) && "Unexpected operand value!"); return Res; } +int8_t +XtensaMCCodeEmitter::getOffset_16_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int8_t Res = static_cast(MO.getImm()); + + assert(((Res >= -128) && (Res <= 112) && ((Res & 0xf) == 0)) && + "Unexpected operand value!"); + + return Res; +} + +int16_t +XtensaMCCodeEmitter::getOffset_256_8OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int16_t Res = static_cast(MO.getImm()); + + assert(((Res >= -1024) && (Res <= 1016) && ((Res & 0x7) == 0)) && + "Unexpected operand value!"); + + return Res; +} + +int16_t +XtensaMCCodeEmitter::getOffset_256_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int16_t Res = static_cast(MO.getImm()); + + 
assert(((Res >= -2048) && (Res <= 2032) && ((Res & 0xf) == 0)) && + "Unexpected operand value!"); + + return Res; +} + +int16_t +XtensaMCCodeEmitter::getOffset_256_4OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int16_t Res = static_cast(MO.getImm()); + + assert(((Res >= -512) && (Res <= 508) && ((Res & 0x3) == 0)) && + "Unexpected operand value!"); + + return Res; +} + +uint8_t +XtensaMCCodeEmitter::getOffset_128_2OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + uint8_t Res = static_cast(MO.getImm()); + + assert(((Res >= 0) && (Res <= 254) && ((Res & 0x1) == 0)) && + "Unexpected operand value!"); + + return Res; +} + +uint8_t +XtensaMCCodeEmitter::getOffset_128_1OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + uint8_t Res = static_cast(MO.getImm()); + + assert(((Res >= 0) && (Res <= 127)) && "Unexpected operand value!"); + + return Res; +} + +int16_t +XtensaMCCodeEmitter::getOffset_64_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int16_t Res = static_cast(MO.getImm()); + + assert(((Res >= -512) && (Res <= 496) && ((Res & 0xf) == 0)) && + "Unexpected operand value!"); + + return Res; +} + #include "XtensaGenMCCodeEmitter.inc" diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 2cb5ccb25f2dd..cfa090eeea273 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -339,8 +339,8 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, /// Return the register type for a given MVT MVT 
XtensaTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context, - CallingConv::ID CC, - EVT VT) const { + CallingConv::ID CC, + EVT VT) const { if (VT.isFloatingPoint()) return MVT::i32; @@ -632,7 +632,7 @@ static SDValue PerformHWLoopCombine(SDNode *N, SelectionDAG &DAG, Size, }; SDValue LoopDec = DAG.getNode(XtensaISD::LOOPDEC, dl, - DAG.getVTList(MVT::i32, MVT::Other), Args); + DAG.getVTList(MVT::i32, MVT::Other), Args); // We now need to make the intrinsic dead (it cannot be instruction // selected). @@ -3229,6 +3229,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( return MBB; } default: - llvm_unreachable("Unexpected instr type to insert"); + return EmitDSPInstrWithCustomInserter(MI, MBB, TII, MF, MRI, DL); + // llvm_unreachable("Unexpected instr type to insert"); } } diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index 3cc5528f10145..84f6c1245252d 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -26,7 +26,7 @@ enum { BR_T, BR_F, - //Conditional branch with FP operands + // Conditional branch with FP operands BR_CC_FP, BR_JT, @@ -194,6 +194,10 @@ class XtensaTargetLowering : public TargetLowering { EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override; + MachineBasicBlock *EmitDSPInstrWithCustomInserter( + MachineInstr &MI, MachineBasicBlock *MBB, const TargetInstrInfo &TII, + MachineFunction *MF, MachineRegisterInfo &MRI, DebugLoc DL) const; + private: const XtensaSubtarget &Subtarget; diff --git a/llvm/lib/Target/Xtensa/XtensaInstrFormats.td b/llvm/lib/Target/Xtensa/XtensaInstrFormats.td index e7c51da1e14fa..beb15c3c5647b 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrFormats.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrFormats.td @@ -210,6 +210,18 @@ class RI6_Inst op0, bits<1> i, bits<1> z, dag outs, dag ins, let Inst{3-0} = op0; } +class EE_Inst24 pattern, + InstrItinClass itin 
= NoItinerary> + : XtensaInst24 { +} + +class EE_Inst32 pattern, + InstrItinClass itin = NoItinerary> + : XtensaInst<4, outs, ins, asmstr, pattern, itin> { + field bits<32> Inst; + field bits<32> SoftFail = 0; +} + // Pseudo instructions class Pseudo pattern> : XtensaInst<2, outs, ins, asmstr, pattern> { diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index bafbe4e68ac7f..a66ba66431d76 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1683,8 +1683,10 @@ let Predicates = [HasESP32S3Ops] in { let s = 0x8; } } +include "XtensaS3DSPInstrInfo.td" //===----------------------------------------------------------------------===// // DSP Instructions //===----------------------------------------------------------------------===// include "XtensaDSPInstrInfo.td" + diff --git a/llvm/lib/Target/Xtensa/XtensaOperands.td b/llvm/lib/Target/Xtensa/XtensaOperands.td index 620aeee000518..7810d0a36a354 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperands.td +++ b/llvm/lib/Target/Xtensa/XtensaOperands.td @@ -182,6 +182,89 @@ def select_256: Immediate= 0 && Imm <= 255; }], "Select_256 let DecoderMethod = "decodeSelect_256Operand"; } +// select_2 predicate - Immediate in the range [0,1] +def Select_2_AsmOperand: ImmAsmOperand<"Select_2">; +def select_2: Immediate= 0 && Imm <= 1; }], "Select_2_AsmOperand"> { + let EncoderMethod = "getSelect_2OpValue"; + let DecoderMethod = "decodeSelect_2Operand"; +} + +// select_4 predicate - Immediate in the range [0,3] +def Select_4_AsmOperand: ImmAsmOperand<"Select_4">; +def select_4: Immediate= 0 && Imm <= 3; }], "Select_4_AsmOperand"> { + let EncoderMethod = "getSelect_4OpValue"; + let DecoderMethod = "decodeSelect_4Operand"; +} + +// select_8 predicate - Immediate in the range [0,7] +def Select_8_AsmOperand: ImmAsmOperand<"Select_8">; +def select_8: Immediate= 0 && Imm <= 7; }], "Select_8_AsmOperand"> { + let EncoderMethod = 
"getSelect_8OpValue"; + let DecoderMethod = "decodeSelect_8Operand"; +} + +// select_16 predicate - Immediate in the range [0,15] +def Select_16_AsmOperand: ImmAsmOperand<"Select_16">; +def select_16: Immediate= 0 && Imm <= 15; }], "Select_16_AsmOperand"> { + let EncoderMethod = "getSelect_16OpValue"; + let DecoderMethod = "decodeSelect_16Operand"; +} +/// --------------------------- /// +// offset_16_16 predicate - 4-bit signed immediate in the range [-128,112] with an interval +// of 16. +def Offset_16_16_AsmOperand: ImmAsmOperand<"Offset_16_16">; +def offset_16_16: Immediate= -128 && Imm <= 112) && (Imm & 0xf == 0); }], "Offset_16_16_AsmOperand"> { + let EncoderMethod = "getOffset_16_16OpValue"; + let DecoderMethod = "decodeOffset_16_16Operand"; +} + +// offset_256_8 predicate - 4-bit signed immediate in the range [-1024,1016] with an interval +// of 8. +def Offset_256_8_AsmOperand: ImmAsmOperand<"Offset_256_8">; +def offset_256_8: Immediate= -1024 && Imm <= 1016) && (Imm & 0x7 == 0); }], "Offset_256_8_AsmOperand"> { + let EncoderMethod = "getOffset_256_8OpValue"; + let DecoderMethod = "decodeOffset_256_8Operand"; +} + +// offset_256_16 predicate - 8-bit signed immediate in the range [-2048,2032] with an interval +// of 16. +def Offset_256_16_AsmOperand: ImmAsmOperand<"Offset_256_16">; +def offset_256_16: Immediate= -2048 && Imm <= 2032) && (Imm & 0xf == 0); }], "Offset_256_16_AsmOperand"> { + let EncoderMethod = "getOffset_256_16OpValue"; + let DecoderMethod = "decodeOffset_256_16Operand"; +} + +// offset_256_4 predicate - 4-bit signed immediate in the range [-512,508] with an interval +// of 4. +def Offset_256_4_AsmOperand: ImmAsmOperand<"Offset_256_4">; +def offset_256_4: Immediate= -512 && Imm <= 508) && (Imm & 0x3 == 0); }], "Offset_256_4_AsmOperand"> { + let EncoderMethod = "getOffset_256_4OpValue"; + let DecoderMethod = "decodeOffset_256_4Operand"; +} + +// offset_128_2 predicate - 4-bit signed immediate in the range [0,254] with an interval +// of 2. 
+def Offset_128_2_AsmOperand: ImmAsmOperand<"Offset_128_2">; +def offset_128_2: Immediate= 0 && Imm <= 254) && (Imm & 0x1 == 0); }], "Offset_128_2_AsmOperand"> { + let EncoderMethod = "getOffset_128_2OpValue"; + let DecoderMethod = "decodeOffset_128_2Operand"; +} + +// offset_128_1 predicate - 4-bit signed immediate in the range [0,127] +def Offset_128_1_AsmOperand: ImmAsmOperand<"Offset_128_1">; +def offset_128_1: Immediate= 0 && Imm <= 127; }], "Offset_128_1_AsmOperand"> { + let EncoderMethod = "getOffset_128_1OpValue"; + let DecoderMethod = "decodeOffset_128_1Operand"; +} + +// offset_64_16 predicate - 4-bit signed immediate in the range [-512,496] with an interval +// of 16. +def Offset_64_16_AsmOperand: ImmAsmOperand<"Offset_64_16">; +def offset_64_16: Immediate= -512 && Imm <= 496) && (Imm & 0xf == 0); }], "Offset_64_16_AsmOperand"> { + let EncoderMethod = "getOffset_64_16OpValue"; + let DecoderMethod = "decodeOffset_64_16Operand"; +} + //===----------------------------------------------------------------------===// // Memory address operands //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td index 73b4a3cd00b58..dc2d5abc48758 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td @@ -64,6 +64,25 @@ def A15 : ARReg<15, "a15">, DwarfRegNum<[15]>; def AR : RegisterClass<"Xtensa", [i32], 32, (add A8, A9, A10, A11, A12, A13, A14, A15, A7, A6, A5, A4, A3, A2, A0, SP)>; + + +// 128-bit general-purpose registers +class QRReg num, string n, listalt = []> : XtensaReg{ + let HWEncoding{15-0} = num; + let AltNames = alt; +} + +def Q0 : QRReg<0, "q0">, DwarfRegNum<[0]>; +def Q1 : QRReg<1, "q1">, DwarfRegNum<[1]>; +def Q2 : QRReg<2, "q2">, DwarfRegNum<[2]>; +def Q3 : QRReg<3, "q3">, DwarfRegNum<[3]>; +def Q4 : QRReg<4, "q4">, DwarfRegNum<[4]>; +def Q5 : QRReg<5, "q5">, DwarfRegNum<[5]>; 
+def Q6 : QRReg<6, "q6">, DwarfRegNum<[6]>; +def Q7 : QRReg<7, "q7">, DwarfRegNum<[7]>; + +def QR : RegisterClass<"Xtensa", [v16i8, v4i32], 128, (sequence "Q%u", 0, 7)>; + //===----------------------------------------------------------------------===// // Special-purpose registers //===----------------------------------------------------------------------===// @@ -237,8 +256,14 @@ def F64R_LO : URReg<234, "f64r_lo", ["F64R_LO"]>; def F64R_HI : URReg<235, "f64r_hi", ["F64R_HI"]>; def F64S : URReg<236, "f64s", ["F64S"]>; +def ACCX : URReg<237, "accx", ["accx_0", "accx_1"]>; +def QACC : URReg<238, "qacc", ["qacc_h_0", "qacc_h_1", "qacc_h_2", "qacc_h_3", "qacc_h_4", "qacc_l_0", "qacc_l_1", "qacc_l_2", "qacc_l_3", "qacc_l_4"]>; +def FFT_BIT_WIDTH : URReg<239, "fft_bit_width", ["fft_bit_width"]>; +def SAR_BYTE : URReg<240, "sar_byte", ["sar_byte"]>; +def UA_STATE : URReg<241, "ua_state", ["ua_state_0", "ua_state_1", "ua_state_2", "ua_state_3"]>; + def UR : RegisterClass<"Xtensa", [i32], 32, (add GPIO_OUT, EXPSTATE, THREADPTR, FCR, - FSR, F64R_LO, F64R_HI, F64S)>; + FSR, F64R_LO, F64R_HI, F64S, ACCX, QACC, FFT_BIT_WIDTH, SAR_BYTE, UA_STATE)>; //===----------------------------------------------------------------------===// // Floating-Point registers diff --git a/llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td new file mode 100644 index 0000000000000..b3efb331ce45f --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td @@ -0,0 +1,5659 @@ +//===- XtensaS3DSPInstrInfo.td - Xtensa Target Description -*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file describes the Xtensa S3 DSP instructions in TableGen format. +// +// These definitions are generated +// This file is generated +// AI_S6_V2.h +// +//===----------------------------------------------------------------------===// + +// This file is generated + + +def EE_ANDQ: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.andq\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qa{0}; + let Inst{14-12} = 0x3; + let Inst{11-10} = qy{2-1}; + let Inst{9-8} = 0x0; + let Inst{7-6} = qx{2-1}; + let Inst{5} = qy{0}; + let Inst{4} = qx{0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_ANDQ_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_andq_p, $qa, $qx, $qy", + [(int_xtensa_ee_andq timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_BITREV: EE_Inst24<(outs QR:$qa, AR:$axr), (ins AR:$ax), + "ee.bitrev\t $qa, $ax", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<4> ax; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qa{0}; + let Inst{14-8} = 0x7b; + let Inst{7-4} = ax{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_BITREV_P : Pseudo<(outs), (ins imm8:$qa, AR:$ax), + "!xtensa_ee_bitrev_p, $qa, $ax", + [(int_xtensa_ee_bitrev timm:$qa, AR:$ax)]>; + +def EE_CMUL_S16: EE_Inst24<(outs QR:$qz), (ins QR:$qx, QR:$qy, select_4:$sel4), + "ee.cmul.s16\t $qz, $qx, $qy, $sel4", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qz; + bits<3> qx; + bits<3> qy; + bits<2> sel4; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qz{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qz{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x0; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = 
qx{2-0}; + let Inst{7-6} = 0x0; + let Inst{5-4} = sel4{1-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_CMUL_S16_P : Pseudo<(outs), (ins imm8:$qz, imm8:$qx, imm8:$qy, select_4:$sel4), + "!xtensa_ee_cmul_s16_p, $qz, $qx, $qy, $sel4", + [(int_xtensa_ee_cmul_s16 timm:$qz, timm:$qx, timm:$qy, timm:$sel4)]>; + +def EE_CMUL_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$as, QR:$qx, QR:$qy, select_4:$sel4), + "ee.cmul.s16.ld.incp\t $qu, $as, $qz, $qx, $qy, $sel4", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qz; + bits<3> qx; + bits<3> qy; + bits<2> sel4; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qz{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-6} = 0x3; + let Inst{5-4} = sel4{1-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_CMUL_S16_LD_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qz, imm8:$qx, imm8:$qy, select_4:$sel4), + "!xtensa_ee_cmul_s16_ld_incp_p, $qu, $as, $qz, $qx, $qy, $sel4", + [(int_xtensa_ee_cmul_s16_ld_incp timm:$qu, AR:$as, timm:$qz, timm:$qx, timm:$qy, timm:$sel4)]>; + +def EE_CMUL_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qz), (ins QR:$qv, AR:$as, QR:$qx, QR:$qy, select_4:$sel4), + "ee.cmul.s16.st.incp\t $qv, $as, $qz, $qx, $qy, $sel4", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<3> qz; + bits<3> qx; + bits<3> qy; + bits<2> sel4; + + let mayStore = 1; + + let Inst{28-20} = 0x1c8; + let Inst{19-17} = qz{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qv{2-0}; + let Inst{7-6} = 0x0; + let Inst{5-4} = sel4{1-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_CMUL_S16_ST_INCP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, imm8:$qz, imm8:$qx, imm8:$qy, select_4:$sel4), + "!xtensa_ee_cmul_s16_st_incp_p, $qv, $as, $qz, $qx, $qy, $sel4", + [(int_xtensa_ee_cmul_s16_st_incp timm:$qv, AR:$as, 
timm:$qz, timm:$qx, timm:$qy, timm:$sel4)]>; + +def EE_FFT_AMS_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz, QR:$qz1), (ins AR:$as, QR:$qx, QR:$qy, QR:$qm, select_2:$sel2), + "ee.fft.ams.s16.ld.incp\t $qu, $as, $qz, $qz1, $qx, $qy, $qm, $sel2", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qz; + bits<3> qz1; + bits<3> qx; + bits<3> qy; + bits<3> qm; + bits<1> sel2; + + let mayLoad = 1; + + let Inst{28-23} = 0x34; + let Inst{22} = sel2{0}; + let Inst{21-20} = qz1{2-1}; + let Inst{19-17} = qm{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qz{2-0}; + let Inst{10-8} = qy{2-0}; + let Inst{7} = qz1{0}; + let Inst{6-4} = qu{2-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_FFT_AMS_S16_LD_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qz, imm8:$qz1, imm8:$qx, imm8:$qy, imm8:$qm, select_2:$sel2), + "!xtensa_ee_fft_ams_s16_ld_incp_p, $qu, $as, $qz, $qz1, $qx, $qy, $qm, $sel2", + [(int_xtensa_ee_fft_ams_s16_ld_incp timm:$qu, AR:$as, timm:$qz, timm:$qz1, timm:$qx, timm:$qy, timm:$qm, timm:$sel2)]>; + +def EE_FFT_AMS_S16_LD_INCP_UAUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz, QR:$qz1), (ins AR:$as, QR:$qx, QR:$qy, QR:$qm, select_2:$sel2), + "ee.fft.ams.s16.ld.incp.uaup\t $qu, $as, $qz, $qz1, $qx, $qy, $qm, $sel2", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qz; + bits<3> qz1; + bits<3> qx; + bits<3> qy; + bits<3> qm; + bits<1> sel2; + + let mayLoad = 1; + + let Inst{28-23} = 0x35; + let Inst{22} = sel2{0}; + let Inst{21-20} = qz1{2-1}; + let Inst{19-17} = qm{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qz{2-0}; + let Inst{10-8} = qy{2-0}; + let Inst{7} = qz1{0}; + let Inst{6-4} = qu{2-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_FFT_AMS_S16_LD_INCP_UAUP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qz, imm8:$qz1, imm8:$qx, imm8:$qy, imm8:$qm, select_2:$sel2), + "!xtensa_ee_fft_ams_s16_ld_incp_uaup_p, $qu, $as, $qz, $qz1, $qx, $qy, $qm, 
$sel2", + [(int_xtensa_ee_fft_ams_s16_ld_incp_uaup timm:$qu, AR:$as, timm:$qz, timm:$qz1, timm:$qx, timm:$qy, timm:$qm, timm:$sel2)]>; + +def EE_FFT_AMS_S16_LD_R32_DECP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz, QR:$qz1), (ins AR:$as, QR:$qx, QR:$qy, QR:$qm, select_2:$sel2), + "ee.fft.ams.s16.ld.r32.decp\t $qu, $as, $qz, $qz1, $qx, $qy, $qm, $sel2", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qz; + bits<3> qz1; + bits<3> qx; + bits<3> qy; + bits<3> qm; + bits<1> sel2; + + let mayLoad = 1; + + let Inst{28-23} = 0x36; + let Inst{22} = sel2{0}; + let Inst{21-20} = qz1{2-1}; + let Inst{19-17} = qm{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qz{2-0}; + let Inst{10-8} = qy{2-0}; + let Inst{7} = qz1{0}; + let Inst{6-4} = qu{2-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_FFT_AMS_S16_LD_R32_DECP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qz, imm8:$qz1, imm8:$qx, imm8:$qy, imm8:$qm, select_2:$sel2), + "!xtensa_ee_fft_ams_s16_ld_r32_decp_p, $qu, $as, $qz, $qz1, $qx, $qy, $qm, $sel2", + [(int_xtensa_ee_fft_ams_s16_ld_r32_decp timm:$qu, AR:$as, timm:$qz, timm:$qz1, timm:$qx, timm:$qy, timm:$qm, timm:$sel2)]>; + +def EE_FFT_AMS_S16_ST_INCP: EE_Inst32<(outs QR:$qz1, AR:$as0r, AR:$asr), (ins QR:$qv, AR:$as0, AR:$as, QR:$qx, QR:$qy, QR:$qm, select_2:$sel2), + "ee.fft.ams.s16.st.incp\t $qv, $qz1, $as0, $as, $qx, $qy, $qm, $sel2", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<3> qz1; + bits<4> as0; + bits<4> as; + bits<3> qx; + bits<3> qy; + bits<3> qm; + bits<1> sel2; + + let mayStore = 1; + + let Inst{28-24} = 0x14; + let Inst{23} = sel2{0}; + let Inst{22-20} = qz1{2-0}; + let Inst{19-17} = qx{2-0}; + let Inst{16-14} = qy{2-0}; + let Inst{13-11} = qm{2-0}; + let Inst{10-8} = qv{2-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = as0{3-0}; +} + +let usesCustomInserter = 1 in +def EE_FFT_AMS_S16_ST_INCP_P : Pseudo<(outs), (ins imm8:$qv, imm8:$qz1, AR:$as0, AR:$as, imm8:$qx, imm8:$qy, imm8:$qm, 
select_2:$sel2), + "!xtensa_ee_fft_ams_s16_st_incp_p, $qv, $qz1, $as0, $as, $qx, $qy, $qm, $sel2", + [(int_xtensa_ee_fft_ams_s16_st_incp timm:$qv, timm:$qz1, AR:$as0, AR:$as, timm:$qx, timm:$qy, timm:$qm, timm:$sel2)]>; + +def EE_FFT_CMUL_S16_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$as, AR:$ad, QR:$qx, QR:$qy, select_8:$sel8), + "ee.fft.cmul.s16.ld.xp\t $qu, $as, $ad, $qz, $qx, $qy, $sel8", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + bits<3> qz; + bits<3> qx; + bits<3> qy; + bits<3> sel8; + + let mayLoad = 1; + + let Inst{28-23} = 0x37; + let Inst{22-20} = sel8{2-0}; + let Inst{19-17} = qz{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qu{2-0}; + let Inst{10-8} = qy{2-0}; + let Inst{7-4} = ad{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_FFT_CMUL_S16_LD_XP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad, imm8:$qz, imm8:$qx, imm8:$qy, select_8:$sel8), + "!xtensa_ee_fft_cmul_s16_ld_xp_p, $qu, $as, $ad, $qz, $qx, $qy, $sel8", + [(int_xtensa_ee_fft_cmul_s16_ld_xp timm:$qu, AR:$as, AR:$ad, timm:$qz, timm:$qx, timm:$qy, timm:$sel8)]>; + +def EE_FFT_CMUL_S16_ST_XP: EE_Inst32<(outs AR:$asr), (ins QR:$qx, QR:$qy, QR:$qv, AR:$as, AR:$ad, select_8:$sel8, select_4:$upd4, select_4:$sar4), + "ee.fft.cmul.s16.st.xp\t $qx, $qy, $qv, $as, $ad, $sel8, $upd4, $sar4", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + bits<4> as; + bits<4> ad; + bits<3> sel8; + bits<2> upd4; + bits<2> sar4; + + let mayStore = 1; + + let Inst{28-24} = 0x15; + let Inst{23-22} = sar4{1-0}; + let Inst{21-20} = upd4{1-0}; + let Inst{19-17} = qv{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = sel8{2-0}; + let Inst{10-8} = qy{2-0}; + let Inst{7-4} = ad{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_FFT_CMUL_S16_ST_XP_P : Pseudo<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv, AR:$as, AR:$ad, select_8:$sel8, select_4:$upd4, select_4:$sar4), + 
"!xtensa_ee_fft_cmul_s16_st_xp_p, $qx, $qy, $qv, $as, $ad, $sel8, $upd4, $sar4", + [(int_xtensa_ee_fft_cmul_s16_st_xp timm:$qx, timm:$qy, timm:$qv, AR:$as, AR:$ad, timm:$sel8, timm:$upd4, timm:$sar4)]>; + +def EE_FFT_R2BF_S16: EE_Inst24<(outs QR:$qa0, QR:$qa1), (ins QR:$qx, QR:$qy, select_2:$sel2), + "ee.fft.r2bf.s16\t $qa0, $qa1, $qx, $qy, $sel2", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa0; + bits<3> qa1; + bits<3> qx; + bits<3> qy; + bits<1> sel2; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qa0{2-1}; + let Inst{19-16} = 0xc; + let Inst{15} = qa0{0}; + let Inst{14-12} = qa1{2-0}; + let Inst{11-10} = qy{2-1}; + let Inst{9} = 0x0; + let Inst{8} = sel2{0}; + let Inst{7-6} = qx{2-1}; + let Inst{5} = qy{0}; + let Inst{4} = qx{0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_FFT_R2BF_S16_P : Pseudo<(outs), (ins imm8:$qa0, imm8:$qa1, imm8:$qx, imm8:$qy, select_2:$sel2), + "!xtensa_ee_fft_r2bf_s16_p, $qa0, $qa1, $qx, $qy, $sel2", + [(int_xtensa_ee_fft_r2bf_s16 timm:$qa0, timm:$qa1, timm:$qx, timm:$qy, timm:$sel2)]>; + +def EE_FFT_R2BF_S16_ST_INCP: EE_Inst32<(outs QR:$qa0, AR:$asr), (ins QR:$qx, QR:$qy, AR:$as, select_4:$sar4), + "ee.fft.r2bf.s16.st.incp\t $qa0, $qx, $qy, $as, $sar4", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa0; + bits<3> qx; + bits<3> qy; + bits<4> as; + bits<2> sar4; + + let mayStore = 1; + + let Inst{28-20} = 0x1d1; + let Inst{19-17} = qa0{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13} = 0x0; + let Inst{12-11} = sar4{1-0}; + let Inst{10-8} = qy{2-0}; + let Inst{7-4} = 0x4; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_FFT_R2BF_S16_ST_INCP_P : Pseudo<(outs), (ins imm8:$qa0, imm8:$qx, imm8:$qy, AR:$as, select_4:$sar4), + "!xtensa_ee_fft_r2bf_s16_st_incp_p, $qa0, $qx, $qy, $as, $sar4", + [(int_xtensa_ee_fft_r2bf_s16_st_incp timm:$qa0, timm:$qx, timm:$qy, AR:$as, timm:$sar4)]>; + +def EE_FFT_VST_R32_DECP: EE_Inst24<(outs AR:$asr), (ins QR:$qv, AR:$as, select_2:$sar2), + "ee.fft.vst.r32.decp\t 
$qv, $as, $sar2", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<1> sar2; + + let mayStore = 1; + + let Inst{23-22} = 0x3; + let Inst{21-20} = qv{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qv{0}; + let Inst{14-11} = 0x6; + let Inst{10} = sar2{0}; + let Inst{9-8} = 0x3; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_FFT_VST_R32_DECP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, select_2:$sar2), + "!xtensa_ee_fft_vst_r32_decp_p, $qv, $as, $sar2", + [(int_xtensa_ee_fft_vst_r32_decp timm:$qv, AR:$as, timm:$sar2)]>; + +def EE_LDF_128_IP: EE_Inst32<(outs FPR:$fu3, FPR:$fu2, FPR:$fu1, FPR:$fu0, AR:$asr), (ins AR:$as, offset_16_16:$imm16f), + "ee.ldf.128.ip\t $fu3, $fu2, $fu1, $fu0, $as, $imm16f", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> fu3; + bits<4> fu2; + bits<4> fu1; + bits<4> fu0; + bits<4> as; + bits<4> imm16f; + + let mayLoad = 1; + + let Inst{28-24} = 0x10; + let Inst{23-20} = fu3{3-0}; + let Inst{19-16} = fu2{3-0}; + let Inst{15-12} = fu1{3-0}; + let Inst{11-8} = fu0{3-0}; + let Inst{7-4} = imm16f{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_LDF_128_IP_P : Pseudo<(outs), (ins FPR:$fu3, FPR:$fu2, FPR:$fu1, FPR:$fu0, AR:$as, offset_16_16:$imm16f), + "!xtensa_ee_ldf_128_ip_p, $fu3, $fu2, $fu1, $fu0, $as, $imm16f", + [(int_xtensa_ee_ldf_128_ip FPR:$fu3, FPR:$fu2, FPR:$fu1, FPR:$fu0, AR:$as, timm:$imm16f)]>; + +def EE_LDF_128_XP: EE_Inst32<(outs FPR:$fu3, FPR:$fu2, FPR:$fu1, FPR:$fu0, AR:$asr), (ins AR:$as, AR:$ad), + "ee.ldf.128.xp\t $fu3, $fu2, $fu1, $fu0, $as, $ad", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> fu3; + bits<4> fu2; + bits<4> fu1; + bits<4> fu0; + bits<4> as; + bits<4> ad; + + let mayLoad = 1; + + let Inst{28-24} = 0x11; + let Inst{23-20} = fu3{3-0}; + let Inst{19-16} = fu2{3-0}; + let Inst{15-12} = fu1{3-0}; + let Inst{11-8} = fu0{3-0}; + let Inst{7-4} = ad{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_LDF_128_XP_P : 
Pseudo<(outs), (ins FPR:$fu3, FPR:$fu2, FPR:$fu1, FPR:$fu0, AR:$as, AR:$ad), + "!xtensa_ee_ldf_128_xp_p, $fu3, $fu2, $fu1, $fu0, $as, $ad", + [(int_xtensa_ee_ldf_128_xp FPR:$fu3, FPR:$fu2, FPR:$fu1, FPR:$fu0, AR:$as, AR:$ad)]>; + +def EE_LDF_64_IP: EE_Inst32<(outs FPR:$fu1, FPR:$fu0, AR:$asr), (ins AR:$as, offset_256_8:$imm8), + "ee.ldf.64.ip\t $fu1, $fu0, $as, $imm8", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> fu1; + bits<4> fu0; + bits<4> as; + bits<8> imm8; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-16} = imm8{7-1}; + let Inst{15-12} = fu1{3-0}; + let Inst{11-8} = fu0{3-0}; + let Inst{7-5} = 0x2; + let Inst{4} = imm8{0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_LDF_64_IP_P : Pseudo<(outs), (ins FPR:$fu1, FPR:$fu0, AR:$as, offset_256_8:$imm8), + "!xtensa_ee_ldf_64_ip_p, $fu1, $fu0, $as, $imm8", + [(int_xtensa_ee_ldf_64_ip FPR:$fu1, FPR:$fu0, AR:$as, timm:$imm8)]>; + +def EE_LDF_64_XP: EE_Inst24<(outs FPR:$fu1, FPR:$fu0, AR:$asr), (ins AR:$as, AR:$ad), + "ee.ldf.64.xp\t $fu1, $fu0, $as, $ad", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> fu1; + bits<4> fu0; + bits<4> as; + bits<4> ad; + + let mayLoad = 1; + + let Inst{23-20} = fu0{3-0}; + let Inst{19-16} = 0x6; + let Inst{15-12} = fu1{3-0}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x0; +} + +let usesCustomInserter = 1 in +def EE_LDF_64_XP_P : Pseudo<(outs), (ins FPR:$fu1, FPR:$fu0, AR:$as, AR:$ad), + "!xtensa_ee_ldf_64_xp_p, $fu1, $fu0, $as, $ad", + [(int_xtensa_ee_ldf_64_xp FPR:$fu1, FPR:$fu0, AR:$as, AR:$ad)]>; + +def EE_LDQA_S16_128_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_16:$imm16), + "ee.ldqa.s16.128.ip\t $as, $imm16", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> as; + bits<8> imm16; + + let mayLoad = 1; + + let Inst{23} = 0x0; + let Inst{22} = imm16{7}; + let Inst{21-15} = 0x2; + let Inst{14-8} = imm16{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def 
EE_LDQA_S16_128_IP_P : Pseudo<(outs), (ins AR:$as, offset_256_16:$imm16), + "!xtensa_ee_ldqa_s16_128_ip_p, $as, $imm16", + [(int_xtensa_ee_ldqa_s16_128_ip AR:$as, timm:$imm16)]>; + +def EE_LDQA_S16_128_XP: EE_Inst24<(outs AR:$asr), (ins AR:$as, AR:$ad), + "ee.ldqa.s16.128.xp\t $as, $ad", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> as; + bits<4> ad; + + let mayLoad = 1; + + let Inst{23-12} = 0x7e4; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_LDQA_S16_128_XP_P : Pseudo<(outs), (ins AR:$as, AR:$ad), + "!xtensa_ee_ldqa_s16_128_xp_p, $as, $ad", + [(int_xtensa_ee_ldqa_s16_128_xp AR:$as, AR:$ad)]>; + +def EE_LDQA_S8_128_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_16:$imm16), + "ee.ldqa.s8.128.ip\t $as, $imm16", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> as; + bits<8> imm16; + + let mayLoad = 1; + + let Inst{23} = 0x0; + let Inst{22} = imm16{7}; + let Inst{21-15} = 0x22; + let Inst{14-8} = imm16{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_LDQA_S8_128_IP_P : Pseudo<(outs), (ins AR:$as, offset_256_16:$imm16), + "!xtensa_ee_ldqa_s8_128_ip_p, $as, $imm16", + [(int_xtensa_ee_ldqa_s8_128_ip AR:$as, timm:$imm16)]>; + +def EE_LDQA_S8_128_XP: EE_Inst24<(outs AR:$asr), (ins AR:$as, AR:$ad), + "ee.ldqa.s8.128.xp\t $as, $ad", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> as; + bits<4> ad; + + let mayLoad = 1; + + let Inst{23-12} = 0x714; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_LDQA_S8_128_XP_P : Pseudo<(outs), (ins AR:$as, AR:$ad), + "!xtensa_ee_ldqa_s8_128_xp_p, $as, $ad", + [(int_xtensa_ee_ldqa_s8_128_xp AR:$as, AR:$ad)]>; + +def EE_LDQA_U16_128_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_16:$imm16), + "ee.ldqa.u16.128.ip\t $as, $imm16", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> as; + bits<8> imm16; + + let mayLoad = 1; + + let Inst{23} = 0x0; + let 
Inst{22} = imm16{7}; + let Inst{21-15} = 0xa; + let Inst{14-8} = imm16{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_LDQA_U16_128_IP_P : Pseudo<(outs), (ins AR:$as, offset_256_16:$imm16), + "!xtensa_ee_ldqa_u16_128_ip_p, $as, $imm16", + [(int_xtensa_ee_ldqa_u16_128_ip AR:$as, timm:$imm16)]>; + +def EE_LDQA_U16_128_XP: EE_Inst24<(outs AR:$asr), (ins AR:$as, AR:$ad), + "ee.ldqa.u16.128.xp\t $as, $ad", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> as; + bits<4> ad; + + let mayLoad = 1; + + let Inst{23-12} = 0x7a4; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_LDQA_U16_128_XP_P : Pseudo<(outs), (ins AR:$as, AR:$ad), + "!xtensa_ee_ldqa_u16_128_xp_p, $as, $ad", + [(int_xtensa_ee_ldqa_u16_128_xp AR:$as, AR:$ad)]>; + +def EE_LDQA_U8_128_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_16:$imm16), + "ee.ldqa.u8.128.ip\t $as, $imm16", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> as; + bits<8> imm16; + + let mayLoad = 1; + + let Inst{23} = 0x0; + let Inst{22} = imm16{7}; + let Inst{21-15} = 0x2a; + let Inst{14-8} = imm16{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_LDQA_U8_128_IP_P : Pseudo<(outs), (ins AR:$as, offset_256_16:$imm16), + "!xtensa_ee_ldqa_u8_128_ip_p, $as, $imm16", + [(int_xtensa_ee_ldqa_u8_128_ip AR:$as, timm:$imm16)]>; + +def EE_LDQA_U8_128_XP: EE_Inst24<(outs AR:$asr), (ins AR:$as, AR:$ad), + "ee.ldqa.u8.128.xp\t $as, $ad", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> as; + bits<4> ad; + + let mayLoad = 1; + + let Inst{23-12} = 0x704; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_LDQA_U8_128_XP_P : Pseudo<(outs), (ins AR:$as, AR:$ad), + "!xtensa_ee_ldqa_u8_128_xp_p, $as, $ad", + [(int_xtensa_ee_ldqa_u8_128_xp AR:$as, AR:$ad)]>; + +def EE_LDXQ_32: EE_Inst32<(outs QR:$qu), (ins QR:$qs, AR:$as, 
select_4:$sel4, select_8:$sel8), + "ee.ldxq.32\t $qu, $qs, $as, $sel4, $sel8", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<3> qs; + bits<4> as; + bits<2> sel4; + bits<3> sel8; + + let mayLoad = 1; + + let Inst{28-22} = 0x70; + let Inst{21-20} = sel4{1-0}; + let Inst{19-17} = qu{2-0}; + let Inst{16-14} = qs{2-0}; + let Inst{13-11} = sel8{2-0}; + let Inst{10-4} = 0x7d; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_LDXQ_32_P : Pseudo<(outs), (ins imm8:$qu, imm8:$qs, AR:$as, select_4:$sel4, select_8:$sel8), + "!xtensa_ee_ldxq_32_p, $qu, $qs, $as, $sel4, $sel8", + [(int_xtensa_ee_ldxq_32 timm:$qu, timm:$qs, AR:$as, timm:$sel4, timm:$sel8)]>; + +def EE_LD_128_USAR_IP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_256_16:$imm16), + "ee.ld.128.usar.ip\t $qu, $as, $imm16", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm16; + + let mayLoad = 1; + + let Inst{23} = 0x1; + let Inst{22} = imm16{7}; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 0x1; + let Inst{15} = qu{0}; + let Inst{14-8} = imm16{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_LD_128_USAR_IP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_256_16:$imm16), + "!xtensa_ee_ld_128_usar_ip_p, $qu, $as, $imm16", + [(int_xtensa_ee_ld_128_usar_ip timm:$qu, AR:$as, timm:$imm16)]>; + +def EE_LD_128_USAR_XP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, AR:$ad), + "ee.ld.128.usar.xp\t $qu, $as, $ad", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + + let mayLoad = 1; + + let Inst{23-22} = 0x2; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qu{0}; + let Inst{14-12} = 0x0; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_LD_128_USAR_XP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad), + "!xtensa_ee_ld_128_usar_xp_p, $qu, $as, $ad", + [(int_xtensa_ee_ld_128_usar_xp timm:$qu, 
AR:$as, AR:$ad)]>; + +def EE_LD_ACCX_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_8:$imm8), + "ee.ld.accx.ip\t $as, $imm8", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> as; + bits<8> imm8; + + let mayLoad = 1; + + let Inst{23} = 0x0; + let Inst{22} = imm8{7}; + let Inst{21-15} = 0x1c; + let Inst{14-8} = imm8{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_LD_ACCX_IP_P : Pseudo<(outs), (ins AR:$as, offset_256_8:$imm8), + "!xtensa_ee_ld_accx_ip_p, $as, $imm8", + [(int_xtensa_ee_ld_accx_ip AR:$as, timm:$imm8)]>; + +def EE_LD_QACC_H_H_32_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_4:$imm4), + "ee.ld.qacc_h.h.32.ip\t $as, $imm4", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> as; + bits<8> imm4; + + let mayLoad = 1; + + let Inst{23} = 0x0; + let Inst{22} = imm4{7}; + let Inst{21-15} = 0x3c; + let Inst{14-8} = imm4{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_LD_QACC_H_H_32_IP_P : Pseudo<(outs), (ins AR:$as, offset_256_4:$imm4), + "!xtensa_ee_ld_qacc_h_h_32_ip_p, $as, $imm4", + [(int_xtensa_ee_ld_qacc_h_h_32_ip AR:$as, timm:$imm4)]>; + +def EE_LD_QACC_H_L_128_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_16:$imm16), + "ee.ld.qacc_h.l.128.ip\t $as, $imm16", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> as; + bits<8> imm16; + + let mayLoad = 1; + + let Inst{23} = 0x0; + let Inst{22} = imm16{7}; + let Inst{21-15} = 0xc; + let Inst{14-8} = imm16{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_LD_QACC_H_L_128_IP_P : Pseudo<(outs), (ins AR:$as, offset_256_16:$imm16), + "!xtensa_ee_ld_qacc_h_l_128_ip_p, $as, $imm16", + [(int_xtensa_ee_ld_qacc_h_l_128_ip AR:$as, timm:$imm16)]>; + +def EE_LD_QACC_L_H_32_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_4:$imm4), + "ee.ld.qacc_l.h.32.ip\t $as, $imm4", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> as; + bits<8> imm4; + + let mayLoad = 1; + + let 
Inst{23} = 0x0; + let Inst{22} = imm4{7}; + let Inst{21-15} = 0x2c; + let Inst{14-8} = imm4{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_LD_QACC_L_H_32_IP_P : Pseudo<(outs), (ins AR:$as, offset_256_4:$imm4), + "!xtensa_ee_ld_qacc_l_h_32_ip_p, $as, $imm4", + [(int_xtensa_ee_ld_qacc_l_h_32_ip AR:$as, timm:$imm4)]>; + +def EE_LD_QACC_L_L_128_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_16:$imm16), + "ee.ld.qacc_l.l.128.ip\t $as, $imm16", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> as; + bits<8> imm16; + + let mayLoad = 1; + + let Inst{23} = 0x0; + let Inst{22} = imm16{7}; + let Inst{21-15} = 0x0; + let Inst{14-8} = imm16{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_LD_QACC_L_L_128_IP_P : Pseudo<(outs), (ins AR:$as, offset_256_16:$imm16), + "!xtensa_ee_ld_qacc_l_l_128_ip_p, $as, $imm16", + [(int_xtensa_ee_ld_qacc_l_l_128_ip AR:$as, timm:$imm16)]>; + +def EE_LD_UA_STATE_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_16:$imm16), + "ee.ld.ua_state.ip\t $as, $imm16", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> as; + bits<8> imm16; + + let mayLoad = 1; + + let Inst{23} = 0x0; + let Inst{22} = imm16{7}; + let Inst{21-15} = 0x20; + let Inst{14-8} = imm16{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_LD_UA_STATE_IP_P : Pseudo<(outs), (ins AR:$as, offset_256_16:$imm16), + "!xtensa_ee_ld_ua_state_ip_p, $as, $imm16", + [(int_xtensa_ee_ld_ua_state_ip AR:$as, timm:$imm16)]>; + +def EE_MOVI_32_A: EE_Inst24<(outs AR:$au), (ins QR:$qs, select_4:$sel4), + "ee.movi.32.a\t $qs, $au, $sel4", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qs; + bits<4> au; + bits<2> sel4; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qs{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qs{0}; + let Inst{14-12} = 0x7; + let Inst{11-10} = sel4{1-0}; + let Inst{9-8} = 0x1; + let Inst{7-4} = au{3-0}; + let Inst{3-0} = 0x4; +} + +let 
usesCustomInserter = 1 in +def EE_MOVI_32_A_P : Pseudo<(outs), (ins imm8:$qs, AR:$au, select_4:$sel4), + "!xtensa_ee_movi_32_a_p, $qs, $au, $sel4", + [(int_xtensa_ee_movi_32_a timm:$qs, AR:$au, timm:$sel4)]>; + +def EE_MOVI_32_Q: EE_Inst24<(outs QR:$qu), (ins AR:$as, select_4:$sel4), + "ee.movi.32.q\t $qu, $as, $sel4", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<2> sel4; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qu{0}; + let Inst{14-12} = 0x3; + let Inst{11-10} = sel4{1-0}; + let Inst{9-8} = 0x2; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_MOVI_32_Q_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, select_4:$sel4), + "!xtensa_ee_movi_32_q_p, $qu, $as, $sel4", + [(int_xtensa_ee_movi_32_q timm:$qu, AR:$as, timm:$sel4)]>; + +def EE_MOV_S16_QACC: EE_Inst24<(outs), (ins QR:$qs), + "ee.mov.s16.qacc\t $qs", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qs; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qs{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qs{0}; + let Inst{14-0} = 0x7f24; +} + +let usesCustomInserter = 1 in +def EE_MOV_S16_QACC_P : Pseudo<(outs), (ins imm8:$qs), + "!xtensa_ee_mov_s16_qacc_p, $qs", + [(int_xtensa_ee_mov_s16_qacc timm:$qs)]>; + +def EE_MOV_S8_QACC: EE_Inst24<(outs), (ins QR:$qs), + "ee.mov.s8.qacc\t $qs", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qs; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qs{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qs{0}; + let Inst{14-0} = 0x7f34; +} + +let usesCustomInserter = 1 in +def EE_MOV_S8_QACC_P : Pseudo<(outs), (ins imm8:$qs), + "!xtensa_ee_mov_s8_qacc_p, $qs", + [(int_xtensa_ee_mov_s8_qacc timm:$qs)]>; + +def EE_MOV_U16_QACC: EE_Inst24<(outs), (ins QR:$qs), + "ee.mov.u16.qacc\t $qs", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qs; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qs{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qs{0}; + let Inst{14-0} = 0x7f64; +} + +let 
usesCustomInserter = 1 in +def EE_MOV_U16_QACC_P : Pseudo<(outs), (ins imm8:$qs), + "!xtensa_ee_mov_u16_qacc_p, $qs", + [(int_xtensa_ee_mov_u16_qacc timm:$qs)]>; + +def EE_MOV_U8_QACC: EE_Inst24<(outs), (ins QR:$qs), + "ee.mov.u8.qacc\t $qs", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qs; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qs{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qs{0}; + let Inst{14-0} = 0x7f74; +} + +let usesCustomInserter = 1 in +def EE_MOV_U8_QACC_P : Pseudo<(outs), (ins imm8:$qs), + "!xtensa_ee_mov_u8_qacc_p, $qs", + [(int_xtensa_ee_mov_u8_qacc timm:$qs)]>; + +def EE_NOTQ: EE_Inst24<(outs QR:$qa), (ins QR:$qx), + "ee.notq\t $qa, $qx", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qa{0}; + let Inst{14-8} = 0x7f; + let Inst{7-6} = qx{2-1}; + let Inst{5} = 0x0; + let Inst{4} = qx{0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_NOTQ_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx), + "!xtensa_ee_notq_p, $qa, $qx", + [(int_xtensa_ee_notq timm:$qa, timm:$qx)]>; + +def EE_ORQ: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.orq\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qa{0}; + let Inst{14-12} = 0x7; + let Inst{11-10} = qy{2-1}; + let Inst{9-8} = 0x0; + let Inst{7-6} = qx{2-1}; + let Inst{5} = qy{0}; + let Inst{4} = qx{0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_ORQ_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_orq_p, $qa, $qx, $qy", + [(int_xtensa_ee_orq timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_SLCI_2Q: EE_Inst24<(outs QR:$qs1r, QR:$qs0r), (ins QR:$qs1, QR:$qs0, select_16:$sar16), + "ee.slci.2q\t $qs1, $qs0, $sar16", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qs1; + bits<3> qs0; + bits<4> sar16; + + + 
let Inst{23-22} = 0x3; + let Inst{21-20} = qs1{2-1}; + let Inst{19-16} = 0xc; + let Inst{15} = qs1{0}; + let Inst{14-12} = qs0{2-0}; + let Inst{11-8} = 0x6; + let Inst{7-4} = sar16{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_SLCI_2Q_P : Pseudo<(outs), (ins imm8:$qs1, imm8:$qs0, select_16:$sar16), + "!xtensa_ee_slci_2q_p, $qs1, $qs0, $sar16", + [(int_xtensa_ee_slci_2q timm:$qs1, timm:$qs0, timm:$sar16)]>; + +def EE_SLCXXP_2Q: EE_Inst24<(outs QR:$qs1r, QR:$qs0r, AR:$asr), (ins QR:$qs1, QR:$qs0, AR:$as, AR:$ad), + "ee.slcxxp.2q\t $qs1, $qs0, $as, $ad", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qs1; + bits<3> qs0; + bits<4> as; + bits<4> ad; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qs1{2-1}; + let Inst{19-16} = 0x6; + let Inst{15} = qs1{0}; + let Inst{14-12} = qs0{2-0}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_SLCXXP_2Q_P : Pseudo<(outs), (ins imm8:$qs1, imm8:$qs0, AR:$as, AR:$ad), + "!xtensa_ee_slcxxp_2q_p, $qs1, $qs0, $as, $ad", + [(int_xtensa_ee_slcxxp_2q timm:$qs1, timm:$qs0, AR:$as, AR:$ad)]>; + +def EE_SRCI_2Q: EE_Inst24<(outs QR:$qs1r, QR:$qs0r), (ins QR:$qs1, QR:$qs0, select_16:$sar16), + "ee.srci.2q\t $qs1, $qs0, $sar16", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qs1; + bits<3> qs0; + bits<4> sar16; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qs1{2-1}; + let Inst{19-16} = 0xc; + let Inst{15} = qs1{0}; + let Inst{14-12} = qs0{2-0}; + let Inst{11-8} = 0xa; + let Inst{7-4} = sar16{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_SRCI_2Q_P : Pseudo<(outs), (ins imm8:$qs1, imm8:$qs0, select_16:$sar16), + "!xtensa_ee_srci_2q_p, $qs1, $qs0, $sar16", + [(int_xtensa_ee_srci_2q timm:$qs1, timm:$qs0, timm:$sar16)]>; + +def EE_SRCMB_S16_QACC: EE_Inst24<(outs QR:$qu), (ins AR:$as, select_2:$sel2), + "ee.srcmb.s16.qacc\t $qu, $as, $sel2", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<1> sel2; + + + let 
Inst{23-22} = 0x3; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qu{0}; + let Inst{14-11} = 0xe; + let Inst{10} = sel2{0}; + let Inst{9-8} = 0x2; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_SRCMB_S16_QACC_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, select_2:$sel2), + "!xtensa_ee_srcmb_s16_qacc_p, $qu, $as, $sel2", + [(int_xtensa_ee_srcmb_s16_qacc timm:$qu, AR:$as, timm:$sel2)]>; + +def EE_SRCMB_S8_QACC: EE_Inst24<(outs QR:$qu), (ins AR:$as, select_2:$sel2), + "ee.srcmb.s8.qacc\t $qu, $as, $sel2", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<1> sel2; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qu{0}; + let Inst{14-11} = 0xf; + let Inst{10} = sel2{0}; + let Inst{9-8} = 0x2; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_SRCMB_S8_QACC_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, select_2:$sel2), + "!xtensa_ee_srcmb_s8_qacc_p, $qu, $as, $sel2", + [(int_xtensa_ee_srcmb_s8_qacc timm:$qu, AR:$as, timm:$sel2)]>; + +def EE_SRCQ_128_ST_INCP: EE_Inst24<(outs AR:$asr), (ins QR:$qs0, QR:$qs1, AR:$as), + "ee.srcq.128.st.incp\t $qs0, $qs1, $as", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qs0; + bits<3> qs1; + bits<4> as; + + let mayStore = 1; + + let Inst{23-22} = 0x3; + let Inst{21-20} = qs1{2-1}; + let Inst{19-16} = 0xc; + let Inst{15} = qs1{0}; + let Inst{14-12} = qs0{2-0}; + let Inst{11-8} = 0xe; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_SRCQ_128_ST_INCP_P : Pseudo<(outs), (ins imm8:$qs0, imm8:$qs1, AR:$as), + "!xtensa_ee_srcq_128_st_incp_p, $qs0, $qs1, $as", + [(int_xtensa_ee_srcq_128_st_incp timm:$qs0, timm:$qs1, AR:$as)]>; + +def EE_SRCXXP_2Q: EE_Inst24<(outs QR:$qs1r, QR:$qs0r, AR:$asr), (ins QR:$qs1, QR:$qs0, AR:$as, AR:$ad), + "ee.srcxxp.2q\t $qs1, $qs0, $as, $ad", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qs1; + bits<3> 
qs0; + bits<4> as; + bits<4> ad; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qs1{2-1}; + let Inst{19-16} = 0x6; + let Inst{15} = qs1{0}; + let Inst{14-12} = qs0{2-0}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_SRCXXP_2Q_P : Pseudo<(outs), (ins imm8:$qs1, imm8:$qs0, AR:$as, AR:$ad), + "!xtensa_ee_srcxxp_2q_p, $qs1, $qs0, $as, $ad", + [(int_xtensa_ee_srcxxp_2q timm:$qs1, timm:$qs0, AR:$as, AR:$ad)]>; + +def EE_SRC_Q: EE_Inst24<(outs QR:$qa), (ins QR:$qs0, QR:$qs1), + "ee.src.q\t $qa, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qs0; + bits<3> qs1; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qs1{2-1}; + let Inst{19-16} = 0xc; + let Inst{15} = qs1{0}; + let Inst{14-12} = qs0{2-0}; + let Inst{11-7} = 0x6; + let Inst{6-4} = qa{2-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_SRC_Q_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_src_q_p, $qa, $qs0, $qs1", + [(int_xtensa_ee_src_q timm:$qa, timm:$qs0, timm:$qs1)]>; + +def EE_SRC_Q_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, offset_256_16:$imm16, QR:$qs0, QR:$qs1), + "ee.src.q.ld.ip\t $qu, $as, $imm16, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm16; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = imm16{7-5}; + let Inst{19-17} = qu{2-0}; + let Inst{16-14} = qs0{2-0}; + let Inst{13-11} = imm16{4-2}; + let Inst{10-8} = qs1{2-0}; + let Inst{7-6} = 0x0; + let Inst{5-4} = imm16{1-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_SRC_Q_LD_IP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_256_16:$imm16, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_src_q_ld_ip_p, $qu, $as, $imm16, $qs0, $qs1", + [(int_xtensa_ee_src_q_ld_ip timm:$qu, AR:$as, timm:$imm16, timm:$qs0, timm:$qs1)]>; + +def EE_SRC_Q_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), 
(ins AR:$as, AR:$ad, QR:$qs0, QR:$qs1), + "ee.src.q.ld.xp\t $qu, $as, $ad, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + let Inst{28-20} = 0x1d0; + let Inst{19-17} = qu{2-0}; + let Inst{16-14} = qs0{2-0}; + let Inst{13-11} = 0x0; + let Inst{10-8} = qs1{2-0}; + let Inst{7-4} = ad{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_SRC_Q_LD_XP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_src_q_ld_xp_p, $qu, $as, $ad, $qs0, $qs1", + [(int_xtensa_ee_src_q_ld_xp timm:$qu, AR:$as, AR:$ad, timm:$qs0, timm:$qs1)]>; + +def EE_SRC_Q_QUP: EE_Inst24<(outs QR:$qa, QR:$qs0r), (ins QR:$qs0, QR:$qs1), + "ee.src.q.qup\t $qa, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qs0; + bits<3> qs1; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qs1{2-1}; + let Inst{19-16} = 0xc; + let Inst{15} = qs1{0}; + let Inst{14-12} = qs0{2-0}; + let Inst{11-7} = 0xe; + let Inst{6-4} = qa{2-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_SRC_Q_QUP_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_src_q_qup_p, $qa, $qs0, $qs1", + [(int_xtensa_ee_src_q_qup timm:$qa, timm:$qs0, timm:$qs1)]>; + +def EE_SRS_ACCX: EE_Inst24<(outs AR:$au), (ins AR:$as, select_2:$sel2), + "ee.srs.accx\t $au, $as, $sel2", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> au; + bits<4> as; + bits<1> sel2; + + + let Inst{23-15} = 0xfc; + let Inst{14} = sel2{0}; + let Inst{13-12} = 0x1; + let Inst{11-8} = au{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_SRS_ACCX_P : Pseudo<(outs), (ins AR:$au, AR:$as, select_2:$sel2), + "!xtensa_ee_srs_accx_p, $au, $as, $sel2", + [(int_xtensa_ee_srs_accx AR:$au, AR:$as, timm:$sel2)]>; + +def EE_STF_128_IP: EE_Inst32<(outs AR:$asr), (ins FPR:$fv3, FPR:$fv2, FPR:$fv1, FPR:$fv0, AR:$as, offset_16_16:$imm16f), + 
"ee.stf.128.ip\t $fv3, $fv2, $fv1, $fv0, $as, $imm16f", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> fv3; + bits<4> fv2; + bits<4> fv1; + bits<4> fv0; + bits<4> as; + bits<4> imm16f; + + let mayStore = 1; + + let Inst{28-24} = 0x12; + let Inst{23-20} = fv3{3-0}; + let Inst{19-16} = fv2{3-0}; + let Inst{15-12} = fv1{3-0}; + let Inst{11-8} = fv0{3-0}; + let Inst{7-4} = imm16f{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_STF_128_IP_P : Pseudo<(outs), (ins FPR:$fv3, FPR:$fv2, FPR:$fv1, FPR:$fv0, AR:$as, offset_16_16:$imm16f), + "!xtensa_ee_stf_128_ip_p, $fv3, $fv2, $fv1, $fv0, $as, $imm16f", + [(int_xtensa_ee_stf_128_ip FPR:$fv3, FPR:$fv2, FPR:$fv1, FPR:$fv0, AR:$as, timm:$imm16f)]>; + +def EE_STF_128_XP: EE_Inst32<(outs AR:$asr), (ins FPR:$fv3, FPR:$fv2, FPR:$fv1, FPR:$fv0, AR:$as, AR:$ad), + "ee.stf.128.xp\t $fv3, $fv2, $fv1, $fv0, $as, $ad", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> fv3; + bits<4> fv2; + bits<4> fv1; + bits<4> fv0; + bits<4> as; + bits<4> ad; + + let mayStore = 1; + + let Inst{28-24} = 0x13; + let Inst{23-20} = fv3{3-0}; + let Inst{19-16} = fv2{3-0}; + let Inst{15-12} = fv1{3-0}; + let Inst{11-8} = fv0{3-0}; + let Inst{7-4} = ad{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_STF_128_XP_P : Pseudo<(outs), (ins FPR:$fv3, FPR:$fv2, FPR:$fv1, FPR:$fv0, AR:$as, AR:$ad), + "!xtensa_ee_stf_128_xp_p, $fv3, $fv2, $fv1, $fv0, $as, $ad", + [(int_xtensa_ee_stf_128_xp FPR:$fv3, FPR:$fv2, FPR:$fv1, FPR:$fv0, AR:$as, AR:$ad)]>; + +def EE_STF_64_IP: EE_Inst32<(outs AR:$asr), (ins FPR:$fv1, FPR:$fv0, AR:$as, offset_256_8:$imm8), + "ee.stf.64.ip\t $fv1, $fv0, $as, $imm8", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> fv1; + bits<4> fv0; + bits<4> as; + bits<8> imm8; + + let mayStore = 1; + + let Inst{28-23} = 0x38; + let Inst{22-16} = imm8{7-1}; + let Inst{15-12} = fv1{3-0}; + let Inst{11-8} = fv0{3-0}; + let Inst{7-5} = 0x3; + let Inst{4} = imm8{0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 
1 in +def EE_STF_64_IP_P : Pseudo<(outs), (ins FPR:$fv1, FPR:$fv0, AR:$as, offset_256_8:$imm8), + "!xtensa_ee_stf_64_ip_p, $fv1, $fv0, $as, $imm8", + [(int_xtensa_ee_stf_64_ip FPR:$fv1, FPR:$fv0, AR:$as, timm:$imm8)]>; + +def EE_STF_64_XP: EE_Inst24<(outs AR:$asr), (ins FPR:$fv1, FPR:$fv0, AR:$as, AR:$ad), + "ee.stf.64.xp\t $fv1, $fv0, $as, $ad", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> fv1; + bits<4> fv0; + bits<4> as; + bits<4> ad; + + let mayStore = 1; + + let Inst{23-20} = fv0{3-0}; + let Inst{19-16} = 0x7; + let Inst{15-12} = fv1{3-0}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x0; +} + +let usesCustomInserter = 1 in +def EE_STF_64_XP_P : Pseudo<(outs), (ins FPR:$fv1, FPR:$fv0, AR:$as, AR:$ad), + "!xtensa_ee_stf_64_xp_p, $fv1, $fv0, $as, $ad", + [(int_xtensa_ee_stf_64_xp FPR:$fv1, FPR:$fv0, AR:$as, AR:$ad)]>; + +def EE_STXQ_32: EE_Inst32<(outs), (ins QR:$qv, QR:$qs, AR:$as, select_4:$sel4, select_8:$sel8), + "ee.stxq.32\t $qv, $qs, $as, $sel4, $sel8", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<3> qs; + bits<4> as; + bits<2> sel4; + bits<3> sel8; + + let mayStore = 1; + + let Inst{28-22} = 0x73; + let Inst{21-20} = sel4{1-0}; + let Inst{19-17} = 0x0; + let Inst{16-14} = qs{2-0}; + let Inst{13-11} = sel8{2-0}; + let Inst{10-8} = qv{2-0}; + let Inst{7-4} = 0x0; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_STXQ_32_P : Pseudo<(outs), (ins imm8:$qv, imm8:$qs, AR:$as, select_4:$sel4, select_8:$sel8), + "!xtensa_ee_stxq_32_p, $qv, $qs, $as, $sel4, $sel8", + [(int_xtensa_ee_stxq_32 timm:$qv, timm:$qs, AR:$as, timm:$sel4, timm:$sel8)]>; + +def EE_ST_ACCX_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_8:$imm8), + "ee.st.accx.ip\t $as, $imm8", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> as; + bits<8> imm8; + + let mayStore = 1; + + let Inst{23} = 0x0; + let Inst{22} = imm8{7}; + let Inst{21-15} = 0x4; + let Inst{14-8} = imm8{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let 
usesCustomInserter = 1 in +def EE_ST_ACCX_IP_P : Pseudo<(outs), (ins AR:$as, offset_256_8:$imm8), + "!xtensa_ee_st_accx_ip_p, $as, $imm8", + [(int_xtensa_ee_st_accx_ip AR:$as, timm:$imm8)]>; + +def EE_ST_QACC_H_H_32_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_4:$imm4), + "ee.st.qacc_h.h.32.ip\t $as, $imm4", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> as; + bits<8> imm4; + + let mayStore = 1; + + let Inst{23} = 0x0; + let Inst{22} = imm4{7}; + let Inst{21-15} = 0x24; + let Inst{14-8} = imm4{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_ST_QACC_H_H_32_IP_P : Pseudo<(outs), (ins AR:$as, offset_256_4:$imm4), + "!xtensa_ee_st_qacc_h_h_32_ip_p, $as, $imm4", + [(int_xtensa_ee_st_qacc_h_h_32_ip AR:$as, timm:$imm4)]>; + +def EE_ST_QACC_H_L_128_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_16:$imm16), + "ee.st.qacc_h.l.128.ip\t $as, $imm16", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> as; + bits<8> imm16; + + let mayStore = 1; + + let Inst{23} = 0x0; + let Inst{22} = imm16{7}; + let Inst{21-15} = 0x1a; + let Inst{14-8} = imm16{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_ST_QACC_H_L_128_IP_P : Pseudo<(outs), (ins AR:$as, offset_256_16:$imm16), + "!xtensa_ee_st_qacc_h_l_128_ip_p, $as, $imm16", + [(int_xtensa_ee_st_qacc_h_l_128_ip AR:$as, timm:$imm16)]>; + +def EE_ST_QACC_L_H_32_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_4:$imm4), + "ee.st.qacc_l.h.32.ip\t $as, $imm4", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> as; + bits<8> imm4; + + let mayStore = 1; + + let Inst{23} = 0x0; + let Inst{22} = imm4{7}; + let Inst{21-15} = 0x3a; + let Inst{14-8} = imm4{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_ST_QACC_L_H_32_IP_P : Pseudo<(outs), (ins AR:$as, offset_256_4:$imm4), + "!xtensa_ee_st_qacc_l_h_32_ip_p, $as, $imm4", + [(int_xtensa_ee_st_qacc_l_h_32_ip AR:$as, timm:$imm4)]>; + +def 
EE_ST_QACC_L_L_128_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_16:$imm16), + "ee.st.qacc_l.l.128.ip\t $as, $imm16", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> as; + bits<8> imm16; + + let mayStore = 1; + + let Inst{23} = 0x0; + let Inst{22} = imm16{7}; + let Inst{21-15} = 0x18; + let Inst{14-8} = imm16{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_ST_QACC_L_L_128_IP_P : Pseudo<(outs), (ins AR:$as, offset_256_16:$imm16), + "!xtensa_ee_st_qacc_l_l_128_ip_p, $as, $imm16", + [(int_xtensa_ee_st_qacc_l_l_128_ip AR:$as, timm:$imm16)]>; + +def EE_ST_UA_STATE_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_16:$imm16), + "ee.st.ua_state.ip\t $as, $imm16", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> as; + bits<8> imm16; + + let mayStore = 1; + + let Inst{23} = 0x0; + let Inst{22} = imm16{7}; + let Inst{21-15} = 0x38; + let Inst{14-8} = imm16{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_ST_UA_STATE_IP_P : Pseudo<(outs), (ins AR:$as, offset_256_16:$imm16), + "!xtensa_ee_st_ua_state_ip_p, $as, $imm16", + [(int_xtensa_ee_st_ua_state_ip AR:$as, timm:$imm16)]>; + +def EE_VADDS_S16: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vadds.s16\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x0; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0x64; +} + +let usesCustomInserter = 1 in +def EE_VADDS_S16_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vadds_s16_p, $qa, $qx, $qy", + [(int_xtensa_ee_vadds_s16 timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VADDS_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, QR:$qx, QR:$qy), + "ee.vadds.s16.ld.incp\t $qu, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ 
+ bits<3> qu; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-4} = 0x2d; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VADDS_S16_LD_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vadds_s16_ld_incp_p, $qu, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vadds_s16_ld_incp timm:$qu, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VADDS_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, QR:$qx, QR:$qy), + "ee.vadds.s16.st.incp\t $qv, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayStore = 1; + + let Inst{28-20} = 0x1c9; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qv{2-0}; + let Inst{7-4} = 0x0; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VADDS_S16_ST_INCP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vadds_s16_st_incp_p, $qv, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vadds_s16_st_incp timm:$qv, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VADDS_S32: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vadds.s32\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x0; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0x74; +} + +let usesCustomInserter = 1 in +def EE_VADDS_S32_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vadds_s32_p, $qa, $qx, $qy", + [(int_xtensa_ee_vadds_s32 timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VADDS_S32_LD_INCP: EE_Inst32<(outs 
QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, QR:$qx, QR:$qy), + "ee.vadds.s32.ld.incp\t $qu, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-4} = 0x3d; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VADDS_S32_LD_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vadds_s32_ld_incp_p, $qu, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vadds_s32_ld_incp timm:$qu, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VADDS_S32_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, QR:$qx, QR:$qy), + "ee.vadds.s32.st.incp\t $qv, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayStore = 1; + + let Inst{28-20} = 0x1c9; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qv{2-0}; + let Inst{7-4} = 0x1; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VADDS_S32_ST_INCP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vadds_s32_st_incp_p, $qv, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vadds_s32_st_incp timm:$qv, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VADDS_S8: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vadds.s8\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x0; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0x84; +} + +let usesCustomInserter = 1 in +def EE_VADDS_S8_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + 
"!xtensa_ee_vadds_s8_p, $qa, $qx, $qy", + [(int_xtensa_ee_vadds_s8 timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VADDS_S8_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, QR:$qx, QR:$qy), + "ee.vadds.s8.ld.incp\t $qu, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-4} = 0x1c; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VADDS_S8_LD_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vadds_s8_ld_incp_p, $qu, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vadds_s8_ld_incp timm:$qu, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VADDS_S8_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, QR:$qx, QR:$qy), + "ee.vadds.s8.st.incp\t $qv, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayStore = 1; + + let Inst{28-20} = 0x1c9; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qv{2-0}; + let Inst{7-4} = 0x2; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VADDS_S8_ST_INCP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vadds_s8_st_incp_p, $qv, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vadds_s8_st_incp timm:$qv, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VCMP_EQ_S16: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vcmp.eq.s16\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x0; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + 
let Inst{7-0} = 0x94; +} + +let usesCustomInserter = 1 in +def EE_VCMP_EQ_S16_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vcmp_eq_s16_p, $qa, $qx, $qy", + [(int_xtensa_ee_vcmp_eq_s16 timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VCMP_EQ_S32: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vcmp.eq.s32\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x0; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0xa4; +} + +let usesCustomInserter = 1 in +def EE_VCMP_EQ_S32_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vcmp_eq_s32_p, $qa, $qx, $qy", + [(int_xtensa_ee_vcmp_eq_s32 timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VCMP_EQ_S8: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vcmp.eq.s8\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x0; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0xb4; +} + +let usesCustomInserter = 1 in +def EE_VCMP_EQ_S8_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vcmp_eq_s8_p, $qa, $qx, $qy", + [(int_xtensa_ee_vcmp_eq_s8 timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VCMP_GT_S16: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vcmp.gt.s16\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x0; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0xc4; +} + +let usesCustomInserter = 1 in +def EE_VCMP_GT_S16_P : Pseudo<(outs), 
(ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vcmp_gt_s16_p, $qa, $qx, $qy", + [(int_xtensa_ee_vcmp_gt_s16 timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VCMP_GT_S32: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vcmp.gt.s32\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x0; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0xd4; +} + +let usesCustomInserter = 1 in +def EE_VCMP_GT_S32_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vcmp_gt_s32_p, $qa, $qx, $qy", + [(int_xtensa_ee_vcmp_gt_s32 timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VCMP_GT_S8: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vcmp.gt.s8\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x0; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0xe4; +} + +let usesCustomInserter = 1 in +def EE_VCMP_GT_S8_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vcmp_gt_s8_p, $qa, $qx, $qy", + [(int_xtensa_ee_vcmp_gt_s8 timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VCMP_LT_S16: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vcmp.lt.s16\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x0; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0xf4; +} + +let usesCustomInserter = 1 in +def EE_VCMP_LT_S16_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vcmp_lt_s16_p, $qa, $qx, $qy", + 
[(int_xtensa_ee_vcmp_lt_s16 timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VCMP_LT_S32: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vcmp.lt.s32\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VCMP_LT_S32_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vcmp_lt_s32_p, $qa, $qx, $qy", + [(int_xtensa_ee_vcmp_lt_s32 timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VCMP_LT_S8: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vcmp.lt.s8\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0x14; +} + +let usesCustomInserter = 1 in +def EE_VCMP_LT_S8_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vcmp_lt_s8_p, $qa, $qx, $qy", + [(int_xtensa_ee_vcmp_lt_s8 timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VLDBC_16: EE_Inst24<(outs QR:$qu), (ins AR:$as), + "ee.vldbc.16\t $qu, $as", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + + let mayLoad = 1; + + let Inst{23-22} = 0x3; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qu{0}; + let Inst{14-8} = 0x73; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VLDBC_16_P : Pseudo<(outs), (ins imm8:$qu, AR:$as), + "!xtensa_ee_vldbc_16_p, $qu, $as", + [(int_xtensa_ee_vldbc_16 timm:$qu, AR:$as)]>; + +def EE_VLDBC_16_IP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_128_2:$imm2), + "ee.vldbc.16.ip\t $qu, $as, $imm2", []>, 
Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<7> imm2; + + let mayLoad = 1; + + let Inst{23-22} = 0x2; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 0x5; + let Inst{15} = qu{0}; + let Inst{14-8} = imm2{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VLDBC_16_IP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_128_2:$imm2), + "!xtensa_ee_vldbc_16_ip_p, $qu, $as, $imm2", + [(int_xtensa_ee_vldbc_16_ip timm:$qu, AR:$as, timm:$imm2)]>; + +def EE_VLDBC_16_XP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, AR:$ad), + "ee.vldbc.16.xp\t $qu, $as, $ad", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + + let mayLoad = 1; + + let Inst{23-22} = 0x2; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qu{0}; + let Inst{14-12} = 0x4; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VLDBC_16_XP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad), + "!xtensa_ee_vldbc_16_xp_p, $qu, $as, $ad", + [(int_xtensa_ee_vldbc_16_xp timm:$qu, AR:$as, AR:$ad)]>; + +def EE_VLDBC_32: EE_Inst24<(outs QR:$qu), (ins AR:$as), + "ee.vldbc.32\t $qu, $as", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + + let mayLoad = 1; + + let Inst{23-22} = 0x3; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qu{0}; + let Inst{14-8} = 0x77; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VLDBC_32_P : Pseudo<(outs), (ins imm8:$qu, AR:$as), + "!xtensa_ee_vldbc_32_p, $qu, $as", + [(int_xtensa_ee_vldbc_32 timm:$qu, AR:$as)]>; + +def EE_VLDBC_32_IP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_256_4:$imm4), + "ee.vldbc.32.ip\t $qu, $as, $imm4", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm4; + + let mayLoad = 1; + + let Inst{23} = 0x1; + let Inst{22} = imm4{7}; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 
0x2; + let Inst{15} = qu{0}; + let Inst{14-8} = imm4{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VLDBC_32_IP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_256_4:$imm4), + "!xtensa_ee_vldbc_32_ip_p, $qu, $as, $imm4", + [(int_xtensa_ee_vldbc_32_ip timm:$qu, AR:$as, timm:$imm4)]>; + +def EE_VLDBC_32_XP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, AR:$ad), + "ee.vldbc.32.xp\t $qu, $as, $ad", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + + let mayLoad = 1; + + let Inst{23-22} = 0x2; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qu{0}; + let Inst{14-12} = 0x1; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VLDBC_32_XP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad), + "!xtensa_ee_vldbc_32_xp_p, $qu, $as, $ad", + [(int_xtensa_ee_vldbc_32_xp timm:$qu, AR:$as, AR:$ad)]>; + +def EE_VLDBC_8: EE_Inst24<(outs QR:$qu), (ins AR:$as), + "ee.vldbc.8\t $qu, $as", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + + let mayLoad = 1; + + let Inst{23-22} = 0x3; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qu{0}; + let Inst{14-8} = 0x3b; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VLDBC_8_P : Pseudo<(outs), (ins imm8:$qu, AR:$as), + "!xtensa_ee_vldbc_8_p, $qu, $as", + [(int_xtensa_ee_vldbc_8 timm:$qu, AR:$as)]>; + +def EE_VLDBC_8_IP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_128_1:$imm1), + "ee.vldbc.8.ip\t $qu, $as, $imm1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<7> imm1; + + let mayLoad = 1; + + let Inst{23-22} = 0x3; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 0x5; + let Inst{15} = qu{0}; + let Inst{14-8} = imm1{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VLDBC_8_IP_P : Pseudo<(outs), (ins imm8:$qu, 
AR:$as, offset_128_1:$imm1), + "!xtensa_ee_vldbc_8_ip_p, $qu, $as, $imm1", + [(int_xtensa_ee_vldbc_8_ip timm:$qu, AR:$as, timm:$imm1)]>; + +def EE_VLDBC_8_XP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, AR:$ad), + "ee.vldbc.8.xp\t $qu, $as, $ad", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + + let mayLoad = 1; + + let Inst{23-22} = 0x2; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qu{0}; + let Inst{14-12} = 0x5; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VLDBC_8_XP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad), + "!xtensa_ee_vldbc_8_xp_p, $qu, $as, $ad", + [(int_xtensa_ee_vldbc_8_xp timm:$qu, AR:$as, AR:$ad)]>; + +def EE_VLDHBC_16_INCP: EE_Inst24<(outs QR:$qu, QR:$qu1, AR:$asr), (ins AR:$as), + "ee.vldhbc.16.incp\t $qu, $qu1, $as", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<3> qu1; + bits<4> as; + + let mayLoad = 1; + + let Inst{23-22} = 0x3; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 0xc; + let Inst{15} = qu{0}; + let Inst{14-12} = qu1{2-0}; + let Inst{11-8} = 0x2; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VLDHBC_16_INCP_P : Pseudo<(outs), (ins imm8:$qu, imm8:$qu1, AR:$as), + "!xtensa_ee_vldhbc_16_incp_p, $qu, $qu1, $as", + [(int_xtensa_ee_vldhbc_16_incp timm:$qu, timm:$qu1, AR:$as)]>; + +def EE_VLD_128_IP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_256_16:$imm16), + "ee.vld.128.ip\t $qu, $as, $imm16", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm16; + + let mayLoad = 1; + + let Inst{23} = 0x1; + let Inst{22} = imm16{7}; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 0x3; + let Inst{15} = qu{0}; + let Inst{14-8} = imm16{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VLD_128_IP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_256_16:$imm16), + 
"!xtensa_ee_vld_128_ip_p, $qu, $as, $imm16", + [(int_xtensa_ee_vld_128_ip timm:$qu, AR:$as, timm:$imm16)]>; + +def EE_VLD_128_XP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, AR:$ad), + "ee.vld.128.xp\t $qu, $as, $ad", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + + let mayLoad = 1; + + let Inst{23-22} = 0x2; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qu{0}; + let Inst{14-12} = 0x2; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VLD_128_XP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad), + "!xtensa_ee_vld_128_xp_p, $qu, $as, $ad", + [(int_xtensa_ee_vld_128_xp timm:$qu, AR:$as, AR:$ad)]>; + +def EE_VLD_H_64_IP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_256_8:$imm8), + "ee.vld.h.64.ip\t $qu, $as, $imm8", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm8; + + let mayLoad = 1; + + let Inst{23} = 0x1; + let Inst{22} = imm8{7}; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 0x8; + let Inst{15} = qu{0}; + let Inst{14-8} = imm8{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VLD_H_64_IP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_256_8:$imm8), + "!xtensa_ee_vld_h_64_ip_p, $qu, $as, $imm8", + [(int_xtensa_ee_vld_h_64_ip timm:$qu, AR:$as, timm:$imm8)]>; + +def EE_VLD_H_64_XP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, AR:$ad), + "ee.vld.h.64.xp\t $qu, $as, $ad", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + + let mayLoad = 1; + + let Inst{23-22} = 0x2; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qu{0}; + let Inst{14-12} = 0x6; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VLD_H_64_XP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad), + "!xtensa_ee_vld_h_64_xp_p, $qu, $as, $ad", + 
[(int_xtensa_ee_vld_h_64_xp timm:$qu, AR:$as, AR:$ad)]>; + +def EE_VLD_L_64_IP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_256_8:$imm8), + "ee.vld.l.64.ip\t $qu, $as, $imm8", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm8; + + let mayLoad = 1; + + let Inst{23} = 0x1; + let Inst{22} = imm8{7}; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 0x9; + let Inst{15} = qu{0}; + let Inst{14-8} = imm8{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VLD_L_64_IP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_256_8:$imm8), + "!xtensa_ee_vld_l_64_ip_p, $qu, $as, $imm8", + [(int_xtensa_ee_vld_l_64_ip timm:$qu, AR:$as, timm:$imm8)]>; + +def EE_VLD_L_64_XP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, AR:$ad), + "ee.vld.l.64.xp\t $qu, $as, $ad", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + + let mayLoad = 1; + + let Inst{23-22} = 0x2; + let Inst{21-20} = qu{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qu{0}; + let Inst{14-12} = 0x3; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VLD_L_64_XP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad), + "!xtensa_ee_vld_l_64_xp_p, $qu, $as, $ad", + [(int_xtensa_ee_vld_l_64_xp timm:$qu, AR:$as, AR:$ad)]>; + +def EE_VMAX_S16: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vmax.s16\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0x24; +} + +let usesCustomInserter = 1 in +def EE_VMAX_S16_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmax_s16_p, $qa, $qx, $qy", + [(int_xtensa_ee_vmax_s16 timm:$qa, timm:$qx, timm:$qy)]>; + +def 
EE_VMAX_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, QR:$qx, QR:$qy), + "ee.vmax.s16.ld.incp\t $qu, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-4} = 0x1d; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMAX_S16_LD_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmax_s16_ld_incp_p, $qu, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vmax_s16_ld_incp timm:$qu, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VMAX_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, QR:$qx, QR:$qy), + "ee.vmax.s16.st.incp\t $qv, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayStore = 1; + + let Inst{28-20} = 0x1c9; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qv{2-0}; + let Inst{7-4} = 0x3; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMAX_S16_ST_INCP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmax_s16_st_incp_p, $qv, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vmax_s16_st_incp timm:$qv, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VMAX_S32: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vmax.s32\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0x34; +} + +let usesCustomInserter = 1 in +def EE_VMAX_S32_P : Pseudo<(outs), (ins imm8:$qa, 
imm8:$qx, imm8:$qy), + "!xtensa_ee_vmax_s32_p, $qa, $qx, $qy", + [(int_xtensa_ee_vmax_s32 timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VMAX_S32_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, QR:$qx, QR:$qy), + "ee.vmax.s32.ld.incp\t $qu, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-4} = 0x1e; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMAX_S32_LD_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmax_s32_ld_incp_p, $qu, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vmax_s32_ld_incp timm:$qu, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VMAX_S32_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, QR:$qx, QR:$qy), + "ee.vmax.s32.st.incp\t $qv, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayStore = 1; + + let Inst{28-20} = 0x1ca; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qv{2-0}; + let Inst{7-4} = 0x0; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMAX_S32_ST_INCP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmax_s32_st_incp_p, $qv, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vmax_s32_st_incp timm:$qv, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VMAX_S8: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vmax.s8\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} 
= qx{2-0}; + let Inst{7-0} = 0x44; +} + +let usesCustomInserter = 1 in +def EE_VMAX_S8_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmax_s8_p, $qa, $qx, $qy", + [(int_xtensa_ee_vmax_s8 timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VMAX_S8_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, QR:$qx, QR:$qy), + "ee.vmax.s8.ld.incp\t $qu, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-4} = 0x1f; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMAX_S8_LD_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmax_s8_ld_incp_p, $qu, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vmax_s8_ld_incp timm:$qu, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VMAX_S8_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, QR:$qx, QR:$qy), + "ee.vmax.s8.st.incp\t $qv, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayStore = 1; + + let Inst{28-20} = 0x1cb; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qv{2-0}; + let Inst{7-4} = 0x0; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMAX_S8_ST_INCP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmax_s8_st_incp_p, $qv, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vmax_s8_st_incp timm:$qv, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VMIN_S16: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vmin.s16\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let 
Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0x54; +} + +let usesCustomInserter = 1 in +def EE_VMIN_S16_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmin_s16_p, $qa, $qx, $qy", + [(int_xtensa_ee_vmin_s16 timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VMIN_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, QR:$qx, QR:$qy), + "ee.vmin.s16.ld.incp\t $qu, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-4} = 0x2e; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMIN_S16_LD_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmin_s16_ld_incp_p, $qu, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vmin_s16_ld_incp timm:$qu, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VMIN_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, QR:$qx, QR:$qy), + "ee.vmin.s16.st.incp\t $qv, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayStore = 1; + + let Inst{28-20} = 0x1ca; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qv{2-0}; + let Inst{7-4} = 0x1; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMIN_S16_ST_INCP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmin_s16_st_incp_p, $qv, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vmin_s16_st_incp timm:$qv, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VMIN_S32: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vmin.s32\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + 
bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0x64; +} + +let usesCustomInserter = 1 in +def EE_VMIN_S32_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmin_s32_p, $qa, $qx, $qy", + [(int_xtensa_ee_vmin_s32 timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VMIN_S32_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, QR:$qx, QR:$qy), + "ee.vmin.s32.ld.incp\t $qu, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-4} = 0x3e; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMIN_S32_LD_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmin_s32_ld_incp_p, $qu, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vmin_s32_ld_incp timm:$qu, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VMIN_S32_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, QR:$qx, QR:$qy), + "ee.vmin.s32.st.incp\t $qv, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayStore = 1; + + let Inst{28-20} = 0x1cb; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qv{2-0}; + let Inst{7-4} = 0x1; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMIN_S32_ST_INCP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmin_s32_st_incp_p, $qv, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vmin_s32_st_incp timm:$qv, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def 
EE_VMIN_S8: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vmin.s8\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0x74; +} + +let usesCustomInserter = 1 in +def EE_VMIN_S8_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmin_s8_p, $qa, $qx, $qy", + [(int_xtensa_ee_vmin_s8 timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VMIN_S8_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, QR:$qx, QR:$qy), + "ee.vmin.s8.ld.incp\t $qu, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-4} = 0x2f; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMIN_S8_LD_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmin_s8_ld_incp_p, $qu, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vmin_s8_ld_incp timm:$qu, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VMIN_S8_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, QR:$qx, QR:$qy), + "ee.vmin.s8.st.incp\t $qv, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayStore = 1; + + let Inst{28-20} = 0x1ca; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qv{2-0}; + let Inst{7-4} = 0x2; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMIN_S8_ST_INCP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmin_s8_st_incp_p, $qv, $as, 
$qa, $qx, $qy", + [(int_xtensa_ee_vmin_s8_st_incp timm:$qv, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_S16_ACCX: EE_Inst24<(outs), (ins QR:$qx, QR:$qy), + "ee.vmulas.s16.accx\t $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qx; + bits<3> qy; + + + let Inst{23-15} = 0x34; + let Inst{14} = qy{2}; + let Inst{13} = 0x0; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0x84; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S16_ACCX_P : Pseudo<(outs), (ins imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_s16_accx_p, $qx, $qy", + [(int_xtensa_ee_vmulas_s16_accx timm:$qx, timm:$qy)]>; + +def EE_VMULAS_S16_ACCX_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_64_16:$imm16, QR:$qx, QR:$qy), + "ee.vmulas.s16.accx.ld.ip\t $qu, $as, $imm16, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm16; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-25} = 0xf; + let Inst{24-23} = imm16{5-4}; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = 0x0; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = 0x0; + let Inst{7-4} = imm16{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S16_ACCX_LD_IP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_64_16:$imm16, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_s16_accx_ld_ip_p, $qu, $as, $imm16, $qx, $qy", + [(int_xtensa_ee_vmulas_s16_accx_ld_ip timm:$qu, AR:$as, timm:$imm16, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_S16_ACCX_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, offset_64_16:$imm16, QR:$qx, QR:$qy, QR:$qs0, QR:$qs1), + "ee.vmulas.s16.accx.ld.ip.qup\t $qu, $as, $imm16, $qx, $qy, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm16; + bits<3> qx; + bits<3> qy; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + let Inst{28-25} = 0x0; + let Inst{24-23} = imm16{5-4}; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qs1{2-0}; + 
let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qs0{2-0}; + let Inst{7-4} = imm16{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S16_ACCX_LD_IP_QUP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_64_16:$imm16, imm8:$qx, imm8:$qy, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vmulas_s16_accx_ld_ip_qup_p, $qu, $as, $imm16, $qx, $qy, $qs0, $qs1", + [(int_xtensa_ee_vmulas_s16_accx_ld_ip_qup timm:$qu, AR:$as, timm:$imm16, timm:$qx, timm:$qy, timm:$qs0, timm:$qs1)]>; + +def EE_VMULAS_S16_ACCX_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR:$ad, QR:$qx, QR:$qy), + "ee.vmulas.s16.accx.ld.xp\t $qu, $as, $ad, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x3c; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = 0x0; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = 0x1; + let Inst{7-4} = ad{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S16_ACCX_LD_XP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_s16_accx_ld_xp_p, $qu, $as, $ad, $qx, $qy", + [(int_xtensa_ee_vmulas_s16_accx_ld_xp timm:$qu, AR:$as, AR:$ad, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_S16_ACCX_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, AR:$ad, QR:$qx, QR:$qy, QR:$qs0, QR:$qs1), + "ee.vmulas.s16.accx.ld.xp.qup\t $qu, $as, $ad, $qx, $qy, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + bits<3> qx; + bits<3> qy; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + let Inst{28-23} = 0x2c; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qs1{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qs0{2-0}; + let Inst{7-4} = ad{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S16_ACCX_LD_XP_QUP_P : Pseudo<(outs), 
(ins imm8:$qu, AR:$as, AR:$ad, imm8:$qx, imm8:$qy, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vmulas_s16_accx_ld_xp_qup_p, $qu, $as, $ad, $qx, $qy, $qs0, $qs1", + [(int_xtensa_ee_vmulas_s16_accx_ld_xp_qup timm:$qu, AR:$as, AR:$ad, timm:$qx, timm:$qy, timm:$qs0, timm:$qs1)]>; + +def EE_VMULAS_S16_QACC: EE_Inst24<(outs), (ins QR:$qx, QR:$qy), + "ee.vmulas.s16.qacc\t $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qx; + bits<3> qy; + + + let Inst{23-15} = 0x34; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0x84; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S16_QACC_P : Pseudo<(outs), (ins imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_s16_qacc_p, $qx, $qy", + [(int_xtensa_ee_vmulas_s16_qacc timm:$qx, timm:$qy)]>; + +def EE_VMULAS_S16_QACC_LDBC_INCP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, QR:$qx, QR:$qy), + "ee.vmulas.s16.qacc.ldbc.incp\t $qu, $as, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{23-21} = 0x4; + let Inst{20} = qu{2}; + let Inst{19-16} = 0x7; + let Inst{15} = qu{1}; + let Inst{14} = qy{2}; + let Inst{13} = qu{0}; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S16_QACC_LDBC_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_s16_qacc_ldbc_incp_p, $qu, $as, $qx, $qy", + [(int_xtensa_ee_vmulas_s16_qacc_ldbc_incp timm:$qu, AR:$as, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_S16_QACC_LDBC_INCP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, QR:$qx, QR:$qy, QR:$qs0, QR:$qs1), + "ee.vmulas.s16.qacc.ldbc.incp.qup\t $qu, $as, $qx, $qy, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qx; + bits<3> qy; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = 
qu{2-0}; + let Inst{19-17} = qs1{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qs0{2-0}; + let Inst{7-4} = 0x8; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S16_QACC_LDBC_INCP_QUP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qx, imm8:$qy, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vmulas_s16_qacc_ldbc_incp_qup_p, $qu, $as, $qx, $qy, $qs0, $qs1", + [(int_xtensa_ee_vmulas_s16_qacc_ldbc_incp_qup timm:$qu, AR:$as, timm:$qx, timm:$qy, timm:$qs0, timm:$qs1)]>; + +def EE_VMULAS_S16_QACC_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_64_16:$imm16, QR:$qx, QR:$qy), + "ee.vmulas.s16.qacc.ld.ip\t $qu, $as, $imm16, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm16; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-25} = 0xf; + let Inst{24-23} = imm16{5-4}; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = 0x1; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = 0x0; + let Inst{7-4} = imm16{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S16_QACC_LD_IP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_64_16:$imm16, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_s16_qacc_ld_ip_p, $qu, $as, $imm16, $qx, $qy", + [(int_xtensa_ee_vmulas_s16_qacc_ld_ip timm:$qu, AR:$as, timm:$imm16, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_S16_QACC_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, offset_64_16:$imm16, QR:$qx, QR:$qy, QR:$qs0, QR:$qs1), + "ee.vmulas.s16.qacc.ld.ip.qup\t $qu, $as, $imm16, $qx, $qy, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm16; + bits<3> qx; + bits<3> qy; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + let Inst{28-25} = 0x1; + let Inst{24-23} = imm16{5-4}; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qs1{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qs0{2-0}; + let Inst{7-4} 
= imm16{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S16_QACC_LD_IP_QUP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_64_16:$imm16, imm8:$qx, imm8:$qy, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vmulas_s16_qacc_ld_ip_qup_p, $qu, $as, $imm16, $qx, $qy, $qs0, $qs1", + [(int_xtensa_ee_vmulas_s16_qacc_ld_ip_qup timm:$qu, AR:$as, timm:$imm16, timm:$qx, timm:$qy, timm:$qs0, timm:$qs1)]>; + +def EE_VMULAS_S16_QACC_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR:$ad, QR:$qx, QR:$qy), + "ee.vmulas.s16.qacc.ld.xp\t $qu, $as, $ad, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x3c; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = 0x1; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = 0x1; + let Inst{7-4} = ad{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S16_QACC_LD_XP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_s16_qacc_ld_xp_p, $qu, $as, $ad, $qx, $qy", + [(int_xtensa_ee_vmulas_s16_qacc_ld_xp timm:$qu, AR:$as, AR:$ad, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_S16_QACC_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, AR:$ad, QR:$qx, QR:$qy, QR:$qs0, QR:$qs1), + "ee.vmulas.s16.qacc.ld.xp.qup\t $qu, $as, $ad, $qx, $qy, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + bits<3> qx; + bits<3> qy; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + let Inst{28-23} = 0x2d; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qs1{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qs0{2-0}; + let Inst{7-4} = ad{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S16_QACC_LD_XP_QUP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad, imm8:$qx, imm8:$qy, imm8:$qs0, imm8:$qs1), + 
"!xtensa_ee_vmulas_s16_qacc_ld_xp_qup_p, $qu, $as, $ad, $qx, $qy, $qs0, $qs1", + [(int_xtensa_ee_vmulas_s16_qacc_ld_xp_qup timm:$qu, AR:$as, AR:$ad, timm:$qx, timm:$qy, timm:$qs0, timm:$qs1)]>; + +def EE_VMULAS_S8_ACCX: EE_Inst24<(outs), (ins QR:$qx, QR:$qy), + "ee.vmulas.s8.accx\t $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qx; + bits<3> qy; + + + let Inst{23-15} = 0x34; + let Inst{14} = qy{2}; + let Inst{13} = 0x0; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0xc4; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S8_ACCX_P : Pseudo<(outs), (ins imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_s8_accx_p, $qx, $qy", + [(int_xtensa_ee_vmulas_s8_accx timm:$qx, timm:$qy)]>; + +def EE_VMULAS_S8_ACCX_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_64_16:$imm16, QR:$qx, QR:$qy), + "ee.vmulas.s8.accx.ld.ip\t $qu, $as, $imm16, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm16; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-25} = 0xf; + let Inst{24-23} = imm16{5-4}; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = 0x2; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = 0x0; + let Inst{7-4} = imm16{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S8_ACCX_LD_IP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_64_16:$imm16, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_s8_accx_ld_ip_p, $qu, $as, $imm16, $qx, $qy", + [(int_xtensa_ee_vmulas_s8_accx_ld_ip timm:$qu, AR:$as, timm:$imm16, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_S8_ACCX_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, offset_64_16:$imm16, QR:$qx, QR:$qy, QR:$qs0, QR:$qs1), + "ee.vmulas.s8.accx.ld.ip.qup\t $qu, $as, $imm16, $qx, $qy, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm16; + bits<3> qx; + bits<3> qy; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + let Inst{28-25} = 0x2; + let 
Inst{24-23} = imm16{5-4}; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qs1{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qs0{2-0}; + let Inst{7-4} = imm16{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S8_ACCX_LD_IP_QUP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_64_16:$imm16, imm8:$qx, imm8:$qy, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vmulas_s8_accx_ld_ip_qup_p, $qu, $as, $imm16, $qx, $qy, $qs0, $qs1", + [(int_xtensa_ee_vmulas_s8_accx_ld_ip_qup timm:$qu, AR:$as, timm:$imm16, timm:$qx, timm:$qy, timm:$qs0, timm:$qs1)]>; + +def EE_VMULAS_S8_ACCX_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR:$ad, QR:$qx, QR:$qy), + "ee.vmulas.s8.accx.ld.xp\t $qu, $as, $ad, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x3c; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = 0x2; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = 0x1; + let Inst{7-4} = ad{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S8_ACCX_LD_XP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_s8_accx_ld_xp_p, $qu, $as, $ad, $qx, $qy", + [(int_xtensa_ee_vmulas_s8_accx_ld_xp timm:$qu, AR:$as, AR:$ad, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_S8_ACCX_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, AR:$ad, QR:$qx, QR:$qy, QR:$qs0, QR:$qs1), + "ee.vmulas.s8.accx.ld.xp.qup\t $qu, $as, $ad, $qx, $qy, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + bits<3> qx; + bits<3> qy; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + let Inst{28-23} = 0x2e; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qs1{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qs0{2-0}; + let Inst{7-4} = ad{3-0}; + let Inst{3-0} = as{3-0}; +} + +let 
usesCustomInserter = 1 in +def EE_VMULAS_S8_ACCX_LD_XP_QUP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad, imm8:$qx, imm8:$qy, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vmulas_s8_accx_ld_xp_qup_p, $qu, $as, $ad, $qx, $qy, $qs0, $qs1", + [(int_xtensa_ee_vmulas_s8_accx_ld_xp_qup timm:$qu, AR:$as, AR:$ad, timm:$qx, timm:$qy, timm:$qs0, timm:$qs1)]>; + +def EE_VMULAS_S8_QACC: EE_Inst24<(outs), (ins QR:$qx, QR:$qy), + "ee.vmulas.s8.qacc\t $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qx; + bits<3> qy; + + + let Inst{23-15} = 0x34; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0xc4; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S8_QACC_P : Pseudo<(outs), (ins imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_s8_qacc_p, $qx, $qy", + [(int_xtensa_ee_vmulas_s8_qacc timm:$qx, timm:$qy)]>; + +def EE_VMULAS_S8_QACC_LDBC_INCP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, QR:$qx, QR:$qy), + "ee.vmulas.s8.qacc.ldbc.incp\t $qu, $as, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{23-21} = 0x5; + let Inst{20} = qu{2}; + let Inst{19-16} = 0x7; + let Inst{15} = qu{1}; + let Inst{14} = qy{2}; + let Inst{13} = qu{0}; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S8_QACC_LDBC_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_s8_qacc_ldbc_incp_p, $qu, $as, $qx, $qy", + [(int_xtensa_ee_vmulas_s8_qacc_ldbc_incp timm:$qu, AR:$as, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_S8_QACC_LDBC_INCP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, QR:$qx, QR:$qy, QR:$qs0, QR:$qs1), + "ee.vmulas.s8.qacc.ldbc.incp.qup\t $qu, $as, $qx, $qy, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qx; + bits<3> qy; + bits<3> qs0; + bits<3> qs1; + + let 
mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qs1{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qs0{2-0}; + let Inst{7-4} = 0x9; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S8_QACC_LDBC_INCP_QUP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qx, imm8:$qy, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vmulas_s8_qacc_ldbc_incp_qup_p, $qu, $as, $qx, $qy, $qs0, $qs1", + [(int_xtensa_ee_vmulas_s8_qacc_ldbc_incp_qup timm:$qu, AR:$as, timm:$qx, timm:$qy, timm:$qs0, timm:$qs1)]>; + +def EE_VMULAS_S8_QACC_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_64_16:$imm16, QR:$qx, QR:$qy), + "ee.vmulas.s8.qacc.ld.ip\t $qu, $as, $imm16, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm16; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-25} = 0xf; + let Inst{24-23} = imm16{5-4}; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = 0x3; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = 0x0; + let Inst{7-4} = imm16{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S8_QACC_LD_IP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_64_16:$imm16, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_s8_qacc_ld_ip_p, $qu, $as, $imm16, $qx, $qy", + [(int_xtensa_ee_vmulas_s8_qacc_ld_ip timm:$qu, AR:$as, timm:$imm16, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_S8_QACC_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, offset_64_16:$imm16, QR:$qx, QR:$qy, QR:$qs0, QR:$qs1), + "ee.vmulas.s8.qacc.ld.ip.qup\t $qu, $as, $imm16, $qx, $qy, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm16; + bits<3> qx; + bits<3> qy; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + let Inst{28-25} = 0x3; + let Inst{24-23} = imm16{5-4}; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qs1{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = 
qy{2-0}; + let Inst{10-8} = qs0{2-0}; + let Inst{7-4} = imm16{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S8_QACC_LD_IP_QUP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_64_16:$imm16, imm8:$qx, imm8:$qy, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vmulas_s8_qacc_ld_ip_qup_p, $qu, $as, $imm16, $qx, $qy, $qs0, $qs1", + [(int_xtensa_ee_vmulas_s8_qacc_ld_ip_qup timm:$qu, AR:$as, timm:$imm16, timm:$qx, timm:$qy, timm:$qs0, timm:$qs1)]>; + +def EE_VMULAS_S8_QACC_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR:$ad, QR:$qx, QR:$qy), + "ee.vmulas.s8.qacc.ld.xp\t $qu, $as, $ad, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x3c; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = 0x3; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = 0x1; + let Inst{7-4} = ad{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S8_QACC_LD_XP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_s8_qacc_ld_xp_p, $qu, $as, $ad, $qx, $qy", + [(int_xtensa_ee_vmulas_s8_qacc_ld_xp timm:$qu, AR:$as, AR:$ad, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_S8_QACC_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, AR:$ad, QR:$qx, QR:$qy, QR:$qs0, QR:$qs1), + "ee.vmulas.s8.qacc.ld.xp.qup\t $qu, $as, $ad, $qx, $qy, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + bits<3> qx; + bits<3> qy; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + let Inst{28-23} = 0x2f; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qs1{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qs0{2-0}; + let Inst{7-4} = ad{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_S8_QACC_LD_XP_QUP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad, imm8:$qx, imm8:$qy, 
imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vmulas_s8_qacc_ld_xp_qup_p, $qu, $as, $ad, $qx, $qy, $qs0, $qs1", + [(int_xtensa_ee_vmulas_s8_qacc_ld_xp_qup timm:$qu, AR:$as, AR:$ad, timm:$qx, timm:$qy, timm:$qs0, timm:$qs1)]>; + +def EE_VMULAS_U16_ACCX: EE_Inst24<(outs), (ins QR:$qx, QR:$qy), + "ee.vmulas.u16.accx\t $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qx; + bits<3> qy; + + + let Inst{23-15} = 0x14; + let Inst{14} = qy{2}; + let Inst{13} = 0x0; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0x84; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U16_ACCX_P : Pseudo<(outs), (ins imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_u16_accx_p, $qx, $qy", + [(int_xtensa_ee_vmulas_u16_accx timm:$qx, timm:$qy)]>; + +def EE_VMULAS_U16_ACCX_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_64_16:$imm16, QR:$qx, QR:$qy), + "ee.vmulas.u16.accx.ld.ip\t $qu, $as, $imm16, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm16; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-25} = 0xf; + let Inst{24-23} = imm16{5-4}; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = 0x4; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = 0x0; + let Inst{7-4} = imm16{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U16_ACCX_LD_IP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_64_16:$imm16, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_u16_accx_ld_ip_p, $qu, $as, $imm16, $qx, $qy", + [(int_xtensa_ee_vmulas_u16_accx_ld_ip timm:$qu, AR:$as, timm:$imm16, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_U16_ACCX_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, offset_64_16:$imm16, QR:$qx, QR:$qy, QR:$qs0, QR:$qs1), + "ee.vmulas.u16.accx.ld.ip.qup\t $qu, $as, $imm16, $qx, $qy, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm16; + bits<3> qx; + bits<3> qy; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + 
let Inst{28-25} = 0x4; + let Inst{24-23} = imm16{5-4}; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qs1{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qs0{2-0}; + let Inst{7-4} = imm16{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U16_ACCX_LD_IP_QUP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_64_16:$imm16, imm8:$qx, imm8:$qy, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vmulas_u16_accx_ld_ip_qup_p, $qu, $as, $imm16, $qx, $qy, $qs0, $qs1", + [(int_xtensa_ee_vmulas_u16_accx_ld_ip_qup timm:$qu, AR:$as, timm:$imm16, timm:$qx, timm:$qy, timm:$qs0, timm:$qs1)]>; + +def EE_VMULAS_U16_ACCX_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR:$ad, QR:$qx, QR:$qy), + "ee.vmulas.u16.accx.ld.xp\t $qu, $as, $ad, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x3c; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = 0x4; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = 0x1; + let Inst{7-4} = ad{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U16_ACCX_LD_XP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_u16_accx_ld_xp_p, $qu, $as, $ad, $qx, $qy", + [(int_xtensa_ee_vmulas_u16_accx_ld_xp timm:$qu, AR:$as, AR:$ad, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_U16_ACCX_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, AR:$ad, QR:$qx, QR:$qy, QR:$qs0, QR:$qs1), + "ee.vmulas.u16.accx.ld.xp.qup\t $qu, $as, $ad, $qx, $qy, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + bits<3> qx; + bits<3> qy; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + let Inst{28-23} = 0x30; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qs1{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qs0{2-0}; + let Inst{7-4} = ad{3-0}; + 
let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U16_ACCX_LD_XP_QUP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad, imm8:$qx, imm8:$qy, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vmulas_u16_accx_ld_xp_qup_p, $qu, $as, $ad, $qx, $qy, $qs0, $qs1", + [(int_xtensa_ee_vmulas_u16_accx_ld_xp_qup timm:$qu, AR:$as, AR:$ad, timm:$qx, timm:$qy, timm:$qs0, timm:$qs1)]>; + +def EE_VMULAS_U16_QACC: EE_Inst24<(outs), (ins QR:$qx, QR:$qy), + "ee.vmulas.u16.qacc\t $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qx; + bits<3> qy; + + + let Inst{23-15} = 0x14; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0x84; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U16_QACC_P : Pseudo<(outs), (ins imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_u16_qacc_p, $qx, $qy", + [(int_xtensa_ee_vmulas_u16_qacc timm:$qx, timm:$qy)]>; + +def EE_VMULAS_U16_QACC_LDBC_INCP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, QR:$qx, QR:$qy), + "ee.vmulas.u16.qacc.ldbc.incp\t $qu, $as, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{23-21} = 0x6; + let Inst{20} = qu{2}; + let Inst{19-16} = 0x7; + let Inst{15} = qu{1}; + let Inst{14} = qy{2}; + let Inst{13} = qu{0}; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U16_QACC_LDBC_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_u16_qacc_ldbc_incp_p, $qu, $as, $qx, $qy", + [(int_xtensa_ee_vmulas_u16_qacc_ldbc_incp timm:$qu, AR:$as, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_U16_QACC_LDBC_INCP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, QR:$qx, QR:$qy, QR:$qs0, QR:$qs1), + "ee.vmulas.u16.qacc.ldbc.incp.qup\t $qu, $as, $qx, $qy, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qx; + 
bits<3> qy; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qs1{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qs0{2-0}; + let Inst{7-4} = 0xa; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U16_QACC_LDBC_INCP_QUP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qx, imm8:$qy, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vmulas_u16_qacc_ldbc_incp_qup_p, $qu, $as, $qx, $qy, $qs0, $qs1", + [(int_xtensa_ee_vmulas_u16_qacc_ldbc_incp_qup timm:$qu, AR:$as, timm:$qx, timm:$qy, timm:$qs0, timm:$qs1)]>; + +def EE_VMULAS_U16_QACC_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_64_16:$imm16, QR:$qx, QR:$qy), + "ee.vmulas.u16.qacc.ld.ip\t $qu, $as, $imm16, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm16; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-25} = 0xf; + let Inst{24-23} = imm16{5-4}; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = 0x5; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = 0x0; + let Inst{7-4} = imm16{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U16_QACC_LD_IP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_64_16:$imm16, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_u16_qacc_ld_ip_p, $qu, $as, $imm16, $qx, $qy", + [(int_xtensa_ee_vmulas_u16_qacc_ld_ip timm:$qu, AR:$as, timm:$imm16, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_U16_QACC_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, offset_64_16:$imm16, QR:$qx, QR:$qy, QR:$qs0, QR:$qs1), + "ee.vmulas.u16.qacc.ld.ip.qup\t $qu, $as, $imm16, $qx, $qy, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm16; + bits<3> qx; + bits<3> qy; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + let Inst{28-25} = 0x5; + let Inst{24-23} = imm16{5-4}; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = 
qs1{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qs0{2-0}; + let Inst{7-4} = imm16{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U16_QACC_LD_IP_QUP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_64_16:$imm16, imm8:$qx, imm8:$qy, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vmulas_u16_qacc_ld_ip_qup_p, $qu, $as, $imm16, $qx, $qy, $qs0, $qs1", + [(int_xtensa_ee_vmulas_u16_qacc_ld_ip_qup timm:$qu, AR:$as, timm:$imm16, timm:$qx, timm:$qy, timm:$qs0, timm:$qs1)]>; + +def EE_VMULAS_U16_QACC_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR:$ad, QR:$qx, QR:$qy), + "ee.vmulas.u16.qacc.ld.xp\t $qu, $as, $ad, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x3c; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = 0x5; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = 0x1; + let Inst{7-4} = ad{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U16_QACC_LD_XP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_u16_qacc_ld_xp_p, $qu, $as, $ad, $qx, $qy", + [(int_xtensa_ee_vmulas_u16_qacc_ld_xp timm:$qu, AR:$as, AR:$ad, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_U16_QACC_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, AR:$ad, QR:$qx, QR:$qy, QR:$qs0, QR:$qs1), + "ee.vmulas.u16.qacc.ld.xp.qup\t $qu, $as, $ad, $qx, $qy, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + bits<3> qx; + bits<3> qy; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + let Inst{28-23} = 0x31; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qs1{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qs0{2-0}; + let Inst{7-4} = ad{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U16_QACC_LD_XP_QUP_P : 
Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad, imm8:$qx, imm8:$qy, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vmulas_u16_qacc_ld_xp_qup_p, $qu, $as, $ad, $qx, $qy, $qs0, $qs1", + [(int_xtensa_ee_vmulas_u16_qacc_ld_xp_qup timm:$qu, AR:$as, AR:$ad, timm:$qx, timm:$qy, timm:$qs0, timm:$qs1)]>; + +def EE_VMULAS_U8_ACCX: EE_Inst24<(outs), (ins QR:$qx, QR:$qy), + "ee.vmulas.u8.accx\t $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qx; + bits<3> qy; + + + let Inst{23-15} = 0x14; + let Inst{14} = qy{2}; + let Inst{13} = 0x0; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0xc4; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U8_ACCX_P : Pseudo<(outs), (ins imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_u8_accx_p, $qx, $qy", + [(int_xtensa_ee_vmulas_u8_accx timm:$qx, timm:$qy)]>; + +def EE_VMULAS_U8_ACCX_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_64_16:$imm16, QR:$qx, QR:$qy), + "ee.vmulas.u8.accx.ld.ip\t $qu, $as, $imm16, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm16; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-25} = 0xf; + let Inst{24-23} = imm16{5-4}; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = 0x6; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = 0x0; + let Inst{7-4} = imm16{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U8_ACCX_LD_IP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_64_16:$imm16, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_u8_accx_ld_ip_p, $qu, $as, $imm16, $qx, $qy", + [(int_xtensa_ee_vmulas_u8_accx_ld_ip timm:$qu, AR:$as, timm:$imm16, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_U8_ACCX_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, offset_64_16:$imm16, QR:$qx, QR:$qy, QR:$qs0, QR:$qs1), + "ee.vmulas.u8.accx.ld.ip.qup\t $qu, $as, $imm16, $qx, $qy, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm16; + bits<3> qx; + bits<3> 
qy; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + let Inst{28-25} = 0x6; + let Inst{24-23} = imm16{5-4}; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qs1{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qs0{2-0}; + let Inst{7-4} = imm16{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U8_ACCX_LD_IP_QUP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_64_16:$imm16, imm8:$qx, imm8:$qy, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vmulas_u8_accx_ld_ip_qup_p, $qu, $as, $imm16, $qx, $qy, $qs0, $qs1", + [(int_xtensa_ee_vmulas_u8_accx_ld_ip_qup timm:$qu, AR:$as, timm:$imm16, timm:$qx, timm:$qy, timm:$qs0, timm:$qs1)]>; + +def EE_VMULAS_U8_ACCX_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR:$ad, QR:$qx, QR:$qy), + "ee.vmulas.u8.accx.ld.xp\t $qu, $as, $ad, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x3c; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = 0x6; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = 0x1; + let Inst{7-4} = ad{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U8_ACCX_LD_XP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_u8_accx_ld_xp_p, $qu, $as, $ad, $qx, $qy", + [(int_xtensa_ee_vmulas_u8_accx_ld_xp timm:$qu, AR:$as, AR:$ad, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_U8_ACCX_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, AR:$ad, QR:$qx, QR:$qy, QR:$qs0, QR:$qs1), + "ee.vmulas.u8.accx.ld.xp.qup\t $qu, $as, $ad, $qx, $qy, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + bits<3> qx; + bits<3> qy; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + let Inst{28-23} = 0x32; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qs1{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let 
Inst{10-8} = qs0{2-0}; + let Inst{7-4} = ad{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U8_ACCX_LD_XP_QUP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad, imm8:$qx, imm8:$qy, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vmulas_u8_accx_ld_xp_qup_p, $qu, $as, $ad, $qx, $qy, $qs0, $qs1", + [(int_xtensa_ee_vmulas_u8_accx_ld_xp_qup timm:$qu, AR:$as, AR:$ad, timm:$qx, timm:$qy, timm:$qs0, timm:$qs1)]>; + +def EE_VMULAS_U8_QACC: EE_Inst24<(outs), (ins QR:$qx, QR:$qy), + "ee.vmulas.u8.qacc\t $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qx; + bits<3> qy; + + + let Inst{23-15} = 0x14; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0xc4; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U8_QACC_P : Pseudo<(outs), (ins imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_u8_qacc_p, $qx, $qy", + [(int_xtensa_ee_vmulas_u8_qacc timm:$qx, timm:$qy)]>; + +def EE_VMULAS_U8_QACC_LDBC_INCP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, QR:$qx, QR:$qy), + "ee.vmulas.u8.qacc.ldbc.incp\t $qu, $as, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{23-21} = 0x7; + let Inst{20} = qu{2}; + let Inst{19-16} = 0x7; + let Inst{15} = qu{1}; + let Inst{14} = qy{2}; + let Inst{13} = qu{0}; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U8_QACC_LDBC_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_u8_qacc_ldbc_incp_p, $qu, $as, $qx, $qy", + [(int_xtensa_ee_vmulas_u8_qacc_ldbc_incp timm:$qu, AR:$as, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_U8_QACC_LDBC_INCP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, QR:$qx, QR:$qy, QR:$qs0, QR:$qs1), + "ee.vmulas.u8.qacc.ldbc.incp.qup\t $qu, $as, $qx, $qy, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> 
qu; + bits<4> as; + bits<3> qx; + bits<3> qy; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qs1{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qs0{2-0}; + let Inst{7-4} = 0xb; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U8_QACC_LDBC_INCP_QUP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qx, imm8:$qy, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vmulas_u8_qacc_ldbc_incp_qup_p, $qu, $as, $qx, $qy, $qs0, $qs1", + [(int_xtensa_ee_vmulas_u8_qacc_ldbc_incp_qup timm:$qu, AR:$as, timm:$qx, timm:$qy, timm:$qs0, timm:$qs1)]>; + +def EE_VMULAS_U8_QACC_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_64_16:$imm16, QR:$qx, QR:$qy), + "ee.vmulas.u8.qacc.ld.ip\t $qu, $as, $imm16, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm16; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-25} = 0xf; + let Inst{24-23} = imm16{5-4}; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = 0x7; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = 0x0; + let Inst{7-4} = imm16{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U8_QACC_LD_IP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_64_16:$imm16, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_u8_qacc_ld_ip_p, $qu, $as, $imm16, $qx, $qy", + [(int_xtensa_ee_vmulas_u8_qacc_ld_ip timm:$qu, AR:$as, timm:$imm16, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_U8_QACC_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, offset_64_16:$imm16, QR:$qx, QR:$qy, QR:$qs0, QR:$qs1), + "ee.vmulas.u8.qacc.ld.ip.qup\t $qu, $as, $imm16, $qx, $qy, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<8> imm16; + bits<3> qx; + bits<3> qy; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + let Inst{28-25} = 0x7; + let Inst{24-23} = imm16{5-4}; + let Inst{22-20} = 
qu{2-0}; + let Inst{19-17} = qs1{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qs0{2-0}; + let Inst{7-4} = imm16{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U8_QACC_LD_IP_QUP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, offset_64_16:$imm16, imm8:$qx, imm8:$qy, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vmulas_u8_qacc_ld_ip_qup_p, $qu, $as, $imm16, $qx, $qy, $qs0, $qs1", + [(int_xtensa_ee_vmulas_u8_qacc_ld_ip_qup timm:$qu, AR:$as, timm:$imm16, timm:$qx, timm:$qy, timm:$qs0, timm:$qs1)]>; + +def EE_VMULAS_U8_QACC_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR:$ad, QR:$qx, QR:$qy), + "ee.vmulas.u8.qacc.ld.xp\t $qu, $as, $ad, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x3c; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = 0x7; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = 0x1; + let Inst{7-4} = ad{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMULAS_U8_QACC_LD_XP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmulas_u8_qacc_ld_xp_p, $qu, $as, $ad, $qx, $qy", + [(int_xtensa_ee_vmulas_u8_qacc_ld_xp timm:$qu, AR:$as, AR:$ad, timm:$qx, timm:$qy)]>; + +def EE_VMULAS_U8_QACC_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, AR:$ad, QR:$qx, QR:$qy, QR:$qs0, QR:$qs1), + "ee.vmulas.u8.qacc.ld.xp.qup\t $qu, $as, $ad, $qx, $qy, $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<4> ad; + bits<3> qx; + bits<3> qy; + bits<3> qs0; + bits<3> qs1; + + let mayLoad = 1; + + let Inst{28-23} = 0x33; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qs1{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qs0{2-0}; + let Inst{7-4} = ad{3-0}; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def 
EE_VMULAS_U8_QACC_LD_XP_QUP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, AR:$ad, imm8:$qx, imm8:$qy, imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vmulas_u8_qacc_ld_xp_qup_p, $qu, $as, $ad, $qx, $qy, $qs0, $qs1", + [(int_xtensa_ee_vmulas_u8_qacc_ld_xp_qup timm:$qu, AR:$as, AR:$ad, timm:$qx, timm:$qy, timm:$qs0, timm:$qs1)]>; + +def EE_VMUL_S16: EE_Inst24<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "ee.vmul.s16\t $qz, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qz; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qz{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qz{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0x84; +} + +let usesCustomInserter = 1 in +def EE_VMUL_S16_P : Pseudo<(outs), (ins imm8:$qz, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmul_s16_p, $qz, $qx, $qy", + [(int_xtensa_ee_vmul_s16 timm:$qz, timm:$qx, timm:$qy)]>; + +def EE_VMUL_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$as, QR:$qx, QR:$qy), + "ee.vmul.s16.ld.incp\t $qu, $as, $qz, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qz; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qz{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-4} = 0x3f; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMUL_S16_LD_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qz, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmul_s16_ld_incp_p, $qu, $as, $qz, $qx, $qy", + [(int_xtensa_ee_vmul_s16_ld_incp timm:$qu, AR:$as, timm:$qz, timm:$qx, timm:$qy)]>; + +def EE_VMUL_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qz), (ins QR:$qv, AR:$as, QR:$qx, QR:$qy), + "ee.vmul.s16.st.incp\t $qv, $as, $qz, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<3> qz; + bits<3> qx; + bits<3> qy; + + let mayStore = 1; + + let Inst{28-20} = 0x1cb; + let 
Inst{19-17} = qz{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qv{2-0}; + let Inst{7-4} = 0x2; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMUL_S16_ST_INCP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, imm8:$qz, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmul_s16_st_incp_p, $qv, $as, $qz, $qx, $qy", + [(int_xtensa_ee_vmul_s16_st_incp timm:$qv, AR:$as, timm:$qz, timm:$qx, timm:$qy)]>; + +def EE_VMUL_S8: EE_Inst24<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "ee.vmul.s8\t $qz, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qz; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qz{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qz{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0x94; +} + +let usesCustomInserter = 1 in +def EE_VMUL_S8_P : Pseudo<(outs), (ins imm8:$qz, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmul_s8_p, $qz, $qx, $qy", + [(int_xtensa_ee_vmul_s8 timm:$qz, timm:$qx, timm:$qy)]>; + +def EE_VMUL_S8_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$as, QR:$qx, QR:$qy), + "ee.vmul.s8.ld.incp\t $qu, $as, $qz, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qz; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qz{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-4} = 0x4c; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMUL_S8_LD_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qz, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmul_s8_ld_incp_p, $qu, $as, $qz, $qx, $qy", + [(int_xtensa_ee_vmul_s8_ld_incp timm:$qu, AR:$as, timm:$qz, timm:$qx, timm:$qy)]>; + +def EE_VMUL_S8_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qz), (ins QR:$qv, AR:$as, QR:$qx, QR:$qy), + "ee.vmul.s8.st.incp\t $qv, $as, $qz, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + 
bits<4> as; + bits<3> qz; + bits<3> qx; + bits<3> qy; + + let mayStore = 1; + + let Inst{28-20} = 0x1ca; + let Inst{19-17} = qz{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qv{2-0}; + let Inst{7-4} = 0x3; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMUL_S8_ST_INCP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, imm8:$qz, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmul_s8_st_incp_p, $qv, $as, $qz, $qx, $qy", + [(int_xtensa_ee_vmul_s8_st_incp timm:$qv, AR:$as, timm:$qz, timm:$qx, timm:$qy)]>; + +def EE_VMUL_U16: EE_Inst24<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "ee.vmul.u16\t $qz, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qz; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qz{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qz{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0xa4; +} + +let usesCustomInserter = 1 in +def EE_VMUL_U16_P : Pseudo<(outs), (ins imm8:$qz, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmul_u16_p, $qz, $qx, $qy", + [(int_xtensa_ee_vmul_u16 timm:$qz, timm:$qx, timm:$qy)]>; + +def EE_VMUL_U16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$as, QR:$qx, QR:$qy), + "ee.vmul.u16.ld.incp\t $qu, $as, $qz, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qz; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qz{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-4} = 0x5c; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMUL_U16_LD_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qz, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmul_u16_ld_incp_p, $qu, $as, $qz, $qx, $qy", + [(int_xtensa_ee_vmul_u16_ld_incp timm:$qu, AR:$as, timm:$qz, timm:$qx, timm:$qy)]>; + +def EE_VMUL_U16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qz), (ins QR:$qv, 
AR:$as, QR:$qx, QR:$qy), + "ee.vmul.u16.st.incp\t $qv, $as, $qz, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<3> qz; + bits<3> qx; + bits<3> qy; + + let mayStore = 1; + + let Inst{28-20} = 0x1cb; + let Inst{19-17} = qz{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qv{2-0}; + let Inst{7-4} = 0x3; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMUL_U16_ST_INCP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, imm8:$qz, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmul_u16_st_incp_p, $qv, $as, $qz, $qx, $qy", + [(int_xtensa_ee_vmul_u16_st_incp timm:$qv, AR:$as, timm:$qz, timm:$qx, timm:$qy)]>; + +def EE_VMUL_U8: EE_Inst24<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "ee.vmul.u8\t $qz, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qz; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qz{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qz{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0xb4; +} + +let usesCustomInserter = 1 in +def EE_VMUL_U8_P : Pseudo<(outs), (ins imm8:$qz, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmul_u8_p, $qz, $qx, $qy", + [(int_xtensa_ee_vmul_u8 timm:$qz, timm:$qx, timm:$qy)]>; + +def EE_VMUL_U8_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$as, QR:$qx, QR:$qy), + "ee.vmul.u8.ld.incp\t $qu, $as, $qz, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qz; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qz{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-4} = 0x6c; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMUL_U8_LD_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qz, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmul_u8_ld_incp_p, $qu, $as, $qz, $qx, $qy", + [(int_xtensa_ee_vmul_u8_ld_incp timm:$qu, 
AR:$as, timm:$qz, timm:$qx, timm:$qy)]>; + +def EE_VMUL_U8_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qz), (ins QR:$qv, AR:$as, QR:$qx, QR:$qy), + "ee.vmul.u8.st.incp\t $qv, $as, $qz, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<3> qz; + bits<3> qx; + bits<3> qy; + + let mayStore = 1; + + let Inst{28-20} = 0x1d1; + let Inst{19-17} = qz{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qv{2-0}; + let Inst{7-4} = 0x0; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VMUL_U8_ST_INCP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, imm8:$qz, imm8:$qx, imm8:$qy), + "!xtensa_ee_vmul_u8_st_incp_p, $qv, $as, $qz, $qx, $qy", + [(int_xtensa_ee_vmul_u8_st_incp timm:$qv, AR:$as, timm:$qz, timm:$qx, timm:$qy)]>; + +def EE_VPRELU_S16: EE_Inst24<(outs QR:$qz), (ins QR:$qx, QR:$qy, AR:$ay), + "ee.vprelu.s16\t $qz, $qx, $qy, $ay", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qz; + bits<3> qx; + bits<3> qy; + bits<4> ay; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qz{2-1}; + let Inst{19-16} = 0xc; + let Inst{15} = qz{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x0; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-4} = ay{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VPRELU_S16_P : Pseudo<(outs), (ins imm8:$qz, imm8:$qx, imm8:$qy, AR:$ay), + "!xtensa_ee_vprelu_s16_p, $qz, $qx, $qy, $ay", + [(int_xtensa_ee_vprelu_s16 timm:$qz, timm:$qx, timm:$qy, AR:$ay)]>; + +def EE_VPRELU_S8: EE_Inst24<(outs QR:$qz), (ins QR:$qx, QR:$qy, AR:$ay), + "ee.vprelu.s8\t $qz, $qx, $qy, $ay", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qz; + bits<3> qx; + bits<3> qy; + bits<4> ay; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qz{2-1}; + let Inst{19-16} = 0xc; + let Inst{15} = qz{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-4} = ay{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def 
EE_VPRELU_S8_P : Pseudo<(outs), (ins imm8:$qz, imm8:$qx, imm8:$qy, AR:$ay), + "!xtensa_ee_vprelu_s8_p, $qz, $qx, $qy, $ay", + [(int_xtensa_ee_vprelu_s8 timm:$qz, timm:$qx, timm:$qy, AR:$ay)]>; + +def EE_VRELU_S16: EE_Inst24<(outs QR:$qsr), (ins QR:$qs, AR:$ax, AR:$ay), + "ee.vrelu.s16\t $qs, $ax, $ay", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qs; + bits<4> ax; + bits<4> ay; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qs{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qs{0}; + let Inst{14-12} = 0x1; + let Inst{11-8} = ax{3-0}; + let Inst{7-4} = ay{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VRELU_S16_P : Pseudo<(outs), (ins imm8:$qs, AR:$ax, AR:$ay), + "!xtensa_ee_vrelu_s16_p, $qs, $ax, $ay", + [(int_xtensa_ee_vrelu_s16 timm:$qs, AR:$ax, AR:$ay)]>; + +def EE_VRELU_S8: EE_Inst24<(outs QR:$qsr), (ins QR:$qs, AR:$ax, AR:$ay), + "ee.vrelu.s8\t $qs, $ax, $ay", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qs; + bits<4> ax; + bits<4> ay; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qs{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qs{0}; + let Inst{14-12} = 0x5; + let Inst{11-8} = ax{3-0}; + let Inst{7-4} = ay{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VRELU_S8_P : Pseudo<(outs), (ins imm8:$qs, AR:$ax, AR:$ay), + "!xtensa_ee_vrelu_s8_p, $qs, $ax, $ay", + [(int_xtensa_ee_vrelu_s8 timm:$qs, AR:$ax, AR:$ay)]>; + +def EE_VSL_32: EE_Inst24<(outs QR:$qa), (ins QR:$qs), + "ee.vsl.32\t $qa, $qs", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qs; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qs{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qs{0}; + let Inst{14-7} = 0x7e; + let Inst{6-4} = qa{2-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VSL_32_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qs), + "!xtensa_ee_vsl_32_p, $qa, $qs", + [(int_xtensa_ee_vsl_32 timm:$qa, timm:$qs)]>; + +def EE_VSMULAS_S16_QACC: EE_Inst24<(outs), (ins QR:$qx, QR:$qy, select_8:$sel8), + 
"ee.vsmulas.s16.qacc\t $qx, $qy, $sel8", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qx; + bits<3> qy; + bits<3> sel8; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = sel8{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = sel8{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0xc4; +} + +let usesCustomInserter = 1 in +def EE_VSMULAS_S16_QACC_P : Pseudo<(outs), (ins imm8:$qx, imm8:$qy, select_8:$sel8), + "!xtensa_ee_vsmulas_s16_qacc_p, $qx, $qy, $sel8", + [(int_xtensa_ee_vsmulas_s16_qacc timm:$qx, timm:$qy, timm:$sel8)]>; + +def EE_VSMULAS_S16_QACC_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, QR:$qx, QR:$qy, select_8:$sel8), + "ee.vsmulas.s16.qacc.ld.incp\t $qu, $as, $qx, $qy, $sel8", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qx; + bits<3> qy; + bits<3> sel8; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = sel8{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-4} = 0x7c; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VSMULAS_S16_QACC_LD_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qx, imm8:$qy, select_8:$sel8), + "!xtensa_ee_vsmulas_s16_qacc_ld_incp_p, $qu, $as, $qx, $qy, $sel8", + [(int_xtensa_ee_vsmulas_s16_qacc_ld_incp timm:$qu, AR:$as, timm:$qx, timm:$qy, timm:$sel8)]>; + +def EE_VSMULAS_S8_QACC: EE_Inst24<(outs), (ins QR:$qx, QR:$qy, select_16:$sel16), + "ee.vsmulas.s8.qacc\t $qx, $qy, $sel16", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qx; + bits<3> qy; + bits<4> sel16; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = sel16{3-2}; + let Inst{19-16} = 0xe; + let Inst{15} = sel16{1}; + let Inst{14} = qy{2}; + let Inst{13} = 0x0; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-5} = 0x2; + let Inst{4} = sel16{0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VSMULAS_S8_QACC_P : Pseudo<(outs), (ins 
imm8:$qx, imm8:$qy, select_16:$sel16), + "!xtensa_ee_vsmulas_s8_qacc_p, $qx, $qy, $sel16", + [(int_xtensa_ee_vsmulas_s8_qacc timm:$qx, timm:$qy, timm:$sel16)]>; + +def EE_VSMULAS_S8_QACC_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, QR:$qx, QR:$qy, select_16:$sel16), + "ee.vsmulas.s8.qacc.ld.incp\t $qu, $as, $qx, $qy, $sel16", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qx; + bits<3> qy; + bits<4> sel16; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = sel16{3-1}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-9} = 0x1; + let Inst{8} = sel16{0}; + let Inst{7-4} = 0xc; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VSMULAS_S8_QACC_LD_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qx, imm8:$qy, select_16:$sel16), + "!xtensa_ee_vsmulas_s8_qacc_ld_incp_p, $qu, $as, $qx, $qy, $sel16", + [(int_xtensa_ee_vsmulas_s8_qacc_ld_incp timm:$qu, AR:$as, timm:$qx, timm:$qy, timm:$sel16)]>; + +def EE_VSR_32: EE_Inst24<(outs QR:$qa), (ins QR:$qs), + "ee.vsr.32\t $qa, $qs", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qs; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qs{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qs{0}; + let Inst{14-7} = 0x7f; + let Inst{6-4} = qa{2-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VSR_32_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qs), + "!xtensa_ee_vsr_32_p, $qa, $qs", + [(int_xtensa_ee_vsr_32 timm:$qa, timm:$qs)]>; + +def EE_VST_128_IP: EE_Inst24<(outs AR:$asr), (ins QR:$qv, AR:$as, offset_256_16:$imm16), + "ee.vst.128.ip\t $qv, $as, $imm16", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<8> imm16; + + let mayStore = 1; + + let Inst{23} = 0x1; + let Inst{22} = imm16{7}; + let Inst{21-20} = qv{2-1}; + let Inst{19-16} = 0xa; + let Inst{15} = qv{0}; + let Inst{14-8} = imm16{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let 
usesCustomInserter = 1 in +def EE_VST_128_IP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, offset_256_16:$imm16), + "!xtensa_ee_vst_128_ip_p, $qv, $as, $imm16", + [(int_xtensa_ee_vst_128_ip timm:$qv, AR:$as, timm:$imm16)]>; + +def EE_VST_128_XP: EE_Inst24<(outs AR:$asr), (ins QR:$qv, AR:$as, AR:$ad), + "ee.vst.128.xp\t $qv, $as, $ad", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<4> ad; + + let mayStore = 1; + + let Inst{23-22} = 0x2; + let Inst{21-20} = qv{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qv{0}; + let Inst{14-12} = 0x7; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VST_128_XP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, AR:$ad), + "!xtensa_ee_vst_128_xp_p, $qv, $as, $ad", + [(int_xtensa_ee_vst_128_xp timm:$qv, AR:$as, AR:$ad)]>; + +def EE_VST_H_64_IP: EE_Inst24<(outs AR:$asr), (ins QR:$qv, AR:$as, offset_256_8:$imm8), + "ee.vst.h.64.ip\t $qv, $as, $imm8", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<8> imm8; + + let mayStore = 1; + + let Inst{23} = 0x1; + let Inst{22} = imm8{7}; + let Inst{21-20} = qv{2-1}; + let Inst{19-16} = 0xb; + let Inst{15} = qv{0}; + let Inst{14-8} = imm8{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VST_H_64_IP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, offset_256_8:$imm8), + "!xtensa_ee_vst_h_64_ip_p, $qv, $as, $imm8", + [(int_xtensa_ee_vst_h_64_ip timm:$qv, AR:$as, timm:$imm8)]>; + +def EE_VST_H_64_XP: EE_Inst24<(outs AR:$asr), (ins QR:$qv, AR:$as, AR:$ad), + "ee.vst.h.64.xp\t $qv, $as, $ad", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<4> ad; + + let mayStore = 1; + + let Inst{23-22} = 0x3; + let Inst{21-20} = qv{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qv{0}; + let Inst{14-12} = 0x0; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VST_H_64_XP_P : 
Pseudo<(outs), (ins imm8:$qv, AR:$as, AR:$ad), + "!xtensa_ee_vst_h_64_xp_p, $qv, $as, $ad", + [(int_xtensa_ee_vst_h_64_xp timm:$qv, AR:$as, AR:$ad)]>; + +def EE_VST_L_64_IP: EE_Inst24<(outs AR:$asr), (ins QR:$qv, AR:$as, offset_256_8:$imm8), + "ee.vst.l.64.ip\t $qv, $as, $imm8", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<8> imm8; + + let mayStore = 1; + + let Inst{23} = 0x1; + let Inst{22} = imm8{7}; + let Inst{21-20} = qv{2-1}; + let Inst{19-16} = 0x4; + let Inst{15} = qv{0}; + let Inst{14-8} = imm8{6-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VST_L_64_IP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, offset_256_8:$imm8), + "!xtensa_ee_vst_l_64_ip_p, $qv, $as, $imm8", + [(int_xtensa_ee_vst_l_64_ip timm:$qv, AR:$as, timm:$imm8)]>; + +def EE_VST_L_64_XP: EE_Inst24<(outs AR:$asr), (ins QR:$qv, AR:$as, AR:$ad), + "ee.vst.l.64.xp\t $qv, $as, $ad", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<4> ad; + + let mayStore = 1; + + let Inst{23-22} = 0x3; + let Inst{21-20} = qv{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qv{0}; + let Inst{14-12} = 0x4; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_VST_L_64_XP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, AR:$ad), + "!xtensa_ee_vst_l_64_xp_p, $qv, $as, $ad", + [(int_xtensa_ee_vst_l_64_xp timm:$qv, AR:$as, AR:$ad)]>; + +def EE_VSUBS_S16: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vsubs.s16\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0xd4; +} + +let usesCustomInserter = 1 in +def EE_VSUBS_S16_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vsubs_s16_p, 
$qa, $qx, $qy", + [(int_xtensa_ee_vsubs_s16 timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VSUBS_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, QR:$qx, QR:$qy), + "ee.vsubs.s16.ld.incp\t $qu, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-4} = 0x4d; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VSUBS_S16_LD_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vsubs_s16_ld_incp_p, $qu, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vsubs_s16_ld_incp timm:$qu, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VSUBS_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, QR:$qx, QR:$qy), + "ee.vsubs.s16.st.incp\t $qv, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayStore = 1; + + let Inst{28-20} = 0x1d1; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qv{2-0}; + let Inst{7-4} = 0x1; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VSUBS_S16_ST_INCP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vsubs_s16_st_incp_p, $qv, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vsubs_s16_st_incp timm:$qv, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VSUBS_S32: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vsubs.s32\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 
0xe4; +} + +let usesCustomInserter = 1 in +def EE_VSUBS_S32_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vsubs_s32_p, $qa, $qx, $qy", + [(int_xtensa_ee_vsubs_s32 timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VSUBS_S32_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, QR:$qx, QR:$qy), + "ee.vsubs.s32.ld.incp\t $qu, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-4} = 0x5d; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VSUBS_S32_LD_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vsubs_s32_ld_incp_p, $qu, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vsubs_s32_ld_incp timm:$qu, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VSUBS_S32_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, QR:$qx, QR:$qy), + "ee.vsubs.s32.st.incp\t $qv, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayStore = 1; + + let Inst{28-20} = 0x1d1; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qv{2-0}; + let Inst{7-4} = 0x2; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VSUBS_S32_ST_INCP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vsubs_s32_st_incp_p, $qv, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vsubs_s32_st_incp timm:$qv, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VSUBS_S8: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.vsubs.s8\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xe; + let 
Inst{15} = qa{0}; + let Inst{14} = qy{2}; + let Inst{13} = 0x1; + let Inst{12-11} = qy{1-0}; + let Inst{10-8} = qx{2-0}; + let Inst{7-0} = 0xf4; +} + +let usesCustomInserter = 1 in +def EE_VSUBS_S8_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vsubs_s8_p, $qa, $qx, $qy", + [(int_xtensa_ee_vsubs_s8 timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VSUBS_S8_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, QR:$qx, QR:$qy), + "ee.vsubs.s8.ld.incp\t $qu, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qu; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayLoad = 1; + + let Inst{28-23} = 0x38; + let Inst{22-20} = qu{2-0}; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-4} = 0x6d; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VSUBS_S8_LD_INCP_P : Pseudo<(outs), (ins imm8:$qu, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vsubs_s8_ld_incp_p, $qu, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vsubs_s8_ld_incp timm:$qu, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VSUBS_S8_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, QR:$qx, QR:$qy), + "ee.vsubs.s8.st.incp\t $qv, $as, $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qv; + bits<4> as; + bits<3> qa; + bits<3> qx; + bits<3> qy; + + let mayStore = 1; + + let Inst{28-20} = 0x1d1; + let Inst{19-17} = qa{2-0}; + let Inst{16-14} = qx{2-0}; + let Inst{13-11} = qy{2-0}; + let Inst{10-8} = qv{2-0}; + let Inst{7-4} = 0x3; + let Inst{3-0} = as{3-0}; +} + +let usesCustomInserter = 1 in +def EE_VSUBS_S8_ST_INCP_P : Pseudo<(outs), (ins imm8:$qv, AR:$as, imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_vsubs_s8_st_incp_p, $qv, $as, $qa, $qx, $qy", + [(int_xtensa_ee_vsubs_s8_st_incp timm:$qv, AR:$as, timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_VUNZIP_16: EE_Inst24<(outs QR:$qs0r, QR:$qs1r), (ins QR:$qs0, QR:$qs1), + "ee.vunzip.16\t $qs0, $qs1", []>, 
Requires<[HasESP32S3Ops]> +{ + bits<3> qs0; + bits<3> qs1; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qs1{2-1}; + let Inst{19-16} = 0xc; + let Inst{15} = qs1{0}; + let Inst{14-12} = qs0{2-0}; + let Inst{11-0} = 0x384; +} + +let usesCustomInserter = 1 in +def EE_VUNZIP_16_P : Pseudo<(outs), (ins imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vunzip_16_p, $qs0, $qs1", + [(int_xtensa_ee_vunzip_16 timm:$qs0, timm:$qs1)]>; + +def EE_VUNZIP_32: EE_Inst24<(outs QR:$qs0r, QR:$qs1r), (ins QR:$qs0, QR:$qs1), + "ee.vunzip.32\t $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qs0; + bits<3> qs1; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qs1{2-1}; + let Inst{19-16} = 0xc; + let Inst{15} = qs1{0}; + let Inst{14-12} = qs0{2-0}; + let Inst{11-0} = 0x394; +} + +let usesCustomInserter = 1 in +def EE_VUNZIP_32_P : Pseudo<(outs), (ins imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vunzip_32_p, $qs0, $qs1", + [(int_xtensa_ee_vunzip_32 timm:$qs0, timm:$qs1)]>; + +def EE_VUNZIP_8: EE_Inst24<(outs QR:$qs0r, QR:$qs1r), (ins QR:$qs0, QR:$qs1), + "ee.vunzip.8\t $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qs0; + bits<3> qs1; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qs1{2-1}; + let Inst{19-16} = 0xc; + let Inst{15} = qs1{0}; + let Inst{14-12} = qs0{2-0}; + let Inst{11-0} = 0x3a4; +} + +let usesCustomInserter = 1 in +def EE_VUNZIP_8_P : Pseudo<(outs), (ins imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vunzip_8_p, $qs0, $qs1", + [(int_xtensa_ee_vunzip_8 timm:$qs0, timm:$qs1)]>; + +def EE_VZIP_16: EE_Inst24<(outs QR:$qs0r, QR:$qs1r), (ins QR:$qs0, QR:$qs1), + "ee.vzip.16\t $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qs0; + bits<3> qs1; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qs1{2-1}; + let Inst{19-16} = 0xc; + let Inst{15} = qs1{0}; + let Inst{14-12} = qs0{2-0}; + let Inst{11-0} = 0x3b4; +} + +let usesCustomInserter = 1 in +def EE_VZIP_16_P : Pseudo<(outs), (ins imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vzip_16_p, $qs0, $qs1", + [(int_xtensa_ee_vzip_16 timm:$qs0, 
timm:$qs1)]>; + +def EE_VZIP_32: EE_Inst24<(outs QR:$qs0r, QR:$qs1r), (ins QR:$qs0, QR:$qs1), + "ee.vzip.32\t $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qs0; + bits<3> qs1; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qs1{2-1}; + let Inst{19-16} = 0xc; + let Inst{15} = qs1{0}; + let Inst{14-12} = qs0{2-0}; + let Inst{11-0} = 0x3c4; +} + +let usesCustomInserter = 1 in +def EE_VZIP_32_P : Pseudo<(outs), (ins imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vzip_32_p, $qs0, $qs1", + [(int_xtensa_ee_vzip_32 timm:$qs0, timm:$qs1)]>; + +def EE_VZIP_8: EE_Inst24<(outs QR:$qs0r, QR:$qs1r), (ins QR:$qs0, QR:$qs1), + "ee.vzip.8\t $qs0, $qs1", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qs0; + bits<3> qs1; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qs1{2-1}; + let Inst{19-16} = 0xc; + let Inst{15} = qs1{0}; + let Inst{14-12} = qs0{2-0}; + let Inst{11-0} = 0x3d4; +} + +let usesCustomInserter = 1 in +def EE_VZIP_8_P : Pseudo<(outs), (ins imm8:$qs0, imm8:$qs1), + "!xtensa_ee_vzip_8_p, $qs0, $qs1", + [(int_xtensa_ee_vzip_8 timm:$qs0, timm:$qs1)]>; + +def EE_XORQ: EE_Inst24<(outs QR:$qa), (ins QR:$qx, QR:$qy), + "ee.xorq\t $qa, $qx, $qy", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + bits<3> qx; + bits<3> qy; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qa{0}; + let Inst{14-12} = 0x3; + let Inst{11-10} = qy{2-1}; + let Inst{9-8} = 0x1; + let Inst{7-6} = qx{2-1}; + let Inst{5} = qy{0}; + let Inst{4} = qx{0}; + let Inst{3-0} = 0x4; +} + +let usesCustomInserter = 1 in +def EE_XORQ_P : Pseudo<(outs), (ins imm8:$qa, imm8:$qx, imm8:$qy), + "!xtensa_ee_xorq_p, $qa, $qx, $qy", + [(int_xtensa_ee_xorq timm:$qa, timm:$qx, timm:$qy)]>; + +def EE_ZERO_ACCX: EE_Inst24<(outs), (ins), + "ee.zero.accx\t", []>, Requires<[HasESP32S3Ops]> +{ + + + let Inst{23-0} = 0x250804; +} + +let usesCustomInserter = 1 in +def EE_ZERO_ACCX_P : Pseudo<(outs), (ins), + "!xtensa_ee_zero_accx_p", + [(int_xtensa_ee_zero_accx)]>; + +def EE_ZERO_Q: 
EE_Inst24<(outs QR:$qa), (ins), + "ee.zero.q\t $qa", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> qa; + + + let Inst{23-22} = 0x3; + let Inst{21-20} = qa{2-1}; + let Inst{19-16} = 0xd; + let Inst{15} = qa{0}; + let Inst{14-0} = 0x7fa4; +} + +let usesCustomInserter = 1 in +def EE_ZERO_Q_P : Pseudo<(outs), (ins imm8:$qa), + "!xtensa_ee_zero_q_p, $qa", + [(int_xtensa_ee_zero_q timm:$qa)]>; + +def EE_ZERO_QACC: EE_Inst24<(outs), (ins), + "ee.zero.qacc\t", []>, Requires<[HasESP32S3Ops]> +{ + + + let Inst{23-0} = 0x250844; +} + +let usesCustomInserter = 1 in +def EE_ZERO_QACC_P : Pseudo<(outs), (ins), + "!xtensa_ee_zero_qacc_p", + [(int_xtensa_ee_zero_qacc)]>; + +def RUR_ACCX_0: EE_Inst24<(outs AR:$arr), (ins), + "rur.accx_0\t $arr", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> arr; + + + let Inst{23-16} = 0xe3; + let Inst{15-12} = arr{3-0}; + let Inst{11-0} = 0x0; +} + +def : Pat<(i32 (int_xtensa_rur_accx_0)), (RUR_ACCX_0)>; + +def RUR_ACCX_1: EE_Inst24<(outs AR:$arr), (ins), + "rur.accx_1\t $arr", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> arr; + + + let Inst{23-16} = 0xe3; + let Inst{15-12} = arr{3-0}; + let Inst{11-0} = 0x10; +} + +def : Pat<(i32 (int_xtensa_rur_accx_1)), (RUR_ACCX_1)>; + +def RUR_FFT_BIT_WIDTH: EE_Inst24<(outs AR:$arr), (ins), + "rur.fft_bit_width\t $arr", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> arr; + + + let Inst{23-16} = 0xe3; + let Inst{15-12} = arr{3-0}; + let Inst{11-0} = 0xe0; +} + +def : Pat<(i32 (int_xtensa_rur_fft_bit_width)), (RUR_FFT_BIT_WIDTH)>; + +def RUR_GPIO_OUT: EE_Inst24<(outs AR:$arr), (ins), + "rur.gpio_out\t $arr", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> arr; + + + let Inst{23-16} = 0xe3; + let Inst{15-12} = arr{3-0}; + let Inst{11-0} = 0xc0; +} + +def : Pat<(i32 (int_xtensa_rur_gpio_out)), (RUR_GPIO_OUT)>; + +def RUR_QACC_H_0: EE_Inst24<(outs AR:$arr), (ins), + "rur.qacc_h_0\t $arr", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> arr; + + + let Inst{23-16} = 0xe3; + let Inst{15-12} = arr{3-0}; + let Inst{11-0} = 0x20; +} + 
+def : Pat<(i32 (int_xtensa_rur_qacc_h_0)), (RUR_QACC_H_0)>; + +def RUR_QACC_H_1: EE_Inst24<(outs AR:$arr), (ins), + "rur.qacc_h_1\t $arr", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> arr; + + + let Inst{23-16} = 0xe3; + let Inst{15-12} = arr{3-0}; + let Inst{11-0} = 0x30; +} + +def : Pat<(i32 (int_xtensa_rur_qacc_h_1)), (RUR_QACC_H_1)>; + +def RUR_QACC_H_2: EE_Inst24<(outs AR:$arr), (ins), + "rur.qacc_h_2\t $arr", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> arr; + + + let Inst{23-16} = 0xe3; + let Inst{15-12} = arr{3-0}; + let Inst{11-0} = 0x40; +} + +def : Pat<(i32 (int_xtensa_rur_qacc_h_2)), (RUR_QACC_H_2)>; + +def RUR_QACC_H_3: EE_Inst24<(outs AR:$arr), (ins), + "rur.qacc_h_3\t $arr", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> arr; + + + let Inst{23-16} = 0xe3; + let Inst{15-12} = arr{3-0}; + let Inst{11-0} = 0x50; +} + +def : Pat<(i32 (int_xtensa_rur_qacc_h_3)), (RUR_QACC_H_3)>; + +def RUR_QACC_H_4: EE_Inst24<(outs AR:$arr), (ins), + "rur.qacc_h_4\t $arr", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> arr; + + + let Inst{23-16} = 0xe3; + let Inst{15-12} = arr{3-0}; + let Inst{11-0} = 0x60; +} + +def : Pat<(i32 (int_xtensa_rur_qacc_h_4)), (RUR_QACC_H_4)>; + +def RUR_QACC_L_0: EE_Inst24<(outs AR:$arr), (ins), + "rur.qacc_l_0\t $arr", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> arr; + + + let Inst{23-16} = 0xe3; + let Inst{15-12} = arr{3-0}; + let Inst{11-0} = 0x70; +} + +def : Pat<(i32 (int_xtensa_rur_qacc_l_0)), (RUR_QACC_L_0)>; + +def RUR_QACC_L_1: EE_Inst24<(outs AR:$arr), (ins), + "rur.qacc_l_1\t $arr", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> arr; + + + let Inst{23-16} = 0xe3; + let Inst{15-12} = arr{3-0}; + let Inst{11-0} = 0x80; +} + +def : Pat<(i32 (int_xtensa_rur_qacc_l_1)), (RUR_QACC_L_1)>; + +def RUR_QACC_L_2: EE_Inst24<(outs AR:$arr), (ins), + "rur.qacc_l_2\t $arr", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> arr; + + + let Inst{23-16} = 0xe3; + let Inst{15-12} = arr{3-0}; + let Inst{11-0} = 0x90; +} + +def : Pat<(i32 (int_xtensa_rur_qacc_l_2)), 
(RUR_QACC_L_2)>; + +def RUR_QACC_L_3: EE_Inst24<(outs AR:$arr), (ins), + "rur.qacc_l_3\t $arr", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> arr; + + + let Inst{23-16} = 0xe3; + let Inst{15-12} = arr{3-0}; + let Inst{11-0} = 0xa0; +} + +def : Pat<(i32 (int_xtensa_rur_qacc_l_3)), (RUR_QACC_L_3)>; + +def RUR_QACC_L_4: EE_Inst24<(outs AR:$arr), (ins), + "rur.qacc_l_4\t $arr", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> arr; + + + let Inst{23-16} = 0xe3; + let Inst{15-12} = arr{3-0}; + let Inst{11-0} = 0xb0; +} + +def : Pat<(i32 (int_xtensa_rur_qacc_l_4)), (RUR_QACC_L_4)>; + +def RUR_SAR_BYTE: EE_Inst24<(outs AR:$arr), (ins), + "rur.sar_byte\t $arr", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> arr; + + + let Inst{23-16} = 0xe3; + let Inst{15-12} = arr{3-0}; + let Inst{11-0} = 0xd0; +} + +def : Pat<(i32 (int_xtensa_rur_sar_byte)), (RUR_SAR_BYTE)>; + +def RUR_UA_STATE_0: EE_Inst24<(outs AR:$arr), (ins), + "rur.ua_state_0\t $arr", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> arr; + + + let Inst{23-16} = 0xe3; + let Inst{15-12} = arr{3-0}; + let Inst{11-0} = 0xf0; +} + +def : Pat<(i32 (int_xtensa_rur_ua_state_0)), (RUR_UA_STATE_0)>; + +def RUR_UA_STATE_1: EE_Inst24<(outs AR:$arr), (ins), + "rur.ua_state_1\t $arr", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> arr; + + + let Inst{23-16} = 0xe3; + let Inst{15-12} = arr{3-0}; + let Inst{11-0} = 0x100; +} + +def : Pat<(i32 (int_xtensa_rur_ua_state_1)), (RUR_UA_STATE_1)>; + +def RUR_UA_STATE_2: EE_Inst24<(outs AR:$arr), (ins), + "rur.ua_state_2\t $arr", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> arr; + + + let Inst{23-16} = 0xe3; + let Inst{15-12} = arr{3-0}; + let Inst{11-0} = 0x110; +} + +def : Pat<(i32 (int_xtensa_rur_ua_state_2)), (RUR_UA_STATE_2)>; + +def RUR_UA_STATE_3: EE_Inst24<(outs AR:$arr), (ins), + "rur.ua_state_3\t $arr", []>, Requires<[HasESP32S3Ops]> +{ + bits<4> arr; + + + let Inst{23-16} = 0xe3; + let Inst{15-12} = arr{3-0}; + let Inst{11-0} = 0x120; +} + +def : Pat<(i32 (int_xtensa_rur_ua_state_3)), 
(RUR_UA_STATE_3)>; + +def WUR_ACCX_0: EE_Inst24<(outs), (ins AR:$art), + "wur.accx_0\t $art", [(int_xtensa_wur_accx_0 AR:$art)]>, Requires<[HasESP32S3Ops]> +{ + bits<4> art; + + + let Inst{23-8} = 0xf300; + let Inst{7-4} = art{3-0}; + let Inst{3-0} = 0x0; +} + +def WUR_ACCX_1: EE_Inst24<(outs), (ins AR:$art), + "wur.accx_1\t $art", [(int_xtensa_wur_accx_1 AR:$art)]>, Requires<[HasESP32S3Ops]> +{ + bits<4> art; + + + let Inst{23-8} = 0xf301; + let Inst{7-4} = art{3-0}; + let Inst{3-0} = 0x0; +} + +def WUR_FFT_BIT_WIDTH: EE_Inst24<(outs), (ins AR:$art), + "wur.fft_bit_width\t $art", [(int_xtensa_wur_fft_bit_width AR:$art)]>, Requires<[HasESP32S3Ops]> +{ + bits<4> art; + + + let Inst{23-8} = 0xf30e; + let Inst{7-4} = art{3-0}; + let Inst{3-0} = 0x0; +} + +def WUR_GPIO_OUT: EE_Inst24<(outs), (ins AR:$art), + "wur.gpio_out\t $art", [(int_xtensa_wur_gpio_out AR:$art)]>, Requires<[HasESP32S3Ops]> +{ + bits<4> art; + + + let Inst{23-8} = 0xf30c; + let Inst{7-4} = art{3-0}; + let Inst{3-0} = 0x0; +} + +def WUR_QACC_H_0: EE_Inst24<(outs), (ins AR:$art), + "wur.qacc_h_0\t $art", [(int_xtensa_wur_qacc_h_0 AR:$art)]>, Requires<[HasESP32S3Ops]> +{ + bits<4> art; + + + let Inst{23-8} = 0xf302; + let Inst{7-4} = art{3-0}; + let Inst{3-0} = 0x0; +} + +def WUR_QACC_H_1: EE_Inst24<(outs), (ins AR:$art), + "wur.qacc_h_1\t $art", [(int_xtensa_wur_qacc_h_1 AR:$art)]>, Requires<[HasESP32S3Ops]> +{ + bits<4> art; + + + let Inst{23-8} = 0xf303; + let Inst{7-4} = art{3-0}; + let Inst{3-0} = 0x0; +} + +def WUR_QACC_H_2: EE_Inst24<(outs), (ins AR:$art), + "wur.qacc_h_2\t $art", [(int_xtensa_wur_qacc_h_2 AR:$art)]>, Requires<[HasESP32S3Ops]> +{ + bits<4> art; + + + let Inst{23-8} = 0xf304; + let Inst{7-4} = art{3-0}; + let Inst{3-0} = 0x0; +} + +def WUR_QACC_H_3: EE_Inst24<(outs), (ins AR:$art), + "wur.qacc_h_3\t $art", [(int_xtensa_wur_qacc_h_3 AR:$art)]>, Requires<[HasESP32S3Ops]> +{ + bits<4> art; + + + let Inst{23-8} = 0xf305; + let Inst{7-4} = art{3-0}; + let Inst{3-0} = 0x0; +} + +def 
WUR_QACC_H_4: EE_Inst24<(outs), (ins AR:$art), + "wur.qacc_h_4\t $art", [(int_xtensa_wur_qacc_h_4 AR:$art)]>, Requires<[HasESP32S3Ops]> +{ + bits<4> art; + + + let Inst{23-8} = 0xf306; + let Inst{7-4} = art{3-0}; + let Inst{3-0} = 0x0; +} + +def WUR_QACC_L_0: EE_Inst24<(outs), (ins AR:$art), + "wur.qacc_l_0\t $art", [(int_xtensa_wur_qacc_l_0 AR:$art)]>, Requires<[HasESP32S3Ops]> +{ + bits<4> art; + + + let Inst{23-8} = 0xf307; + let Inst{7-4} = art{3-0}; + let Inst{3-0} = 0x0; +} + +def WUR_QACC_L_1: EE_Inst24<(outs), (ins AR:$art), + "wur.qacc_l_1\t $art", [(int_xtensa_wur_qacc_l_1 AR:$art)]>, Requires<[HasESP32S3Ops]> +{ + bits<4> art; + + + let Inst{23-8} = 0xf308; + let Inst{7-4} = art{3-0}; + let Inst{3-0} = 0x0; +} + +def WUR_QACC_L_2: EE_Inst24<(outs), (ins AR:$art), + "wur.qacc_l_2\t $art", [(int_xtensa_wur_qacc_l_2 AR:$art)]>, Requires<[HasESP32S3Ops]> +{ + bits<4> art; + + + let Inst{23-8} = 0xf309; + let Inst{7-4} = art{3-0}; + let Inst{3-0} = 0x0; +} + +def WUR_QACC_L_3: EE_Inst24<(outs), (ins AR:$art), + "wur.qacc_l_3\t $art", [(int_xtensa_wur_qacc_l_3 AR:$art)]>, Requires<[HasESP32S3Ops]> +{ + bits<4> art; + + + let Inst{23-8} = 0xf30a; + let Inst{7-4} = art{3-0}; + let Inst{3-0} = 0x0; +} + +def WUR_QACC_L_4: EE_Inst24<(outs), (ins AR:$art), + "wur.qacc_l_4\t $art", [(int_xtensa_wur_qacc_l_4 AR:$art)]>, Requires<[HasESP32S3Ops]> +{ + bits<4> art; + + + let Inst{23-8} = 0xf30b; + let Inst{7-4} = art{3-0}; + let Inst{3-0} = 0x0; +} + +def WUR_SAR_BYTE: EE_Inst24<(outs), (ins AR:$art), + "wur.sar_byte\t $art", [(int_xtensa_wur_sar_byte AR:$art)]>, Requires<[HasESP32S3Ops]> +{ + bits<4> art; + + + let Inst{23-8} = 0xf30d; + let Inst{7-4} = art{3-0}; + let Inst{3-0} = 0x0; +} + +def WUR_UA_STATE_0: EE_Inst24<(outs), (ins AR:$art), + "wur.ua_state_0\t $art", [(int_xtensa_wur_ua_state_0 AR:$art)]>, Requires<[HasESP32S3Ops]> +{ + bits<4> art; + + + let Inst{23-8} = 0xf30f; + let Inst{7-4} = art{3-0}; + let Inst{3-0} = 0x0; +} + +def WUR_UA_STATE_1: 
EE_Inst24<(outs), (ins AR:$art), + "wur.ua_state_1\t $art", [(int_xtensa_wur_ua_state_1 AR:$art)]>, Requires<[HasESP32S3Ops]> +{ + bits<4> art; + + + let Inst{23-8} = 0xf310; + let Inst{7-4} = art{3-0}; + let Inst{3-0} = 0x0; +} + +def WUR_UA_STATE_2: EE_Inst24<(outs), (ins AR:$art), + "wur.ua_state_2\t $art", [(int_xtensa_wur_ua_state_2 AR:$art)]>, Requires<[HasESP32S3Ops]> +{ + bits<4> art; + + + let Inst{23-8} = 0xf311; + let Inst{7-4} = art{3-0}; + let Inst{3-0} = 0x0; +} + +def WUR_UA_STATE_3: EE_Inst24<(outs), (ins AR:$art), + "wur.ua_state_3\t $art", [(int_xtensa_wur_ua_state_3 AR:$art)]>, Requires<[HasESP32S3Ops]> +{ + bits<4> art; + + + let Inst{23-8} = 0xf312; + let Inst{7-4} = art{3-0}; + let Inst{3-0} = 0x0; +} + +def mv_QR: EE_Inst24<(outs QR:$a), (ins QR:$b), + "mv.qr\t $a, $b", []>, Requires<[HasESP32S3Ops]> +{ + bits<3> a; + bits<3> b; + + + let Inst{23-22} = 0x2; + let Inst{21-20} = a{2-1}; + let Inst{19-16} = 0xf; + let Inst{15} = a{0}; + let Inst{14-12} = 0x0; + let Inst{11-10} = b{2-1}; + let Inst{9-6} = 0x0; + let Inst{5} = b{0}; + let Inst{4-0} = 0x4; +} + +let usesCustomInserter = 1 in +def mv_QR_P : Pseudo<(outs), (ins imm8:$a, imm8:$b), + "!xtensa_mv_qr_p, $a, $b", + [(int_xtensa_mv_qr timm:$a, timm:$b)]>; diff --git a/llvm/lib/Target/Xtensa/XtensaS3ISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaS3ISelLowering.cpp new file mode 100644 index 0000000000000..301d225c6442e --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaS3ISelLowering.cpp @@ -0,0 +1,5262 @@ +//===- XtensaS3ISelLowering.cpp - Xtensa S3 DAG Lowering Implementation ---===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the interfaces that Xtensa uses to lower LLVM code into a +// selection DAG. +// +//===----------------------------------------------------------------------===// + +#include "XtensaISelLowering.h" +#include "XtensaSubtarget.h" + +using namespace llvm; + +MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( + MachineInstr &MI, MachineBasicBlock *MBB, const TargetInstrInfo &TII, + MachineFunction *MF, MachineRegisterInfo &MRI, DebugLoc DL) const { + switch (MI.getOpcode()) { + default: + llvm_unreachable("Unexpected instr type to insert"); + case Xtensa::EE_ANDQ_P: { + unsigned Opc = Xtensa::EE_ANDQ; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_andq first argument, it must " + "be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_andq first argument, it must " + "be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_andq first argument, it must " + "be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_BITREV_P: { + unsigned Opc = Xtensa::EE_BITREV; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_bitrev first argument, it must " + "be in range [0,7]"); + MachineOperand &AX = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(R1, RegState::Undef) + 
.addReg(AX.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_CMUL_S16_P: { + unsigned Opc = Xtensa::EE_CMUL_S16; + MachineOperand &QZ = MI.getOperand(0); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_cmul_s16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_cmul_s16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_cmul_s16 first argument, it " + "must be in range [0,7]"); + MachineOperand &SEL4 = MI.getOperand(3); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addImm(SEL4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_CMUL_S16_LD_INCP_P: { + unsigned Opc = Xtensa::EE_CMUL_S16_LD_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_cmul_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_cmul_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_cmul_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_cmul_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &SEL4 = MI.getOperand(5); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) 
+ .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QZVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addImm(SEL4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_CMUL_S16_ST_INCP_P: { + unsigned Opc = Xtensa::EE_CMUL_S16_ST_INCP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_cmul_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_cmul_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_cmul_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_cmul_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &SEL4 = MI.getOperand(5); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addImm(SEL4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_FFT_AMS_S16_LD_INCP_P: { + unsigned Opc = Xtensa::EE_FFT_AMS_S16_LD_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_fft_ams_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QZ = MI.getOperand(2); + unsigned 
QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_fft_ams_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QZ1 = MI.getOperand(3); + unsigned QZ1Val = QZ1.getImm(); + assert(QZ1Val < 8 && "Unexpected value of ee_fft_ams_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(4); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_fft_ams_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(5); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_fft_ams_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QM = MI.getOperand(6); + unsigned QMVal = QM.getImm(); + assert(QMVal < 8 && "Unexpected value of ee_fft_ams_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &SEL2 = MI.getOperand(7); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QZ1Val) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QMVal) + .addImm(SEL2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_FFT_AMS_S16_LD_INCP_UAUP_P: { + unsigned Opc = Xtensa::EE_FFT_AMS_S16_LD_INCP_UAUP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_fft_ams_s16_ld_incp_uaup first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_fft_ams_s16_ld_incp_uaup first " + "argument, it must be in range [0,7]"); + MachineOperand &QZ1 = MI.getOperand(3); + unsigned QZ1Val = QZ1.getImm(); + 
assert(QZ1Val < 8 && "Unexpected value of ee_fft_ams_s16_ld_incp_uaup " + "first argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(4); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_fft_ams_s16_ld_incp_uaup first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(5); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_fft_ams_s16_ld_incp_uaup first " + "argument, it must be in range [0,7]"); + MachineOperand &QM = MI.getOperand(6); + unsigned QMVal = QM.getImm(); + assert(QMVal < 8 && "Unexpected value of ee_fft_ams_s16_ld_incp_uaup first " + "argument, it must be in range [0,7]"); + MachineOperand &SEL2 = MI.getOperand(7); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QZ1Val) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QMVal) + .addImm(SEL2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_FFT_AMS_S16_LD_R32_DECP_P: { + unsigned Opc = Xtensa::EE_FFT_AMS_S16_LD_R32_DECP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_fft_ams_s16_ld_r32_decp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_fft_ams_s16_ld_r32_decp first " + "argument, it must be in range [0,7]"); + MachineOperand &QZ1 = MI.getOperand(3); + unsigned QZ1Val = QZ1.getImm(); + assert(QZ1Val < 8 && "Unexpected value of ee_fft_ams_s16_ld_r32_decp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(4); + unsigned QXVal = QX.getImm(); + 
assert(QXVal < 8 && "Unexpected value of ee_fft_ams_s16_ld_r32_decp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(5); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_fft_ams_s16_ld_r32_decp first " + "argument, it must be in range [0,7]"); + MachineOperand &QM = MI.getOperand(6); + unsigned QMVal = QM.getImm(); + assert(QMVal < 8 && "Unexpected value of ee_fft_ams_s16_ld_r32_decp first " + "argument, it must be in range [0,7]"); + MachineOperand &SEL2 = MI.getOperand(7); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QZ1Val) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QMVal) + .addImm(SEL2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_FFT_AMS_S16_ST_INCP_P: { + unsigned Opc = Xtensa::EE_FFT_AMS_S16_ST_INCP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_fft_ams_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QZ1 = MI.getOperand(1); + unsigned QZ1Val = QZ1.getImm(); + assert(QZ1Val < 8 && "Unexpected value of ee_fft_ams_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS0 = MI.getOperand(2); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AS = MI.getOperand(3); + unsigned R2 = MRI.createVirtualRegister(RC); + MachineOperand &QX = MI.getOperand(4); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_fft_ams_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(5); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_fft_ams_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QM 
= MI.getOperand(6); + unsigned QMVal = QM.getImm(); + assert(QMVal < 8 && "Unexpected value of ee_fft_ams_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &SEL2 = MI.getOperand(7); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QZ1Val) + .addReg(R1, RegState::Undef) + .addReg(R2, RegState::Undef) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS0.getReg()) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QMVal) + .addImm(SEL2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_FFT_CMUL_S16_LD_XP_P: { + unsigned Opc = Xtensa::EE_FFT_CMUL_S16_LD_XP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_fft_cmul_s16_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_fft_cmul_s16_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(4); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_fft_cmul_s16_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(5); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_fft_cmul_s16_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &SEL8 = MI.getOperand(6); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QZVal) + .addReg(AS.getReg()) + .addReg(AD.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addImm(SEL8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_FFT_CMUL_S16_ST_XP_P: { 
+ unsigned Opc = Xtensa::EE_FFT_CMUL_S16_ST_XP; + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_fft_cmul_s16_st_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_fft_cmul_s16_st_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_fft_cmul_s16_st_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(3); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(4); + MachineOperand &SEL8 = MI.getOperand(5); + MachineOperand &UPD4 = MI.getOperand(6); + MachineOperand &SAR4 = MI.getOperand(7); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(AD.getReg()) + .addImm(SEL8.getImm()) + .addImm(UPD4.getImm()) + .addImm(SAR4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_FFT_R2BF_S16_P: { + unsigned Opc = Xtensa::EE_FFT_R2BF_S16; + MachineOperand &QA0 = MI.getOperand(0); + unsigned QA0Val = QA0.getImm(); + assert(QA0Val < 8 && "Unexpected value of ee_fft_r2bf_s16 first argument, " + "it must be in range [0,7]"); + MachineOperand &QA1 = MI.getOperand(1); + unsigned QA1Val = QA1.getImm(); + assert(QA1Val < 8 && "Unexpected value of ee_fft_r2bf_s16 first argument, " + "it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_fft_r2bf_s16 first argument, " + "it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(3); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value 
of ee_fft_r2bf_s16 first argument, " + "it must be in range [0,7]"); + MachineOperand &SEL2 = MI.getOperand(4); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QA0Val) + .addReg(Xtensa::Q0 + QA1Val) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addImm(SEL2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_FFT_R2BF_S16_ST_INCP_P: { + unsigned Opc = Xtensa::EE_FFT_R2BF_S16_ST_INCP; + MachineOperand &QA0 = MI.getOperand(0); + unsigned QA0Val = QA0.getImm(); + assert(QA0Val < 8 && "Unexpected value of ee_fft_r2bf_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_fft_r2bf_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_fft_r2bf_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(3); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &SAR4 = MI.getOperand(4); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QA0Val) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(AS.getReg()) + .addImm(SAR4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_FFT_VST_R32_DECP_P: { + unsigned Opc = Xtensa::EE_FFT_VST_R32_DECP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_fft_vst_r32_decp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &SAR2 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + 
.addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addImm(SAR2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LDF_128_IP_P: { + unsigned Opc = Xtensa::EE_LDF_128_IP; + MachineOperand &FU3 = MI.getOperand(0); + MachineOperand &FU2 = MI.getOperand(1); + MachineOperand &FU1 = MI.getOperand(2); + MachineOperand &FU0 = MI.getOperand(3); + MachineOperand &AS = MI.getOperand(4); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16F = MI.getOperand(5); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(FU3.getReg()) + .addReg(FU2.getReg()) + .addReg(FU1.getReg()) + .addReg(FU0.getReg()) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16F.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LDF_128_XP_P: { + unsigned Opc = Xtensa::EE_LDF_128_XP; + MachineOperand &FU3 = MI.getOperand(0); + MachineOperand &FU2 = MI.getOperand(1); + MachineOperand &FU1 = MI.getOperand(2); + MachineOperand &FU0 = MI.getOperand(3); + MachineOperand &AS = MI.getOperand(4); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(5); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(FU3.getReg()) + .addReg(FU2.getReg()) + .addReg(FU1.getReg()) + .addReg(FU0.getReg()) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LDF_64_IP_P: { + unsigned Opc = Xtensa::EE_LDF_64_IP; + MachineOperand &FU1 = MI.getOperand(0); + MachineOperand &FU0 = MI.getOperand(1); + MachineOperand &AS = MI.getOperand(2); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM8 = MI.getOperand(3); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(FU1.getReg()) + .addReg(FU0.getReg()) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + 
.addImm(IMM8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LDF_64_XP_P: { + unsigned Opc = Xtensa::EE_LDF_64_XP; + MachineOperand &FU1 = MI.getOperand(0); + MachineOperand &FU0 = MI.getOperand(1); + MachineOperand &AS = MI.getOperand(2); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(3); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(FU1.getReg()) + .addReg(FU0.getReg()) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LDQA_S16_128_IP_P: { + unsigned Opc = Xtensa::EE_LDQA_S16_128_IP; + MachineOperand &AS = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LDQA_S16_128_XP_P: { + unsigned Opc = Xtensa::EE_LDQA_S16_128_XP; + MachineOperand &AS = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LDQA_S8_128_IP_P: { + unsigned Opc = Xtensa::EE_LDQA_S8_128_IP; + MachineOperand &AS = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LDQA_S8_128_XP_P: { + unsigned Opc = 
Xtensa::EE_LDQA_S8_128_XP; + MachineOperand &AS = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LDQA_U16_128_IP_P: { + unsigned Opc = Xtensa::EE_LDQA_U16_128_IP; + MachineOperand &AS = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LDQA_U16_128_XP_P: { + unsigned Opc = Xtensa::EE_LDQA_U16_128_XP; + MachineOperand &AS = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LDQA_U8_128_IP_P: { + unsigned Opc = Xtensa::EE_LDQA_U8_128_IP; + MachineOperand &AS = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LDQA_U8_128_XP_P: { + unsigned Opc = Xtensa::EE_LDQA_U8_128_XP; + MachineOperand &AS = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + 
.addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LDXQ_32_P: { + unsigned Opc = Xtensa::EE_LDXQ_32; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_ldxq_32 first argument, it " + "must be in range [0,7]"); + MachineOperand &QS = MI.getOperand(1); + unsigned QSVal = QS.getImm(); + assert(QSVal < 8 && "Unexpected value of ee_ldxq_32 first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(2); + MachineOperand &SEL4 = MI.getOperand(3); + MachineOperand &SEL8 = MI.getOperand(4); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(Xtensa::Q0 + QSVal) + .addReg(AS.getReg()) + .addImm(SEL4.getImm()) + .addImm(SEL8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LD_128_USAR_IP_P: { + unsigned Opc = Xtensa::EE_LD_128_USAR_IP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_ld_128_usar_ip first argument, " + "it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LD_128_USAR_XP_P: { + unsigned Opc = Xtensa::EE_LD_128_USAR_XP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_ld_128_usar_xp first argument, " + "it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + 
BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LD_ACCX_IP_P: { + unsigned Opc = Xtensa::EE_LD_ACCX_IP; + MachineOperand &AS = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM8 = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LD_QACC_H_H_32_IP_P: { + unsigned Opc = Xtensa::EE_LD_QACC_H_H_32_IP; + MachineOperand &AS = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM4 = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LD_QACC_H_L_128_IP_P: { + unsigned Opc = Xtensa::EE_LD_QACC_H_L_128_IP; + MachineOperand &AS = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LD_QACC_L_H_32_IP_P: { + unsigned Opc = Xtensa::EE_LD_QACC_L_H_32_IP; + MachineOperand &AS = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM4 = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LD_QACC_L_L_128_IP_P: { + unsigned Opc 
= Xtensa::EE_LD_QACC_L_L_128_IP; + MachineOperand &AS = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_LD_UA_STATE_IP_P: { + unsigned Opc = Xtensa::EE_LD_UA_STATE_IP; + MachineOperand &AS = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_MOVI_32_A_P: { + unsigned Opc = Xtensa::EE_MOVI_32_A; + MachineOperand &QS = MI.getOperand(0); + unsigned QSVal = QS.getImm(); + assert(QSVal < 8 && "Unexpected value of ee_movi_32_a first argument, it " + "must be in range [0,7]"); + MachineOperand &AU = MI.getOperand(1); + MachineOperand &SEL4 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(AU.getReg()) + .addReg(Xtensa::Q0 + QSVal) + .addImm(SEL4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_MOVI_32_Q_P: { + unsigned Opc = Xtensa::EE_MOVI_32_Q; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_movi_32_q first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + MachineOperand &SEL4 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(AS.getReg()) + .addImm(SEL4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_MOV_S16_QACC_P: { + unsigned Opc = Xtensa::EE_MOV_S16_QACC; + MachineOperand &QS = MI.getOperand(0); + unsigned QSVal = QS.getImm(); + assert(QSVal < 8 && 
"Unexpected value of ee_mov_s16_qacc first argument, " + "it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(Xtensa::Q0 + QSVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_MOV_S8_QACC_P: { + unsigned Opc = Xtensa::EE_MOV_S8_QACC; + MachineOperand &QS = MI.getOperand(0); + unsigned QSVal = QS.getImm(); + assert(QSVal < 8 && "Unexpected value of ee_mov_s8_qacc first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(Xtensa::Q0 + QSVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_MOV_U16_QACC_P: { + unsigned Opc = Xtensa::EE_MOV_U16_QACC; + MachineOperand &QS = MI.getOperand(0); + unsigned QSVal = QS.getImm(); + assert(QSVal < 8 && "Unexpected value of ee_mov_u16_qacc first argument, " + "it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(Xtensa::Q0 + QSVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_MOV_U8_QACC_P: { + unsigned Opc = Xtensa::EE_MOV_U8_QACC; + MachineOperand &QS = MI.getOperand(0); + unsigned QSVal = QS.getImm(); + assert(QSVal < 8 && "Unexpected value of ee_mov_u8_qacc first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(Xtensa::Q0 + QSVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_NOTQ_P: { + unsigned Opc = Xtensa::EE_NOTQ; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_notq first argument, it must " + "be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_notq first argument, it must " + "be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_ORQ_P: { + unsigned Opc = Xtensa::EE_ORQ; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert( + 
QAVal < 8 && + "Unexpected value of ee_orq first argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert( + QXVal < 8 && + "Unexpected value of ee_orq first argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert( + QYVal < 8 && + "Unexpected value of ee_orq first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_SLCI_2Q_P: { + unsigned Opc = Xtensa::EE_SLCI_2Q; + MachineOperand &QS1 = MI.getOperand(0); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_slci_2q first argument, it " + "must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(1); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_slci_2q first argument, it " + "must be in range [0,7]"); + MachineOperand &SAR16 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QS1Val) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val) + .addReg(Xtensa::Q0 + QS0Val) + .addImm(SAR16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_SLCXXP_2Q_P: { + unsigned Opc = Xtensa::EE_SLCXXP_2Q; + MachineOperand &QS1 = MI.getOperand(0); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_slcxxp_2q first argument, it " + "must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(1); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_slcxxp_2q first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(2); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(3); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 
+ QS1Val) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS1Val) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addReg(AD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_SRCI_2Q_P: { + unsigned Opc = Xtensa::EE_SRCI_2Q; + MachineOperand &QS1 = MI.getOperand(0); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_srci_2q first argument, it " + "must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(1); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_srci_2q first argument, it " + "must be in range [0,7]"); + MachineOperand &SAR16 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QS1Val) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val) + .addReg(Xtensa::Q0 + QS0Val) + .addImm(SAR16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_SRCMB_S16_QACC_P: { + unsigned Opc = Xtensa::EE_SRCMB_S16_QACC; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_srcmb_s16_qacc first argument, " + "it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + MachineOperand &SEL2 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(AS.getReg()) + .addImm(SEL2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_SRCMB_S8_QACC_P: { + unsigned Opc = Xtensa::EE_SRCMB_S8_QACC; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_srcmb_s8_qacc first argument, " + "it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + MachineOperand &SEL2 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(AS.getReg()) + .addImm(SEL2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_SRCQ_128_ST_INCP_P: { + 
unsigned Opc = Xtensa::EE_SRCQ_128_ST_INCP; + MachineOperand &QS0 = MI.getOperand(0); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_srcq_128_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(1); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_srcq_128_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(2); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val) + .addReg(AS.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_SRCXXP_2Q_P: { + unsigned Opc = Xtensa::EE_SRCXXP_2Q; + MachineOperand &QS1 = MI.getOperand(0); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_srcxxp_2q first argument, it " + "must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(1); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_srcxxp_2q first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(2); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(3); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QS1Val) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS1Val) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addReg(AD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_SRC_Q_P: { + unsigned Opc = Xtensa::EE_SRC_Q; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_src_q first argument, it must " + "be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(1); + unsigned QS0Val = QS0.getImm(); + 
assert(QS0Val < 8 && "Unexpected value of ee_src_q first argument, it must " + "be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(2); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_src_q first argument, it must " + "be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_SRC_Q_LD_IP_P: { + unsigned Opc = Xtensa::EE_SRC_Q_LD_IP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_src_q_ld_ip first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(2); + MachineOperand &QS0 = MI.getOperand(3); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_src_q_ld_ip first argument, " + "it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(4); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_src_q_ld_ip first argument, " + "it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_SRC_Q_LD_XP_P: { + unsigned Opc = Xtensa::EE_SRC_Q_LD_XP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_src_q_ld_xp first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = 
MI.getOperand(2); + MachineOperand &QS0 = MI.getOperand(3); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_src_q_ld_xp first argument, " + "it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(4); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_src_q_ld_xp first argument, " + "it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addReg(AD.getReg()) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_SRC_Q_QUP_P: { + unsigned Opc = Xtensa::EE_SRC_Q_QUP; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_src_q_qup first argument, it " + "must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(1); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_src_q_qup first argument, it " + "must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(2); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_src_q_qup first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_SRS_ACCX_P: { + unsigned Opc = Xtensa::EE_SRS_ACCX; + MachineOperand &AU = MI.getOperand(0); + MachineOperand &AS = MI.getOperand(1); + MachineOperand &SEL2 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(AU.getReg()) + .addReg(AS.getReg()) + .addImm(SEL2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_STF_128_IP_P: { + unsigned Opc = Xtensa::EE_STF_128_IP; + MachineOperand &FV3 = MI.getOperand(0); + MachineOperand &FV2 = 
MI.getOperand(1); + MachineOperand &FV1 = MI.getOperand(2); + MachineOperand &FV0 = MI.getOperand(3); + MachineOperand &AS = MI.getOperand(4); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16F = MI.getOperand(5); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(FV3.getReg()) + .addReg(FV2.getReg()) + .addReg(FV1.getReg()) + .addReg(FV0.getReg()) + .addReg(AS.getReg()) + .addImm(IMM16F.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_STF_128_XP_P: { + unsigned Opc = Xtensa::EE_STF_128_XP; + MachineOperand &FV3 = MI.getOperand(0); + MachineOperand &FV2 = MI.getOperand(1); + MachineOperand &FV1 = MI.getOperand(2); + MachineOperand &FV0 = MI.getOperand(3); + MachineOperand &AS = MI.getOperand(4); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(5); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(FV3.getReg()) + .addReg(FV2.getReg()) + .addReg(FV1.getReg()) + .addReg(FV0.getReg()) + .addReg(AS.getReg()) + .addReg(AD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_STF_64_IP_P: { + unsigned Opc = Xtensa::EE_STF_64_IP; + MachineOperand &FV1 = MI.getOperand(0); + MachineOperand &FV0 = MI.getOperand(1); + MachineOperand &AS = MI.getOperand(2); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM8 = MI.getOperand(3); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(FV1.getReg()) + .addReg(FV0.getReg()) + .addReg(AS.getReg()) + .addImm(IMM8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_STF_64_XP_P: { + unsigned Opc = Xtensa::EE_STF_64_XP; + MachineOperand &FV1 = MI.getOperand(0); + MachineOperand &FV0 = MI.getOperand(1); + MachineOperand &AS = MI.getOperand(2); + 
const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(3); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(FV1.getReg()) + .addReg(FV0.getReg()) + .addReg(AS.getReg()) + .addReg(AD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_STXQ_32_P: { + unsigned Opc = Xtensa::EE_STXQ_32; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_stxq_32 first argument, it " + "must be in range [0,7]"); + MachineOperand &QS = MI.getOperand(1); + unsigned QSVal = QS.getImm(); + assert(QSVal < 8 && "Unexpected value of ee_stxq_32 first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(2); + MachineOperand &SEL4 = MI.getOperand(3); + MachineOperand &SEL8 = MI.getOperand(4); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QVVal) + .addReg(Xtensa::Q0 + QSVal) + .addReg(AS.getReg()) + .addImm(SEL4.getImm()) + .addImm(SEL8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_ST_ACCX_IP_P: { + unsigned Opc = Xtensa::EE_ST_ACCX_IP; + MachineOperand &AS = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM8 = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_ST_QACC_H_H_32_IP_P: { + unsigned Opc = Xtensa::EE_ST_QACC_H_H_32_IP; + MachineOperand &AS = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM4 = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case 
Xtensa::EE_ST_QACC_H_L_128_IP_P: { + unsigned Opc = Xtensa::EE_ST_QACC_H_L_128_IP; + MachineOperand &AS = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_ST_QACC_L_H_32_IP_P: { + unsigned Opc = Xtensa::EE_ST_QACC_L_H_32_IP; + MachineOperand &AS = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM4 = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_ST_QACC_L_L_128_IP_P: { + unsigned Opc = Xtensa::EE_ST_QACC_L_L_128_IP; + MachineOperand &AS = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_ST_UA_STATE_IP_P: { + unsigned Opc = Xtensa::EE_ST_UA_STATE_IP; + MachineOperand &AS = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VADDS_S16_P: { + unsigned Opc = Xtensa::EE_VADDS_S16; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vadds_s16 first argument, it " + "must be in 
range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vadds_s16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vadds_s16 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VADDS_S16_LD_INCP_P: { + unsigned Opc = Xtensa::EE_VADDS_S16_LD_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vadds_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vadds_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vadds_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vadds_s16_ld_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VADDS_S16_ST_INCP_P: { + unsigned Opc = Xtensa::EE_VADDS_S16_ST_INCP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of 
ee_vadds_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vadds_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vadds_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vadds_s16_st_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VADDS_S32_P: { + unsigned Opc = Xtensa::EE_VADDS_S32; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vadds_s32 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vadds_s32 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vadds_s32 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VADDS_S32_LD_INCP_P: { + unsigned Opc = Xtensa::EE_VADDS_S32_LD_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 
&& "Unexpected value of ee_vadds_s32_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vadds_s32_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vadds_s32_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vadds_s32_ld_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VADDS_S32_ST_INCP_P: { + unsigned Opc = Xtensa::EE_VADDS_S32_ST_INCP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vadds_s32_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vadds_s32_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vadds_s32_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vadds_s32_st_incp 
first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VADDS_S8_P: { + unsigned Opc = Xtensa::EE_VADDS_S8; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vadds_s8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vadds_s8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vadds_s8 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VADDS_S8_LD_INCP_P: { + unsigned Opc = Xtensa::EE_VADDS_S8_LD_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vadds_s8_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vadds_s8_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vadds_s8_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of 
ee_vadds_s8_ld_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VADDS_S8_ST_INCP_P: { + unsigned Opc = Xtensa::EE_VADDS_S8_ST_INCP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vadds_s8_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vadds_s8_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vadds_s8_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vadds_s8_st_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VCMP_EQ_S16_P: { + unsigned Opc = Xtensa::EE_VCMP_EQ_S16; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vcmp_eq_s16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vcmp_eq_s16 first argument, it " + "must be in range [0,7]"); 
+ MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vcmp_eq_s16 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VCMP_EQ_S32_P: { + unsigned Opc = Xtensa::EE_VCMP_EQ_S32; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vcmp_eq_s32 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vcmp_eq_s32 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vcmp_eq_s32 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VCMP_EQ_S8_P: { + unsigned Opc = Xtensa::EE_VCMP_EQ_S8; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vcmp_eq_s8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vcmp_eq_s8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vcmp_eq_s8 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VCMP_GT_S16_P: { + unsigned Opc = 
Xtensa::EE_VCMP_GT_S16; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vcmp_gt_s16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vcmp_gt_s16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vcmp_gt_s16 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VCMP_GT_S32_P: { + unsigned Opc = Xtensa::EE_VCMP_GT_S32; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vcmp_gt_s32 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vcmp_gt_s32 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vcmp_gt_s32 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VCMP_GT_S8_P: { + unsigned Opc = Xtensa::EE_VCMP_GT_S8; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vcmp_gt_s8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vcmp_gt_s8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = 
MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vcmp_gt_s8 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VCMP_LT_S16_P: { + unsigned Opc = Xtensa::EE_VCMP_LT_S16; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vcmp_lt_s16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vcmp_lt_s16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vcmp_lt_s16 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VCMP_LT_S32_P: { + unsigned Opc = Xtensa::EE_VCMP_LT_S32; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vcmp_lt_s32 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vcmp_lt_s32 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vcmp_lt_s32 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VCMP_LT_S8_P: { + unsigned Opc = Xtensa::EE_VCMP_LT_S8; + 
MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vcmp_lt_s8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vcmp_lt_s8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vcmp_lt_s8 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VLDBC_16_P: { + unsigned Opc = Xtensa::EE_VLDBC_16; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vldbc_16 first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(AS.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VLDBC_16_IP_P: { + unsigned Opc = Xtensa::EE_VLDBC_16_IP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vldbc_16_ip first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM2 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VLDBC_16_XP_P: { + unsigned Opc = Xtensa::EE_VLDBC_16_XP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vldbc_16_xp first argument, 
it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VLDBC_32_P: { + unsigned Opc = Xtensa::EE_VLDBC_32; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vldbc_32 first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(AS.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VLDBC_32_IP_P: { + unsigned Opc = Xtensa::EE_VLDBC_32_IP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vldbc_32_ip first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM4 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VLDBC_32_XP_P: { + unsigned Opc = Xtensa::EE_VLDBC_32_XP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vldbc_32_xp first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + 
.addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VLDBC_8_P: { + unsigned Opc = Xtensa::EE_VLDBC_8; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vldbc_8 first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(AS.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VLDBC_8_IP_P: { + unsigned Opc = Xtensa::EE_VLDBC_8_IP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vldbc_8_ip first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM1 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM1.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VLDBC_8_XP_P: { + unsigned Opc = Xtensa::EE_VLDBC_8_XP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vldbc_8_xp first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VLDHBC_16_INCP_P: { + unsigned Opc = Xtensa::EE_VLDHBC_16_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && 
"Unexpected value of ee_vldhbc_16_incp first argument, " + "it must be in range [0,7]"); + MachineOperand &QU1 = MI.getOperand(1); + unsigned QU1Val = QU1.getImm(); + assert(QU1Val < 8 && "Unexpected value of ee_vldhbc_16_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(2); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(Xtensa::Q0 + QU1Val) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VLD_128_IP_P: { + unsigned Opc = Xtensa::EE_VLD_128_IP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vld_128_ip first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VLD_128_XP_P: { + unsigned Opc = Xtensa::EE_VLD_128_XP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vld_128_xp first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VLD_H_64_IP_P: { + unsigned Opc = Xtensa::EE_VLD_H_64_IP; + MachineOperand &QU = 
MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vld_h_64_ip first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM8 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VLD_H_64_XP_P: { + unsigned Opc = Xtensa::EE_VLD_H_64_XP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vld_h_64_xp first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VLD_L_64_IP_P: { + unsigned Opc = Xtensa::EE_VLD_L_64_IP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vld_l_64_ip first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM8 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VLD_L_64_XP_P: { + unsigned Opc = Xtensa::EE_VLD_L_64_XP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected 
value of ee_vld_l_64_xp first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMAX_S16_P: { + unsigned Opc = Xtensa::EE_VMAX_S16; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vmax_s16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmax_s16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmax_s16 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMAX_S16_LD_INCP_P: { + unsigned Opc = Xtensa::EE_VMAX_S16_LD_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmax_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vmax_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmax_s16_ld_incp first " + "argument, it 
must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmax_s16_ld_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMAX_S16_ST_INCP_P: { + unsigned Opc = Xtensa::EE_VMAX_S16_ST_INCP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vmax_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vmax_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmax_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmax_s16_st_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMAX_S32_P: { + unsigned Opc = Xtensa::EE_VMAX_S32; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vmax_s32 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + 
unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmax_s32 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmax_s32 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMAX_S32_LD_INCP_P: { + unsigned Opc = Xtensa::EE_VMAX_S32_LD_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmax_s32_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vmax_s32_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmax_s32_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmax_s32_ld_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMAX_S32_ST_INCP_P: { + unsigned Opc = Xtensa::EE_VMAX_S32_ST_INCP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vmax_s32_st_incp first " + "argument, it must be in range [0,7]"); + 
MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vmax_s32_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmax_s32_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmax_s32_st_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMAX_S8_P: { + unsigned Opc = Xtensa::EE_VMAX_S8; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vmax_s8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmax_s8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmax_s8 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMAX_S8_LD_INCP_P: { + unsigned Opc = Xtensa::EE_VMAX_S8_LD_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmax_s8_ld_incp first " + "argument, it must be in range 
[0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vmax_s8_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmax_s8_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmax_s8_ld_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMAX_S8_ST_INCP_P: { + unsigned Opc = Xtensa::EE_VMAX_S8_ST_INCP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vmax_s8_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vmax_s8_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmax_s8_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmax_s8_st_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, 
RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMIN_S16_P: { + unsigned Opc = Xtensa::EE_VMIN_S16; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vmin_s16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmin_s16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmin_s16 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMIN_S16_LD_INCP_P: { + unsigned Opc = Xtensa::EE_VMIN_S16_LD_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmin_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vmin_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmin_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmin_s16_ld_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + 
.addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMIN_S16_ST_INCP_P: { + unsigned Opc = Xtensa::EE_VMIN_S16_ST_INCP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vmin_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vmin_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmin_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmin_s16_st_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMIN_S32_P: { + unsigned Opc = Xtensa::EE_VMIN_S32; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vmin_s32 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmin_s32 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of 
ee_vmin_s32 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMIN_S32_LD_INCP_P: { + unsigned Opc = Xtensa::EE_VMIN_S32_LD_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmin_s32_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vmin_s32_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmin_s32_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmin_s32_ld_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMIN_S32_ST_INCP_P: { + unsigned Opc = Xtensa::EE_VMIN_S32_ST_INCP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vmin_s32_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected 
value of ee_vmin_s32_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmin_s32_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmin_s32_st_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMIN_S8_P: { + unsigned Opc = Xtensa::EE_VMIN_S8; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vmin_s8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmin_s8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmin_s8 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMIN_S8_LD_INCP_P: { + unsigned Opc = Xtensa::EE_VMIN_S8_LD_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmin_s8_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && 
"Unexpected value of ee_vmin_s8_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmin_s8_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmin_s8_ld_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMIN_S8_ST_INCP_P: { + unsigned Opc = Xtensa::EE_VMIN_S8_ST_INCP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vmin_s8_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vmin_s8_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmin_s8_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmin_s8_st_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S16_ACCX_P: { + unsigned Opc 
= Xtensa::EE_VMULAS_S16_ACCX; + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s16_accx first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s16_accx first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S16_ACCX_LD_IP_P: { + unsigned Opc = Xtensa::EE_VMULAS_S16_ACCX_LD_IP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_s16_accx_ld_ip first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s16_accx_ld_ip first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s16_accx_ld_ip first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S16_ACCX_LD_IP_QUP_P: { + unsigned Opc = Xtensa::EE_VMULAS_S16_ACCX_LD_IP_QUP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_s16_accx_ld_ip_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + 
const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s16_accx_ld_ip_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s16_accx_ld_ip_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(5); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vmulas_s16_accx_ld_ip_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(6); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vmulas_s16_accx_ld_ip_qup " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S16_ACCX_LD_XP_P: { + unsigned Opc = Xtensa::EE_VMULAS_S16_ACCX_LD_XP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_s16_accx_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s16_accx_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + 
unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s16_accx_ld_xp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S16_ACCX_LD_XP_QUP_P: { + unsigned Opc = Xtensa::EE_VMULAS_S16_ACCX_LD_XP_QUP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_s16_accx_ld_xp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s16_accx_ld_xp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s16_accx_ld_xp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(5); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vmulas_s16_accx_ld_xp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(6); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vmulas_s16_accx_ld_xp_qup " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addReg(AD.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); 
+ return MBB; + } + case Xtensa::EE_VMULAS_S16_QACC_P: { + unsigned Opc = Xtensa::EE_VMULAS_S16_QACC; + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s16_qacc first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s16_qacc first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S16_QACC_LDBC_INCP_P: { + unsigned Opc = Xtensa::EE_VMULAS_S16_QACC_LDBC_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ldbc_incp " + "first argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ldbc_incp " + "first argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(3); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ldbc_incp " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S16_QACC_LDBC_INCP_QUP_P: { + unsigned Opc = Xtensa::EE_VMULAS_S16_QACC_LDBC_INCP_QUP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ldbc_incp_qup " + "first argument, it must be in range [0,7]"); + 
MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ldbc_incp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(3); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ldbc_incp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(4); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vmulas_s16_qacc_ldbc_incp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(5); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vmulas_s16_qacc_ldbc_incp_qup " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S16_QACC_LD_IP_P: { + unsigned Opc = Xtensa::EE_VMULAS_S16_QACC_LD_IP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ld_ip first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ld_ip first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = 
QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ld_ip first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S16_QACC_LD_IP_QUP_P: { + unsigned Opc = Xtensa::EE_VMULAS_S16_QACC_LD_IP_QUP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ld_ip_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ld_ip_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ld_ip_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(5); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vmulas_s16_qacc_ld_ip_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(6); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vmulas_s16_qacc_ld_ip_qup " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return 
MBB; + } + case Xtensa::EE_VMULAS_S16_QACC_LD_XP_P: { + unsigned Opc = Xtensa::EE_VMULAS_S16_QACC_LD_XP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ld_xp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S16_QACC_LD_XP_QUP_P: { + unsigned Opc = Xtensa::EE_VMULAS_S16_QACC_LD_XP_QUP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ld_xp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ld_xp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ld_xp_qup " + "first argument, it must be in range [0,7]"); + 
MachineOperand &QS0 = MI.getOperand(5); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vmulas_s16_qacc_ld_xp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(6); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vmulas_s16_qacc_ld_xp_qup " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addReg(AD.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S8_ACCX_P: { + unsigned Opc = Xtensa::EE_VMULAS_S8_ACCX; + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s8_accx first argument, " + "it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s8_accx first argument, " + "it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S8_ACCX_LD_IP_P: { + unsigned Opc = Xtensa::EE_VMULAS_S8_ACCX_LD_IP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_s8_accx_ld_ip first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s8_accx_ld_ip first " + "argument, it must be in range [0,7]"); + 
MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s8_accx_ld_ip first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S8_ACCX_LD_IP_QUP_P: { + unsigned Opc = Xtensa::EE_VMULAS_S8_ACCX_LD_IP_QUP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_s8_accx_ld_ip_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s8_accx_ld_ip_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s8_accx_ld_ip_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(5); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vmulas_s8_accx_ld_ip_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(6); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vmulas_s8_accx_ld_ip_qup " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QS0Val) + 
.addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S8_ACCX_LD_XP_P: { + unsigned Opc = Xtensa::EE_VMULAS_S8_ACCX_LD_XP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_s8_accx_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s8_accx_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s8_accx_ld_xp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S8_ACCX_LD_XP_QUP_P: { + unsigned Opc = Xtensa::EE_VMULAS_S8_ACCX_LD_XP_QUP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_s8_accx_ld_xp_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s8_accx_ld_xp_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s8_accx_ld_xp_qup 
first " + "argument, it must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(5); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vmulas_s8_accx_ld_xp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(6); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vmulas_s8_accx_ld_xp_qup " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addReg(AD.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S8_QACC_P: { + unsigned Opc = Xtensa::EE_VMULAS_S8_QACC; + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s8_qacc first argument, " + "it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s8_qacc first argument, " + "it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S8_QACC_LDBC_INCP_P: { + unsigned Opc = Xtensa::EE_VMULAS_S8_QACC_LDBC_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_s8_qacc_ldbc_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s8_qacc_ldbc_incp first " + "argument, it must 
be in range [0,7]"); + MachineOperand &QY = MI.getOperand(3); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s8_qacc_ldbc_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S8_QACC_LDBC_INCP_QUP_P: { + unsigned Opc = Xtensa::EE_VMULAS_S8_QACC_LDBC_INCP_QUP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_s8_qacc_ldbc_incp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s8_qacc_ldbc_incp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(3); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s8_qacc_ldbc_incp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(4); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vmulas_s8_qacc_ldbc_incp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(5); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vmulas_s8_qacc_ldbc_incp_qup " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); 
+ return MBB; + } + case Xtensa::EE_VMULAS_S8_QACC_LD_IP_P: { + unsigned Opc = Xtensa::EE_VMULAS_S8_QACC_LD_IP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_s8_qacc_ld_ip first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s8_qacc_ld_ip first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s8_qacc_ld_ip first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S8_QACC_LD_IP_QUP_P: { + unsigned Opc = Xtensa::EE_VMULAS_S8_QACC_LD_IP_QUP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_s8_qacc_ld_ip_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s8_qacc_ld_ip_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s8_qacc_ld_ip_qup first " + "argument, it must be in range 
[0,7]"); + MachineOperand &QS0 = MI.getOperand(5); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vmulas_s8_qacc_ld_ip_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(6); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vmulas_s8_qacc_ld_ip_qup " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S8_QACC_LD_XP_P: { + unsigned Opc = Xtensa::EE_VMULAS_S8_QACC_LD_XP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_s8_qacc_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s8_qacc_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s8_qacc_ld_xp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_S8_QACC_LD_XP_QUP_P: { + unsigned Opc = Xtensa::EE_VMULAS_S8_QACC_LD_XP_QUP; + MachineOperand &QU = MI.getOperand(0); + 
unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_s8_qacc_ld_xp_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_s8_qacc_ld_xp_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_s8_qacc_ld_xp_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(5); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vmulas_s8_qacc_ld_xp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(6); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vmulas_s8_qacc_ld_xp_qup " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addReg(AD.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U16_ACCX_P: { + unsigned Opc = Xtensa::EE_VMULAS_U16_ACCX; + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u16_accx first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u16_accx first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QXVal) + 
.addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U16_ACCX_LD_IP_P: { + unsigned Opc = Xtensa::EE_VMULAS_U16_ACCX_LD_IP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_u16_accx_ld_ip first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u16_accx_ld_ip first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u16_accx_ld_ip first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U16_ACCX_LD_IP_QUP_P: { + unsigned Opc = Xtensa::EE_VMULAS_U16_ACCX_LD_IP_QUP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_u16_accx_ld_ip_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u16_accx_ld_ip_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of 
ee_vmulas_u16_accx_ld_ip_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(5); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vmulas_u16_accx_ld_ip_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(6); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vmulas_u16_accx_ld_ip_qup " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U16_ACCX_LD_XP_P: { + unsigned Opc = Xtensa::EE_VMULAS_U16_ACCX_LD_XP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_u16_accx_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u16_accx_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u16_accx_ld_xp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U16_ACCX_LD_XP_QUP_P: { + unsigned Opc = 
Xtensa::EE_VMULAS_U16_ACCX_LD_XP_QUP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_u16_accx_ld_xp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u16_accx_ld_xp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u16_accx_ld_xp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(5); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vmulas_u16_accx_ld_xp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(6); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vmulas_u16_accx_ld_xp_qup " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addReg(AD.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U16_QACC_P: { + unsigned Opc = Xtensa::EE_VMULAS_U16_QACC; + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u16_qacc first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u16_qacc first " + "argument, it must be in range 
[0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U16_QACC_LDBC_INCP_P: { + unsigned Opc = Xtensa::EE_VMULAS_U16_QACC_LDBC_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_u16_qacc_ldbc_incp " + "first argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u16_qacc_ldbc_incp " + "first argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(3); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u16_qacc_ldbc_incp " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U16_QACC_LDBC_INCP_QUP_P: { + unsigned Opc = Xtensa::EE_VMULAS_U16_QACC_LDBC_INCP_QUP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_u16_qacc_ldbc_incp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u16_qacc_ldbc_incp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(3); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of 
ee_vmulas_u16_qacc_ldbc_incp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(4); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vmulas_u16_qacc_ldbc_incp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(5); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vmulas_u16_qacc_ldbc_incp_qup " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U16_QACC_LD_IP_P: { + unsigned Opc = Xtensa::EE_VMULAS_U16_QACC_LD_IP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_u16_qacc_ld_ip first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u16_qacc_ld_ip first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u16_qacc_ld_ip first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U16_QACC_LD_IP_QUP_P: { + unsigned Opc = 
Xtensa::EE_VMULAS_U16_QACC_LD_IP_QUP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_u16_qacc_ld_ip_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u16_qacc_ld_ip_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u16_qacc_ld_ip_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(5); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vmulas_u16_qacc_ld_ip_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(6); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vmulas_u16_qacc_ld_ip_qup " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U16_QACC_LD_XP_P: { + unsigned Opc = Xtensa::EE_VMULAS_U16_QACC_LD_XP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_u16_qacc_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + 
MachineOperand &AD = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u16_qacc_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u16_qacc_ld_xp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U16_QACC_LD_XP_QUP_P: { + unsigned Opc = Xtensa::EE_VMULAS_U16_QACC_LD_XP_QUP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_u16_qacc_ld_xp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u16_qacc_ld_xp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u16_qacc_ld_xp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(5); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vmulas_u16_qacc_ld_xp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(6); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vmulas_u16_qacc_ld_xp_qup " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + 
.addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addReg(AD.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U8_ACCX_P: { + unsigned Opc = Xtensa::EE_VMULAS_U8_ACCX; + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u8_accx first argument, " + "it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u8_accx first argument, " + "it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U8_ACCX_LD_IP_P: { + unsigned Opc = Xtensa::EE_VMULAS_U8_ACCX_LD_IP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_u8_accx_ld_ip first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u8_accx_ld_ip first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u8_accx_ld_ip first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return 
MBB; + } + case Xtensa::EE_VMULAS_U8_ACCX_LD_IP_QUP_P: { + unsigned Opc = Xtensa::EE_VMULAS_U8_ACCX_LD_IP_QUP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_u8_accx_ld_ip_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u8_accx_ld_ip_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u8_accx_ld_ip_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(5); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vmulas_u8_accx_ld_ip_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(6); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vmulas_u8_accx_ld_ip_qup " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U8_ACCX_LD_XP_P: { + unsigned Opc = Xtensa::EE_VMULAS_U8_ACCX_LD_XP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_u8_accx_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = 
getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u8_accx_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u8_accx_ld_xp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U8_ACCX_LD_XP_QUP_P: { + unsigned Opc = Xtensa::EE_VMULAS_U8_ACCX_LD_XP_QUP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_u8_accx_ld_xp_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u8_accx_ld_xp_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u8_accx_ld_xp_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(5); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vmulas_u8_accx_ld_xp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(6); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vmulas_u8_accx_ld_xp_qup " + "first argument, it must be in 
range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addReg(AD.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U8_QACC_P: { + unsigned Opc = Xtensa::EE_VMULAS_U8_QACC; + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u8_qacc first argument, " + "it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u8_qacc first argument, " + "it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U8_QACC_LDBC_INCP_P: { + unsigned Opc = Xtensa::EE_VMULAS_U8_QACC_LDBC_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_u8_qacc_ldbc_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u8_qacc_ldbc_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(3); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u8_qacc_ldbc_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + 
return MBB; + } + case Xtensa::EE_VMULAS_U8_QACC_LDBC_INCP_QUP_P: { + unsigned Opc = Xtensa::EE_VMULAS_U8_QACC_LDBC_INCP_QUP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_u8_qacc_ldbc_incp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u8_qacc_ldbc_incp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(3); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u8_qacc_ldbc_incp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(4); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vmulas_u8_qacc_ldbc_incp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(5); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vmulas_u8_qacc_ldbc_incp_qup " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U8_QACC_LD_IP_P: { + unsigned Opc = Xtensa::EE_VMULAS_U8_QACC_LD_IP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_u8_qacc_ld_ip first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = 
MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u8_qacc_ld_ip first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u8_qacc_ld_ip first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U8_QACC_LD_IP_QUP_P: { + unsigned Opc = Xtensa::EE_VMULAS_U8_QACC_LD_IP_QUP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_u8_qacc_ld_ip_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u8_qacc_ld_ip_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u8_qacc_ld_ip_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(5); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vmulas_u8_qacc_ld_ip_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(6); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vmulas_u8_qacc_ld_ip_qup " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, 
DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U8_QACC_LD_XP_P: { + unsigned Opc = Xtensa::EE_VMULAS_U8_QACC_LD_XP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_u8_qacc_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmulas_u8_qacc_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u8_qacc_ld_xp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(AD.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMULAS_U8_QACC_LD_XP_QUP_P: { + unsigned Opc = Xtensa::EE_VMULAS_U8_QACC_LD_XP_QUP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmulas_u8_qacc_ld_xp_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + 
assert(QXVal < 8 && "Unexpected value of ee_vmulas_u8_qacc_ld_xp_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmulas_u8_qacc_ld_xp_qup first " + "argument, it must be in range [0,7]"); + MachineOperand &QS0 = MI.getOperand(5); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vmulas_u8_qacc_ld_xp_qup " + "first argument, it must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(6); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vmulas_u8_qacc_ld_xp_qup " + "first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(AS.getReg()) + .addReg(AD.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMUL_S16_P: { + unsigned Opc = Xtensa::EE_VMUL_S16; + MachineOperand &QZ = MI.getOperand(0); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_vmul_s16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmul_s16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmul_s16 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMUL_S16_LD_INCP_P: { + unsigned Opc = Xtensa::EE_VMUL_S16_LD_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + 
assert(QUVal < 8 && "Unexpected value of ee_vmul_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_vmul_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmul_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmul_s16_ld_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QZVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMUL_S16_ST_INCP_P: { + unsigned Opc = Xtensa::EE_VMUL_S16_ST_INCP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vmul_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_vmul_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmul_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of 
ee_vmul_s16_st_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMUL_S8_P: { + unsigned Opc = Xtensa::EE_VMUL_S8; + MachineOperand &QZ = MI.getOperand(0); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_vmul_s8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmul_s8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmul_s8 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMUL_S8_LD_INCP_P: { + unsigned Opc = Xtensa::EE_VMUL_S8_LD_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmul_s8_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_vmul_s8_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmul_s8_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value 
of ee_vmul_s8_ld_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QZVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMUL_S8_ST_INCP_P: { + unsigned Opc = Xtensa::EE_VMUL_S8_ST_INCP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vmul_s8_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_vmul_s8_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmul_s8_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmul_s8_st_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMUL_U16_P: { + unsigned Opc = Xtensa::EE_VMUL_U16; + MachineOperand &QZ = MI.getOperand(0); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_vmul_u16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmul_u16 first argument, it " + "must be in range [0,7]"); + 
MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmul_u16 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMUL_U16_LD_INCP_P: { + unsigned Opc = Xtensa::EE_VMUL_U16_LD_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmul_u16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_vmul_u16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmul_u16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmul_u16_ld_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QZVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMUL_U16_ST_INCP_P: { + unsigned Opc = Xtensa::EE_VMUL_U16_ST_INCP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vmul_u16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = 
MRI.createVirtualRegister(RC); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_vmul_u16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmul_u16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmul_u16_st_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMUL_U8_P: { + unsigned Opc = Xtensa::EE_VMUL_U8; + MachineOperand &QZ = MI.getOperand(0); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_vmul_u8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmul_u8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmul_u8 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMUL_U8_LD_INCP_P: { + unsigned Opc = Xtensa::EE_VMUL_U8_LD_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vmul_u8_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned 
R1 = MRI.createVirtualRegister(RC); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_vmul_u8_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmul_u8_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmul_u8_ld_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QZVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VMUL_U8_ST_INCP_P: { + unsigned Opc = Xtensa::EE_VMUL_U8_ST_INCP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vmul_u8_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_vmul_u8_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vmul_u8_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vmul_u8_st_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + 
QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VPRELU_S16_P: { + unsigned Opc = Xtensa::EE_VPRELU_S16; + MachineOperand &QZ = MI.getOperand(0); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_vprelu_s16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vprelu_s16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vprelu_s16 first argument, it " + "must be in range [0,7]"); + MachineOperand &AY = MI.getOperand(3); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(AY.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VPRELU_S8_P: { + unsigned Opc = Xtensa::EE_VPRELU_S8; + MachineOperand &QZ = MI.getOperand(0); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of ee_vprelu_s8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vprelu_s8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vprelu_s8 first argument, it " + "must be in range [0,7]"); + MachineOperand &AY = MI.getOperand(3); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addReg(AY.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VRELU_S16_P: { + unsigned Opc = Xtensa::EE_VRELU_S16; + MachineOperand &QS = MI.getOperand(0); + unsigned QSVal = QS.getImm(); + assert(QSVal < 8 && "Unexpected value of ee_vrelu_s16 first argument, it 
" + "must be in range [0,7]"); + MachineOperand &AX = MI.getOperand(1); + MachineOperand &AY = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QSVal) + .addReg(Xtensa::Q0 + QSVal) + .addReg(AX.getReg()) + .addReg(AY.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VRELU_S8_P: { + unsigned Opc = Xtensa::EE_VRELU_S8; + MachineOperand &QS = MI.getOperand(0); + unsigned QSVal = QS.getImm(); + assert(QSVal < 8 && "Unexpected value of ee_vrelu_s8 first argument, it " + "must be in range [0,7]"); + MachineOperand &AX = MI.getOperand(1); + MachineOperand &AY = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QSVal) + .addReg(Xtensa::Q0 + QSVal) + .addReg(AX.getReg()) + .addReg(AY.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VSL_32_P: { + unsigned Opc = Xtensa::EE_VSL_32; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vsl_32 first argument, it must " + "be in range [0,7]"); + MachineOperand &QS = MI.getOperand(1); + unsigned QSVal = QS.getImm(); + assert(QSVal < 8 && "Unexpected value of ee_vsl_32 first argument, it must " + "be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QSVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VSMULAS_S16_QACC_P: { + unsigned Opc = Xtensa::EE_VSMULAS_S16_QACC; + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vsmulas_s16_qacc first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vsmulas_s16_qacc first " + "argument, it must be in range [0,7]"); + MachineOperand &SEL8 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + 
.addImm(SEL8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VSMULAS_S16_QACC_LD_INCP_P: { + unsigned Opc = Xtensa::EE_VSMULAS_S16_QACC_LD_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vsmulas_s16_qacc_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vsmulas_s16_qacc_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(3); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vsmulas_s16_qacc_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &SEL8 = MI.getOperand(4); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addImm(SEL8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VSMULAS_S8_QACC_P: { + unsigned Opc = Xtensa::EE_VSMULAS_S8_QACC; + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vsmulas_s8_qacc first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vsmulas_s8_qacc first " + "argument, it must be in range [0,7]"); + MachineOperand &SEL16 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addImm(SEL16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VSMULAS_S8_QACC_LD_INCP_P: { + unsigned Opc = Xtensa::EE_VSMULAS_S8_QACC_LD_INCP; + MachineOperand &QU = 
MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vsmulas_s8_qacc_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vsmulas_s8_qacc_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(3); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vsmulas_s8_qacc_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &SEL16 = MI.getOperand(4); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal) + .addImm(SEL16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VSR_32_P: { + unsigned Opc = Xtensa::EE_VSR_32; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vsr_32 first argument, it must " + "be in range [0,7]"); + MachineOperand &QS = MI.getOperand(1); + unsigned QSVal = QS.getImm(); + assert(QSVal < 8 && "Unexpected value of ee_vsr_32 first argument, it must " + "be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QSVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VST_128_IP_P: { + unsigned Opc = Xtensa::EE_VST_128_IP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vst_128_ip first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM16 = 
MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addImm(IMM16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VST_128_XP_P: { + unsigned Opc = Xtensa::EE_VST_128_XP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vst_128_xp first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(AD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VST_H_64_IP_P: { + unsigned Opc = Xtensa::EE_VST_H_64_IP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vst_h_64_ip first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM8 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addImm(IMM8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VST_H_64_XP_P: { + unsigned Opc = Xtensa::EE_VST_H_64_XP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vst_h_64_xp first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) 
+ .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(AD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VST_L_64_IP_P: { + unsigned Opc = Xtensa::EE_VST_L_64_IP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vst_l_64_ip first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &IMM8 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addImm(IMM8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VST_L_64_XP_P: { + unsigned Opc = Xtensa::EE_VST_L_64_XP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vst_l_64_xp first argument, it " + "must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &AD = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(AD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VSUBS_S16_P: { + unsigned Opc = Xtensa::EE_VSUBS_S16; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vsubs_s16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vsubs_s16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vsubs_s16 first argument, it " + "must be in range 
[0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VSUBS_S16_LD_INCP_P: { + unsigned Opc = Xtensa::EE_VSUBS_S16_LD_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vsubs_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vsubs_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vsubs_s16_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vsubs_s16_ld_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VSUBS_S16_ST_INCP_P: { + unsigned Opc = Xtensa::EE_VSUBS_S16_ST_INCP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vsubs_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vsubs_s16_st_incp first " + 
"argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vsubs_s16_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vsubs_s16_st_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VSUBS_S32_P: { + unsigned Opc = Xtensa::EE_VSUBS_S32; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vsubs_s32 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vsubs_s32 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vsubs_s32 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VSUBS_S32_LD_INCP_P: { + unsigned Opc = Xtensa::EE_VSUBS_S32_LD_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vsubs_s32_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of 
ee_vsubs_s32_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vsubs_s32_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vsubs_s32_ld_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VSUBS_S32_ST_INCP_P: { + unsigned Opc = Xtensa::EE_VSUBS_S32_ST_INCP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vsubs_s32_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vsubs_s32_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vsubs_s32_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vsubs_s32_st_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VSUBS_S8_P: { + unsigned Opc = 
Xtensa::EE_VSUBS_S8; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vsubs_s8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vsubs_s8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vsubs_s8 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VSUBS_S8_LD_INCP_P: { + unsigned Opc = Xtensa::EE_VSUBS_S8_LD_INCP; + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of ee_vsubs_s8_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vsubs_s8_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vsubs_s8_ld_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vsubs_s8_ld_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QUVal) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case 
Xtensa::EE_VSUBS_S8_ST_INCP_P: { + unsigned Opc = Xtensa::EE_VSUBS_S8_ST_INCP; + MachineOperand &QV = MI.getOperand(0); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of ee_vsubs_s8_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &AS = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &QA = MI.getOperand(2); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_vsubs_s8_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QX = MI.getOperand(3); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_vsubs_s8_st_incp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(4); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_vsubs_s8_st_incp first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QVVal) + .addReg(AS.getReg()) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VUNZIP_16_P: { + unsigned Opc = Xtensa::EE_VUNZIP_16; + MachineOperand &QS0 = MI.getOperand(0); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vunzip_16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(1); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vunzip_16 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VUNZIP_32_P: { + unsigned Opc = Xtensa::EE_VUNZIP_32; + MachineOperand &QS0 = MI.getOperand(0); + unsigned 
QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vunzip_32 first argument, it " + "must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(1); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vunzip_32 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VUNZIP_8_P: { + unsigned Opc = Xtensa::EE_VUNZIP_8; + MachineOperand &QS0 = MI.getOperand(0); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vunzip_8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(1); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vunzip_8 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VZIP_16_P: { + unsigned Opc = Xtensa::EE_VZIP_16; + MachineOperand &QS0 = MI.getOperand(0); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vzip_16 first argument, it " + "must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(1); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vzip_16 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VZIP_32_P: { + unsigned Opc = Xtensa::EE_VZIP_32; + MachineOperand &QS0 = MI.getOperand(0); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vzip_32 
first argument, it " + "must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(1); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vzip_32 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_VZIP_8_P: { + unsigned Opc = Xtensa::EE_VZIP_8; + MachineOperand &QS0 = MI.getOperand(0); + unsigned QS0Val = QS0.getImm(); + assert(QS0Val < 8 && "Unexpected value of ee_vzip_8 first argument, it " + "must be in range [0,7]"); + MachineOperand &QS1 = MI.getOperand(1); + unsigned QS1Val = QS1.getImm(); + assert(QS1Val < 8 && "Unexpected value of ee_vzip_8 first argument, it " + "must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val) + .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_XORQ_P: { + unsigned Opc = Xtensa::EE_XORQ; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_xorq first argument, it must " + "be in range [0,7]"); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of ee_xorq first argument, it must " + "be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of ee_xorq first argument, it must " + "be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QXVal) + .addReg(Xtensa::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_ZERO_ACCX_P: { + unsigned Opc = Xtensa::EE_ZERO_ACCX; + BuildMI(*MBB, MI, DL, TII.get(Opc)); + + MI.eraseFromParent(); + return MBB; + } + case 
Xtensa::EE_ZERO_Q_P: { + unsigned Opc = Xtensa::EE_ZERO_Q; + MachineOperand &QA = MI.getOperand(0); + unsigned QAVal = QA.getImm(); + assert(QAVal < 8 && "Unexpected value of ee_zero_q first argument, it must " + "be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(Xtensa::Q0 + QAVal); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::EE_ZERO_QACC_P: { + unsigned Opc = Xtensa::EE_ZERO_QACC; + BuildMI(*MBB, MI, DL, TII.get(Opc)); + + MI.eraseFromParent(); + return MBB; + } + case Xtensa::mv_QR_P: { + unsigned Opc = Xtensa::mv_QR; + MachineOperand &A = MI.getOperand(0); + unsigned AVal = A.getImm(); + assert( + AVal < 8 && + "Unexpected value of mv_qr first argument, it must be in range [0,7]"); + MachineOperand &B = MI.getOperand(1); + unsigned BVal = B.getImm(); + assert( + BVal < 8 && + "Unexpected value of mv_qr first argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + AVal) + .addReg(Xtensa::Q0 + BVal); + + MI.eraseFromParent(); + return MBB; + } + } +} diff --git a/llvm/test/CodeGen/Xtensa/ee-intrinsics-loop.ll b/llvm/test/CodeGen/Xtensa/ee-intrinsics-loop.ll new file mode 100644 index 0000000000000..9d339f00e539b --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/ee-intrinsics-loop.ll @@ -0,0 +1,148 @@ +; RUN: llc -mtriple=xtensa -mcpu=esp32s3 < %s | FileCheck %s + +; Function Attrs: nounwind +define dso_local void @test(i32 noundef %ptr.coerce, i32 noundef %ptr2.coerce) local_unnamed_addr #0 { +entry: + %coerce.val.ip = inttoptr i32 %ptr.coerce to ptr + %0 = load i32, ptr %coerce.val.ip, align 4, !tbaa !2 + tail call void @llvm.xtensa.ee.vld.128.ip(i32 1, i32 16, i32 0) + tail call void @llvm.xtensa.ee.vld.128.ip(i32 1, i32 16, i32 0) + tail call void @llvm.xtensa.ee.vld.128.ip(i32 1, i32 16, i32 0) + tail call void @llvm.xtensa.ee.vld.128.ip(i32 1, i32 %0, i32 0) + tail call void @llvm.xtensa.ee.vld.128.ip(i32 2, i32 %0, i32 0) + tail call void @llvm.xtensa.ee.vld.128.ip(i32 3, i32 %0, i32 0) + 
tail call void @llvm.xtensa.ee.vld.128.ip(i32 3, i32 %0, i32 0) + tail call void @llvm.xtensa.ee.vld.128.ip(i32 3, i32 %0, i32 0) + tail call void @llvm.xtensa.ee.vld.128.ip(i32 3, i32 %0, i32 0) + tail call void @llvm.xtensa.ee.vld.128.ip(i32 3, i32 %0, i32 0) + tail call void @llvm.xtensa.ee.vmax.s8(i32 1, i32 2, i32 3) + tail call void @llvm.xtensa.ee.vmax.s16(i32 4, i32 5, i32 7) + tail call void @llvm.xtensa.ee.stf.128.ip(float 0x40099999A0000000, float 0x4002666660000000, float 0x4011CCCCC0000000, float 0x401AA8F5C0000000, i32 %0, i32 16) + tail call void @llvm.xtensa.ee.stf.128.ip(float undef, float undef, float undef, float undef, i32 %0, i32 16) + tail call void @llvm.xtensa.ee.stf.128.xp(float undef, float undef, float undef, float undef, i32 16, i32 %0) + br label %for.body + +for.cond.cleanup: ; preds = %for.body + tail call void @llvm.xtensa.wur.sar.byte(i32 0) + tail call void @llvm.xtensa.wur.accx.0(i32 0) + tail call void @llvm.xtensa.wur.accx.1(i32 0) + ret void + +for.body: ; preds = %entry, %for.body + %i.029 = phi i32 [ 0, %entry ], [ %inc, %for.body ] + tail call void @llvm.xtensa.ee.ld.128.usar.ip(i32 4, i32 %0, i32 16) + tail call void @llvm.xtensa.ee.src.q.ld.ip(i32 3, i32 %0, i32 16, i32 4, i32 2) + tail call void @llvm.xtensa.ee.vmulas.s16.accx.ld.ip(i32 0, i32 10, i32 16, i32 4, i32 6) + tail call void @llvm.xtensa.ee.vmulas.s16.accx.ld.xp.qup(i32 4, i32 %0, i32 16, i32 0, i32 4, i32 2, i32 3) + tail call void @llvm.xtensa.ee.ld.128.usar.xp(i32 4, i32 %0, i32 16) + tail call void @llvm.xtensa.ee.vmulas.s16.accx.ld.ip.qup(i32 3, i32 %0, i32 16, i32 0, i32 3, i32 4, i32 2) + %inc = add nuw nsw i32 %i.029, 1 + %exitcond.not = icmp eq i32 %inc, 32 + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !6 +} + +; CHECK-LABEL: test: # @test +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: entry a1, 32 +; CHECK-NEXT: l32i.n a8, a2, 0 +; CHECK-NEXT: movi.n a9, 16 +; CHECK-NEXT: ee.vld.128.ip q1, a9, 0 +; CHECK-NEXT: ee.vld.128.ip 
q1, a9, 0 +; CHECK-NEXT: ee.vld.128.ip q1, a9, 0 +; CHECK-NEXT: ee.vld.128.ip q1, a8, 0 +; CHECK-NEXT: ee.vld.128.ip q2, a8, 0 +; CHECK-NEXT: ee.vld.128.ip q3, a8, 0 +; CHECK-NEXT: ee.vld.128.ip q3, a8, 0 +; CHECK-NEXT: ee.vld.128.ip q3, a8, 0 +; CHECK-NEXT: ee.vld.128.ip q3, a8, 0 +; CHECK-NEXT: ee.vld.128.ip q3, a8, 0 +; CHECK-NEXT: ee.vmax.s8 q1, q2, q3 +; CHECK-NEXT: ee.vmax.s16 q4, q5, q7 +; CHECK-NEXT: l32r a10, .LCPI0_0 +; CHECK-NEXT: wfr f8, a10 +; CHECK-NEXT: l32r a10, .LCPI0_1 +; CHECK-NEXT: wfr f9, a10 +; CHECK-NEXT: l32r a10, .LCPI0_2 +; CHECK-NEXT: wfr f10, a10 +; CHECK-NEXT: l32r a10, .LCPI0_3 +; CHECK-NEXT: wfr f11, a10 +; CHECK-NEXT: ee.stf.128.ip f11, f10, f9, f8, a8, 16 +; CHECK-NEXT: ee.stf.128.ip f8, f8, f8, f8, a8, 16 +; CHECK-NEXT: ee.stf.128.xp f8, f8, f8, f8, a9, a8 +; CHECK-NEXT: movi.n a10, 0 +; CHECK-NEXT: movi.n a11, 10 +; CHECK-NEXT: .LBB0_1: # %for.body +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: ee.ld.128.usar.ip q4, a8, 16 +; CHECK-NEXT: ee.src.q.ld.ip q3, a8, 16, q4, q2 +; CHECK-NEXT: ee.vmulas.s16.accx.ld.ip q0, a11, 16, q4, q6 +; CHECK-NEXT: ee.vmulas.s16.accx.ld.xp.qup q4, a8, a9, q0, q4, q2, q3 +; CHECK-NEXT: ee.ld.128.usar.xp q4, a8, a9 +; CHECK-NEXT: ee.vmulas.s16.accx.ld.ip.qup q3, a8, 16, q0, q3, q4, q2 +; CHECK-NEXT: addi.n a10, a10, 1 +; CHECK-NEXT: bnei a10, 32, .LBB0_1 +; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: movi.n a8, 0 +; CHECK-NEXT: wur.sar_byte a8 +; CHECK-NEXT: wur.accx_0 a8 +; CHECK-NEXT: wur.accx_1 a8 +; CHECK-NEXT: retw.n + + + +; Function Attrs: nounwind +declare void @llvm.xtensa.ee.vld.128.ip(i32 immarg, i32, i32 immarg) #1 + +; Function Attrs: nounwind +declare void @llvm.xtensa.ee.vmax.s8(i32 immarg, i32 immarg, i32 immarg) #1 + +; Function Attrs: nounwind +declare void @llvm.xtensa.ee.vmax.s16(i32 immarg, i32 immarg, i32 immarg) #1 + +; Function Attrs: nounwind +declare void @llvm.xtensa.ee.stf.128.ip(float, float, float, float, i32, i32 immarg) #1 + +; Function 
Attrs: nounwind +declare void @llvm.xtensa.ee.stf.128.xp(float, float, float, float, i32, i32) #1 + +; Function Attrs: nounwind +declare void @llvm.xtensa.ee.ld.128.usar.ip(i32 immarg, i32, i32 immarg) #1 + +; Function Attrs: nounwind +declare void @llvm.xtensa.ee.src.q.ld.ip(i32 immarg, i32, i32 immarg, i32 immarg, i32 immarg) #1 + +; Function Attrs: nounwind +declare void @llvm.xtensa.ee.vmulas.s16.accx.ld.ip(i32 immarg, i32, i32 immarg, i32 immarg, i32 immarg) #1 + +; Function Attrs: nounwind +declare void @llvm.xtensa.ee.vmulas.s16.accx.ld.xp.qup(i32 immarg, i32, i32, i32 immarg, i32 immarg, i32 immarg, i32 immarg) #1 + +; Function Attrs: nounwind +declare void @llvm.xtensa.ee.ld.128.usar.xp(i32 immarg, i32, i32) #1 + +; Function Attrs: nounwind +declare void @llvm.xtensa.ee.vmulas.s16.accx.ld.ip.qup(i32 immarg, i32, i32 immarg, i32 immarg, i32 immarg, i32 immarg, i32 immarg) #1 + +; Function Attrs: nounwind +declare void @llvm.xtensa.wur.sar.byte(i32) #1 + +; Function Attrs: nounwind +declare void @llvm.xtensa.wur.accx.0(i32) #1 + +; Function Attrs: nounwind +declare void @llvm.xtensa.wur.accx.1(i32) #1 + +attributes #0 = { nounwind "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+atomctl,+bool,+coprocessor,+debug,+density,+dfpaccel,+div32,+exception,+fp,+highpriinterrupts,+interrupt,+loop,+mac16,+memctl,+miscsr,+mul32,+mul32high,+nsa,+prid,+regprotect,+rvector,+s32c1i,+sext,+threadptr,+timerint,+windowed" } +attributes #1 = { nounwind } + +!llvm.module.flags = !{!0} +!llvm.ident = !{!1} + +!0 = !{i32 1, !"wchar_size", i32 1} +!1 = !{!"clang version 15.0.0 (https://github.com/espressif/llvm-project.git a69f9a9e5e52d92f584c8268b1d041b2dfaee1e8)"} +!2 = !{!3, !3, i64 0} +!3 = !{!"int", !4, i64 0} +!4 = !{!"omnipotent char", !5, i64 0} +!5 = !{!"Simple C/C++ TBAA"} +!6 = distinct !{!6, !7} +!7 = !{!"llvm.loop.mustprogress"} diff --git a/llvm/test/CodeGen/Xtensa/xtensa-s3-dsp.ll 
b/llvm/test/CodeGen/Xtensa/xtensa-s3-dsp.ll new file mode 100644 index 0000000000000..b1782c92dff0b --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/xtensa-s3-dsp.ll @@ -0,0 +1,764 @@ +; RUN: llc -O1 -mtriple=xtensa -mcpu=esp32s3 %s -o - | FileCheck %s + +; CHECK: @test +define void @test(){ + tail call void @llvm.xtensa.ee.andq(i32 7, i32 0, i32 1) + ; CHECK: ee.andq + tail call void @llvm.xtensa.ee.bitrev(i32 0, i32 3) + ; CHECK: ee.bitrev + tail call void @llvm.xtensa.ee.cmul.s16(i32 2, i32 3, i32 2, i32 1) + ; CHECK: ee.cmul.s16 + tail call void @llvm.xtensa.ee.cmul.s16.ld.incp(i32 2, i32 1, i32 7, i32 5, i32 0, i32 3) + ; CHECK: ee.cmul.s16.ld.incp + tail call void @llvm.xtensa.ee.cmul.s16.st.incp(i32 1, i32 6, i32 6, i32 1, i32 1, i32 1) + ; CHECK: ee.cmul.s16.st.incp + tail call void @llvm.xtensa.ee.fft.ams.s16.ld.incp(i32 4, i32 10, i32 4, i32 2, i32 2, i32 2, i32 0, i32 1) + ; CHECK: ee.fft.ams.s16.ld.incp + tail call void @llvm.xtensa.ee.fft.ams.s16.ld.incp.uaup(i32 0, i32 6, i32 1, i32 6, i32 2, i32 2, i32 6, i32 1) + ; CHECK: ee.fft.ams.s16.ld.incp.uaup + tail call void @llvm.xtensa.ee.fft.ams.s16.ld.r32.decp(i32 7, i32 2, i32 5, i32 6, i32 1, i32 4, i32 0, i32 1) + ; CHECK: ee.fft.ams.s16.ld.r32.decp + tail call void @llvm.xtensa.ee.fft.ams.s16.st.incp(i32 1, i32 1, i32 14, i32 2, i32 7, i32 0, i32 1, i32 0) + ; CHECK: ee.fft.ams.s16.st.incp + tail call void @llvm.xtensa.ee.fft.cmul.s16.ld.xp(i32 7, i32 4, i32 9, i32 0, i32 2, i32 7, i32 1) + ; CHECK: ee.fft.cmul.s16.ld.xp + tail call void @llvm.xtensa.ee.fft.cmul.s16.st.xp(i32 3, i32 0, i32 5, i32 11, i32 8, i32 7, i32 1, i32 0) + ; CHECK: ee.fft.cmul.s16.st.xp + tail call void @llvm.xtensa.ee.fft.r2bf.s16(i32 7, i32 7, i32 2, i32 0, i32 1) + ; CHECK: ee.fft.r2bf.s16 + tail call void @llvm.xtensa.ee.fft.r2bf.s16.st.incp(i32 0, i32 5, i32 1, i32 10, i32 0) + ; CHECK: ee.fft.r2bf.s16.st.incp + tail call void @llvm.xtensa.ee.fft.vst.r32.decp(i32 0, i32 10, i32 1) + ; CHECK: ee.fft.vst.r32.decp + tail call 
void @llvm.xtensa.ee.ldf.128.ip(float 0x40099999A0000000, float 0x40099999A0000000, float 0x40099999A0000000, float 0x40099999A0000000, i32 0, i32 64) + ; CHECK: ee.ldf.128.ip + tail call void @llvm.xtensa.ee.ldf.128.xp(float 0x40099999A0000000, float 0x40099999A0000000, float 0x40099999A0000000, float 0x40099999A0000000, i32 2, i32 13) + ; CHECK: ee.ldf.128.xp + tail call void @llvm.xtensa.ee.ldf.64.ip(float 0x40099999A0000000, float 0x40099999A0000000, i32 10, i32 -232) + ; CHECK: ee.ldf.64.ip + tail call void @llvm.xtensa.ee.ldf.64.xp(float 0x40099999A0000000, float 0x40099999A0000000, i32 13, i32 8) + ; CHECK: ee.ldf.64.xp + tail call void @llvm.xtensa.ee.ldqa.s16.128.ip(i32 5, i32 160) + ; CHECK: ee.ldqa.s16.128.ip + tail call void @llvm.xtensa.ee.ldqa.s16.128.xp(i32 13, i32 5) + ; CHECK: ee.ldqa.s16.128.xp + tail call void @llvm.xtensa.ee.ldqa.s8.128.ip(i32 13, i32 640) + ; CHECK: ee.ldqa.s8.128.ip + tail call void @llvm.xtensa.ee.ldqa.s8.128.xp(i32 2, i32 13) + ; CHECK: ee.ldqa.s8.128.xp + tail call void @llvm.xtensa.ee.ldqa.u16.128.ip(i32 11, i32 -1184) + ; CHECK: ee.ldqa.u16.128.ip + tail call void @llvm.xtensa.ee.ldqa.u16.128.xp(i32 1, i32 14) + ; CHECK: ee.ldqa.u16.128.xp + tail call void @llvm.xtensa.ee.ldqa.u8.128.ip(i32 14, i32 736) + ; CHECK: ee.ldqa.u8.128.ip + tail call void @llvm.xtensa.ee.ldqa.u8.128.xp(i32 2, i32 13) + ; CHECK: ee.ldqa.u8.128.xp + tail call void @llvm.xtensa.ee.ldxq.32(i32 2, i32 4, i32 0, i32 1, i32 0) + ; CHECK: ee.ldxq.32 + tail call void @llvm.xtensa.ee.ld.128.usar.ip(i32 6, i32 7, i32 1904) + ; CHECK: ee.ld.128.usar.ip + tail call void @llvm.xtensa.ee.ld.128.usar.xp(i32 0, i32 10, i32 8) + ; CHECK: ee.ld.128.usar.xp + tail call void @llvm.xtensa.ee.ld.accx.ip(i32 6, i32 -560) + ; CHECK: ee.ld.accx.ip + tail call void @llvm.xtensa.ee.ld.qacc.h.h.32.ip(i32 6, i32 68) + ; CHECK: ee.ld.qacc_h.h.32.ip + tail call void @llvm.xtensa.ee.ld.qacc.h.l.128.ip(i32 7, i32 304) + ; CHECK: ee.ld.qacc_h.l.128.ip + tail call void 
@llvm.xtensa.ee.ld.qacc.l.h.32.ip(i32 4, i32 52) + ; CHECK: ee.ld.qacc_l.h.32.ip + tail call void @llvm.xtensa.ee.ld.qacc.l.l.128.ip(i32 7, i32 1040) + ; CHECK: ee.ld.qacc_l.l.128.ip + tail call void @llvm.xtensa.ee.ld.ua.state.ip(i32 7, i32 -1760) + ; CHECK: ee.ld.ua_state.ip + tail call void @llvm.xtensa.ee.movi.32.a(i32 0, i32 7, i32 0) + ; CHECK: ee.movi.32.a + tail call void @llvm.xtensa.ee.movi.32.q(i32 0, i32 1, i32 2) + ; CHECK: ee.movi.32.q + tail call void @llvm.xtensa.ee.mov.s16.qacc(i32 0) + ; CHECK: ee.mov.s16.qacc + tail call void @llvm.xtensa.ee.mov.s8.qacc(i32 0) + ; CHECK: ee.mov.s8.qacc + tail call void @llvm.xtensa.ee.mov.u16.qacc(i32 2) + ; CHECK: ee.mov.u16.qacc + tail call void @llvm.xtensa.ee.mov.u8.qacc(i32 6) + ; CHECK: ee.mov.u8.qacc + tail call void @llvm.xtensa.ee.notq(i32 6, i32 7) + ; CHECK: ee.notq + tail call void @llvm.xtensa.ee.orq(i32 1, i32 2, i32 5) + ; CHECK: ee.orq + tail call void @llvm.xtensa.ee.slci.2q(i32 5, i32 2, i32 8) + ; CHECK: ee.slci.2q + tail call void @llvm.xtensa.ee.slcxxp.2q(i32 2, i32 4, i32 2, i32 11) + ; CHECK: ee.slcxxp.2q + tail call void @llvm.xtensa.ee.srci.2q(i32 4, i32 0, i32 7) + ; CHECK: ee.srci.2q + tail call void @llvm.xtensa.ee.srcmb.s16.qacc(i32 6, i32 5, i32 0) + ; CHECK: ee.srcmb.s16.qacc + tail call void @llvm.xtensa.ee.srcmb.s8.qacc(i32 1, i32 7, i32 1) + ; CHECK: ee.srcmb.s8.qacc + tail call void @llvm.xtensa.ee.srcq.128.st.incp(i32 5, i32 3, i32 6) + ; CHECK: ee.srcq.128.st.incp + tail call void @llvm.xtensa.ee.srcxxp.2q(i32 1, i32 1, i32 1, i32 11) + ; CHECK: ee.srcxxp.2q + tail call void @llvm.xtensa.ee.src.q(i32 7, i32 3, i32 0) + ; CHECK: ee.src.q + tail call void @llvm.xtensa.ee.src.q.ld.ip(i32 1, i32 10, i32 1856, i32 4, i32 0) + ; CHECK: ee.src.q.ld.ip + tail call void @llvm.xtensa.ee.src.q.ld.xp(i32 7, i32 1, i32 1, i32 0, i32 3) + ; CHECK: ee.src.q.ld.xp + tail call void @llvm.xtensa.ee.src.q.qup(i32 5, i32 2, i32 0) + ; CHECK: ee.src.q.qup + tail call void 
@llvm.xtensa.ee.srs.accx(i32 3, i32 6, i32 1) + ; CHECK: ee.srs.accx + tail call void @llvm.xtensa.ee.stf.128.ip(float 0x40099999A0000000, float 0x40099999A0000000, float 0x40099999A0000000, float 0x40099999A0000000, i32 10, i32 -96) + ; CHECK: ee.stf.128.ip + tail call void @llvm.xtensa.ee.stf.128.xp(float 0x40099999A0000000, float 0x40099999A0000000, float 0x40099999A0000000, float 0x40099999A0000000, i32 1, i32 6) + ; CHECK: ee.stf.128.xp + tail call void @llvm.xtensa.ee.stf.64.ip(float 0x40099999A0000000, float 0x40099999A0000000, i32 8, i32 184) + ; CHECK: ee.stf.64.ip + tail call void @llvm.xtensa.ee.stf.64.xp(float 0x40099999A0000000, float 0x40099999A0000000, i32 1, i32 7) + ; CHECK: ee.stf.64.xp + tail call void @llvm.xtensa.ee.stxq.32(i32 2, i32 5, i32 2, i32 0, i32 4) + ; CHECK: ee.stxq.32 + tail call void @llvm.xtensa.ee.st.accx.ip(i32 4, i32 -136) + ; CHECK: ee.st.accx.ip + tail call void @llvm.xtensa.ee.st.qacc.h.h.32.ip(i32 14, i32 96) + ; CHECK: ee.st.qacc_h.h.32.ip + tail call void @llvm.xtensa.ee.st.qacc.h.l.128.ip(i32 5, i32 -496) + ; CHECK: ee.st.qacc_h.l.128.ip + tail call void @llvm.xtensa.ee.st.qacc.l.h.32.ip(i32 12, i32 348) + ; CHECK: ee.st.qacc_l.h.32.ip + tail call void @llvm.xtensa.ee.st.qacc.l.l.128.ip(i32 9, i32 592) + ; CHECK: ee.st.qacc_l.l.128.ip + tail call void @llvm.xtensa.ee.st.ua.state.ip(i32 11, i32 -1568) + ; CHECK: ee.st.ua_state.ip + tail call void @llvm.xtensa.ee.vadds.s16(i32 2, i32 3, i32 6) + ; CHECK: ee.vadds.s16 + tail call void @llvm.xtensa.ee.vadds.s16.ld.incp(i32 6, i32 5, i32 4, i32 6, i32 1) + ; CHECK: ee.vadds.s16.ld.incp + tail call void @llvm.xtensa.ee.vadds.s16.st.incp(i32 6, i32 9, i32 7, i32 6, i32 2) + ; CHECK: ee.vadds.s16.st.incp + tail call void @llvm.xtensa.ee.vadds.s32(i32 7, i32 3, i32 1) + ; CHECK: ee.vadds.s32 + tail call void @llvm.xtensa.ee.vadds.s32.ld.incp(i32 1, i32 8, i32 7, i32 1, i32 3) + ; CHECK: ee.vadds.s32.ld.incp + tail call void @llvm.xtensa.ee.vadds.s32.st.incp(i32 5, i32 4, i32 3, 
i32 1, i32 7) + ; CHECK: ee.vadds.s32.st.incp + tail call void @llvm.xtensa.ee.vadds.s8(i32 7, i32 2, i32 2) + ; CHECK: ee.vadds.s8 + tail call void @llvm.xtensa.ee.vadds.s8.ld.incp(i32 6, i32 2, i32 0, i32 3, i32 0) + ; CHECK: ee.vadds.s8.ld.incp + tail call void @llvm.xtensa.ee.vadds.s8.st.incp(i32 1, i32 7, i32 2, i32 3, i32 4) + ; CHECK: ee.vadds.s8.st.incp + tail call void @llvm.xtensa.ee.vcmp.eq.s16(i32 5, i32 3, i32 2) + ; CHECK: ee.vcmp.eq.s16 + tail call void @llvm.xtensa.ee.vcmp.eq.s32(i32 3, i32 3, i32 5) + ; CHECK: ee.vcmp.eq.s32 + tail call void @llvm.xtensa.ee.vcmp.eq.s8(i32 2, i32 6, i32 0) + ; CHECK: ee.vcmp.eq.s8 + tail call void @llvm.xtensa.ee.vcmp.gt.s16(i32 6, i32 2, i32 0) + ; CHECK: ee.vcmp.gt.s16 + tail call void @llvm.xtensa.ee.vcmp.gt.s32(i32 6, i32 7, i32 2) + ; CHECK: ee.vcmp.gt.s32 + tail call void @llvm.xtensa.ee.vcmp.gt.s8(i32 0, i32 3, i32 5) + ; CHECK: ee.vcmp.gt.s8 + tail call void @llvm.xtensa.ee.vcmp.lt.s16(i32 6, i32 6, i32 6) + ; CHECK: ee.vcmp.lt.s16 + tail call void @llvm.xtensa.ee.vcmp.lt.s32(i32 2, i32 2, i32 6) + ; CHECK: ee.vcmp.lt.s32 + tail call void @llvm.xtensa.ee.vcmp.lt.s8(i32 0, i32 3, i32 4) + ; CHECK: ee.vcmp.lt.s8 + tail call void @llvm.xtensa.ee.vldbc.16(i32 0, i32 7) + ; CHECK: ee.vldbc.16 + tail call void @llvm.xtensa.ee.vldbc.16.ip(i32 6, i32 10, i32 220) + ; CHECK: ee.vldbc.16.ip + tail call void @llvm.xtensa.ee.vldbc.16.xp(i32 5, i32 10, i32 9) + ; CHECK: ee.vldbc.16.xp + tail call void @llvm.xtensa.ee.vldbc.32(i32 1, i32 6) + ; CHECK: ee.vldbc.32 + tail call void @llvm.xtensa.ee.vldbc.32.ip(i32 4, i32 9, i32 -352) + ; CHECK: ee.vldbc.32.ip + tail call void @llvm.xtensa.ee.vldbc.32.xp(i32 7, i32 10, i32 7) + ; CHECK: ee.vldbc.32.xp + tail call void @llvm.xtensa.ee.vldbc.8(i32 6, i32 7) + ; CHECK: ee.vldbc.8 + tail call void @llvm.xtensa.ee.vldbc.8.ip(i32 1, i32 8, i32 124) + ; CHECK: ee.vldbc.8.ip + tail call void @llvm.xtensa.ee.vldbc.8.xp(i32 1, i32 9, i32 14) + ; CHECK: ee.vldbc.8.xp + tail call void 
@llvm.xtensa.ee.vldhbc.16.incp(i32 5, i32 0, i32 0) + ; CHECK: ee.vldhbc.16.incp + tail call void @llvm.xtensa.ee.vld.128.ip(i32 5, i32 4, i32 1344) + ; CHECK: ee.vld.128.ip + tail call void @llvm.xtensa.ee.vld.128.xp(i32 6, i32 9, i32 8) + ; CHECK: ee.vld.128.xp + tail call void @llvm.xtensa.ee.vld.h.64.ip(i32 6, i32 3, i32 408) + ; CHECK: ee.vld.h.64.ip + tail call void @llvm.xtensa.ee.vld.h.64.xp(i32 7, i32 6, i32 7) + ; CHECK: ee.vld.h.64.xp + tail call void @llvm.xtensa.ee.vld.l.64.ip(i32 6, i32 4, i32 -752) + ; CHECK: ee.vld.l.64.ip + tail call void @llvm.xtensa.ee.vld.l.64.xp(i32 6, i32 9, i32 8) + ; CHECK: ee.vld.l.64.xp + tail call void @llvm.xtensa.ee.vmax.s16(i32 5, i32 6, i32 7) + ; CHECK: ee.vmax.s16 + tail call void @llvm.xtensa.ee.vmax.s16.ld.incp(i32 7, i32 5, i32 7, i32 5, i32 3) + ; CHECK: ee.vmax.s16.ld.incp + tail call void @llvm.xtensa.ee.vmax.s16.st.incp(i32 2, i32 0, i32 7, i32 5, i32 2) + ; CHECK: ee.vmax.s16.st.incp + tail call void @llvm.xtensa.ee.vmax.s32(i32 5, i32 5, i32 3) + ; CHECK: ee.vmax.s32 + tail call void @llvm.xtensa.ee.vmax.s32.ld.incp(i32 3, i32 8, i32 5, i32 2, i32 5) + ; CHECK: ee.vmax.s32.ld.incp + tail call void @llvm.xtensa.ee.vmax.s32.st.incp(i32 0, i32 1, i32 2, i32 7, i32 3) + ; CHECK: ee.vmax.s32.st.incp + tail call void @llvm.xtensa.ee.vmax.s8(i32 5, i32 2, i32 5) + ; CHECK: ee.vmax.s8 + tail call void @llvm.xtensa.ee.vmax.s8.ld.incp(i32 6, i32 7, i32 5, i32 4, i32 2) + ; CHECK: ee.vmax.s8.ld.incp + tail call void @llvm.xtensa.ee.vmax.s8.st.incp(i32 2, i32 1, i32 1, i32 3, i32 1) + ; CHECK: ee.vmax.s8.st.incp + tail call void @llvm.xtensa.ee.vmin.s16(i32 0, i32 1, i32 0) + ; CHECK: ee.vmin.s16 + tail call void @llvm.xtensa.ee.vmin.s16.ld.incp(i32 7, i32 0, i32 4, i32 7, i32 6) + ; CHECK: ee.vmin.s16.ld.incp + tail call void @llvm.xtensa.ee.vmin.s16.st.incp(i32 4, i32 3, i32 7, i32 4, i32 5) + ; CHECK: ee.vmin.s16.st.incp + tail call void @llvm.xtensa.ee.vmin.s32(i32 4, i32 0, i32 4) + ; CHECK: ee.vmin.s32 + tail 
call void @llvm.xtensa.ee.vmin.s32.ld.incp(i32 3, i32 8, i32 1, i32 0, i32 3) + ; CHECK: ee.vmin.s32.ld.incp + tail call void @llvm.xtensa.ee.vmin.s32.st.incp(i32 4, i32 9, i32 7, i32 6, i32 6) + ; CHECK: ee.vmin.s32.st.incp + tail call void @llvm.xtensa.ee.vmin.s8(i32 5, i32 7, i32 6) + ; CHECK: ee.vmin.s8 + tail call void @llvm.xtensa.ee.vmin.s8.ld.incp(i32 3, i32 3, i32 6, i32 1, i32 0) + ; CHECK: ee.vmin.s8.ld.incp + tail call void @llvm.xtensa.ee.vmin.s8.st.incp(i32 4, i32 3, i32 7, i32 5, i32 1) + ; CHECK: ee.vmin.s8.st.incp + tail call void @llvm.xtensa.ee.vmulas.s16.accx(i32 6, i32 7) + ; CHECK: ee.vmulas.s16.accx + tail call void @llvm.xtensa.ee.vmulas.s16.accx.ld.ip(i32 2, i32 2, i32 96, i32 2, i32 5) + ; CHECK: ee.vmulas.s16.accx.ld.ip + tail call void @llvm.xtensa.ee.vmulas.s16.accx.ld.ip.qup(i32 5, i32 5, i32 -96, i32 3, i32 5, i32 0, i32 5) + ; CHECK: ee.vmulas.s16.accx.ld.ip.qup + tail call void @llvm.xtensa.ee.vmulas.s16.accx.ld.xp(i32 7, i32 1, i32 3, i32 2, i32 6) + ; CHECK: ee.vmulas.s16.accx.ld.xp + tail call void @llvm.xtensa.ee.vmulas.s16.accx.ld.xp.qup(i32 5, i32 7, i32 8, i32 1, i32 3, i32 2, i32 5) + ; CHECK: ee.vmulas.s16.accx.ld.xp.qup + tail call void @llvm.xtensa.ee.vmulas.s16.qacc(i32 5, i32 7) + ; CHECK: ee.vmulas.s16.qacc + tail call void @llvm.xtensa.ee.vmulas.s16.qacc.ldbc.incp(i32 0, i32 8, i32 4, i32 3) + ; CHECK: ee.vmulas.s16.qacc.ldbc.incp + tail call void @llvm.xtensa.ee.vmulas.s16.qacc.ldbc.incp.qup(i32 5, i32 12, i32 1, i32 7, i32 7, i32 4) + ; CHECK: ee.vmulas.s16.qacc.ldbc.incp.qup + tail call void @llvm.xtensa.ee.vmulas.s16.qacc.ld.ip(i32 3, i32 9, i32 -112, i32 1, i32 6) + ; CHECK: ee.vmulas.s16.qacc.ld.ip + tail call void @llvm.xtensa.ee.vmulas.s16.qacc.ld.ip.qup(i32 2, i32 12, i32 -112, i32 0, i32 2, i32 7, i32 0) + ; CHECK: ee.vmulas.s16.qacc.ld.ip.qup + tail call void @llvm.xtensa.ee.vmulas.s16.qacc.ld.xp(i32 1, i32 1, i32 10, i32 0, i32 5) + ; CHECK: ee.vmulas.s16.qacc.ld.xp + tail call void 
@llvm.xtensa.ee.vmulas.s16.qacc.ld.xp.qup(i32 5, i32 4, i32 14, i32 7, i32 1, i32 1, i32 6) + ; CHECK: ee.vmulas.s16.qacc.ld.xp.qup + tail call void @llvm.xtensa.ee.vmulas.s8.accx(i32 5, i32 1) + ; CHECK: ee.vmulas.s8.accx + tail call void @llvm.xtensa.ee.vmulas.s8.accx.ld.ip(i32 4, i32 1, i32 -112, i32 0, i32 1) + ; CHECK: ee.vmulas.s8.accx.ld.ip + tail call void @llvm.xtensa.ee.vmulas.s8.accx.ld.ip.qup(i32 3, i32 11, i32 64, i32 6, i32 0, i32 1, i32 1) + ; CHECK: ee.vmulas.s8.accx.ld.ip.qup + tail call void @llvm.xtensa.ee.vmulas.s8.accx.ld.xp(i32 1, i32 0, i32 9, i32 7, i32 7) + ; CHECK: ee.vmulas.s8.accx.ld.xp + tail call void @llvm.xtensa.ee.vmulas.s8.accx.ld.xp.qup(i32 0, i32 6, i32 12, i32 2, i32 3, i32 6, i32 5) + ; CHECK: ee.vmulas.s8.accx.ld.xp.qup + tail call void @llvm.xtensa.ee.vmulas.s8.qacc(i32 4, i32 0) + ; CHECK: ee.vmulas.s8.qacc + tail call void @llvm.xtensa.ee.vmulas.s8.qacc.ldbc.incp(i32 6, i32 12, i32 6, i32 3) + ; CHECK: ee.vmulas.s8.qacc.ldbc.incp + tail call void @llvm.xtensa.ee.vmulas.s8.qacc.ldbc.incp.qup(i32 7, i32 14, i32 2, i32 3, i32 1, i32 6) + ; CHECK: ee.vmulas.s8.qacc.ldbc.incp.qup + tail call void @llvm.xtensa.ee.vmulas.s8.qacc.ld.ip(i32 6, i32 3, i32 0, i32 6, i32 1) + ; CHECK: ee.vmulas.s8.qacc.ld.ip + tail call void @llvm.xtensa.ee.vmulas.s8.qacc.ld.ip.qup(i32 2, i32 14, i32 16, i32 1, i32 4, i32 0, i32 1) + ; CHECK: ee.vmulas.s8.qacc.ld.ip.qup + tail call void @llvm.xtensa.ee.vmulas.s8.qacc.ld.xp(i32 0, i32 2, i32 5, i32 7, i32 4) + ; CHECK: ee.vmulas.s8.qacc.ld.xp + tail call void @llvm.xtensa.ee.vmulas.s8.qacc.ld.xp.qup(i32 3, i32 2, i32 7, i32 5, i32 4, i32 3, i32 0) + ; CHECK: ee.vmulas.s8.qacc.ld.xp.qup + tail call void @llvm.xtensa.ee.vmulas.u16.accx(i32 0, i32 0) + ; CHECK: ee.vmulas.u16.accx + tail call void @llvm.xtensa.ee.vmulas.u16.accx.ld.ip(i32 0, i32 0, i32 64, i32 7, i32 2) + ; CHECK: ee.vmulas.u16.accx.ld.ip + tail call void @llvm.xtensa.ee.vmulas.u16.accx.ld.ip.qup(i32 5, i32 3, i32 96, i32 1, i32 1, i32 0, 
i32 0) + ; CHECK: ee.vmulas.u16.accx.ld.ip.qup + tail call void @llvm.xtensa.ee.vmulas.u16.accx.ld.xp(i32 6, i32 12, i32 9, i32 1, i32 5) + ; CHECK: ee.vmulas.u16.accx.ld.xp + tail call void @llvm.xtensa.ee.vmulas.u16.accx.ld.xp.qup(i32 7, i32 11, i32 14, i32 3, i32 3, i32 3, i32 0) + ; CHECK: ee.vmulas.u16.accx.ld.xp.qup + tail call void @llvm.xtensa.ee.vmulas.u16.qacc(i32 5, i32 7) + ; CHECK: ee.vmulas.u16.qacc + tail call void @llvm.xtensa.ee.vmulas.u16.qacc.ldbc.incp(i32 6, i32 8, i32 0, i32 3) + ; CHECK: ee.vmulas.u16.qacc.ldbc.incp + tail call void @llvm.xtensa.ee.vmulas.u16.qacc.ldbc.incp.qup(i32 6, i32 9, i32 7, i32 7, i32 6, i32 0) + ; CHECK: ee.vmulas.u16.qacc.ldbc.incp.qup + tail call void @llvm.xtensa.ee.vmulas.u16.qacc.ld.ip(i32 4, i32 9, i32 16, i32 0, i32 4) + ; CHECK: ee.vmulas.u16.qacc.ld.ip + tail call void @llvm.xtensa.ee.vmulas.u16.qacc.ld.ip.qup(i32 2, i32 8, i32 64, i32 6, i32 2, i32 6, i32 6) + ; CHECK: ee.vmulas.u16.qacc.ld.ip.qup + tail call void @llvm.xtensa.ee.vmulas.u16.qacc.ld.xp(i32 4, i32 7, i32 14, i32 3, i32 2) + ; CHECK: ee.vmulas.u16.qacc.ld.xp + tail call void @llvm.xtensa.ee.vmulas.u16.qacc.ld.xp.qup(i32 2, i32 9, i32 9, i32 2, i32 4, i32 5, i32 3) + ; CHECK: ee.vmulas.u16.qacc.ld.xp.qup + tail call void @llvm.xtensa.ee.vmulas.u8.accx(i32 7, i32 6) + ; CHECK: ee.vmulas.u8.accx + tail call void @llvm.xtensa.ee.vmulas.u8.accx.ld.ip(i32 2, i32 7, i32 -48, i32 1, i32 1) + ; CHECK: ee.vmulas.u8.accx.ld.ip + tail call void @llvm.xtensa.ee.vmulas.u8.accx.ld.ip.qup(i32 5, i32 8, i32 16, i32 2, i32 3, i32 5, i32 5) + ; CHECK: ee.vmulas.u8.accx.ld.ip.qup + tail call void @llvm.xtensa.ee.vmulas.u8.accx.ld.xp(i32 3, i32 8, i32 14, i32 3, i32 4) + ; CHECK: ee.vmulas.u8.accx.ld.xp + tail call void @llvm.xtensa.ee.vmulas.u8.accx.ld.xp.qup(i32 4, i32 12, i32 3, i32 1, i32 6, i32 1, i32 7) + ; CHECK: ee.vmulas.u8.accx.ld.xp.qup + tail call void @llvm.xtensa.ee.vmulas.u8.qacc(i32 0, i32 3) + ; CHECK: ee.vmulas.u8.qacc + tail call void 
@llvm.xtensa.ee.vmulas.u8.qacc.ldbc.incp(i32 4, i32 8, i32 2, i32 4) + ; CHECK: ee.vmulas.u8.qacc.ldbc.incp + tail call void @llvm.xtensa.ee.vmulas.u8.qacc.ldbc.incp.qup(i32 0, i32 2, i32 4, i32 5, i32 6, i32 7) + ; CHECK: ee.vmulas.u8.qacc.ldbc.incp.qup + tail call void @llvm.xtensa.ee.vmulas.u8.qacc.ld.ip(i32 6, i32 2, i32 -128, i32 5, i32 3) + ; CHECK: ee.vmulas.u8.qacc.ld.ip + tail call void @llvm.xtensa.ee.vmulas.u8.qacc.ld.ip.qup(i32 7, i32 1, i32 48, i32 5, i32 6, i32 5, i32 5) + ; CHECK: ee.vmulas.u8.qacc.ld.ip.qup + tail call void @llvm.xtensa.ee.vmulas.u8.qacc.ld.xp(i32 1, i32 9, i32 12, i32 4, i32 2) + ; CHECK: ee.vmulas.u8.qacc.ld.xp + tail call void @llvm.xtensa.ee.vmulas.u8.qacc.ld.xp.qup(i32 7, i32 13, i32 13, i32 0, i32 4, i32 2, i32 6) + ; CHECK: ee.vmulas.u8.qacc.ld.xp.qup + tail call void @llvm.xtensa.ee.vmul.s16(i32 4, i32 1, i32 0) + ; CHECK: ee.vmul.s16 + tail call void @llvm.xtensa.ee.vmul.s16.ld.incp(i32 0, i32 5, i32 1, i32 3, i32 5) + ; CHECK: ee.vmul.s16.ld.incp + tail call void @llvm.xtensa.ee.vmul.s16.st.incp(i32 3, i32 9, i32 3, i32 7, i32 7) + ; CHECK: ee.vmul.s16.st.incp + tail call void @llvm.xtensa.ee.vmul.s8(i32 3, i32 7, i32 7) + ; CHECK: ee.vmul.s8 + tail call void @llvm.xtensa.ee.vmul.s8.ld.incp(i32 3, i32 0, i32 4, i32 3, i32 1) + ; CHECK: ee.vmul.s8.ld.incp + tail call void @llvm.xtensa.ee.vmul.s8.st.incp(i32 7, i32 14, i32 1, i32 4, i32 4) + ; CHECK: ee.vmul.s8.st.incp + tail call void @llvm.xtensa.ee.vmul.u16(i32 5, i32 6, i32 3) + ; CHECK: ee.vmul.u16 + tail call void @llvm.xtensa.ee.vmul.u16.ld.incp(i32 0, i32 12, i32 3, i32 0, i32 6) + ; CHECK: ee.vmul.u16.ld.incp + tail call void @llvm.xtensa.ee.vmul.u16.st.incp(i32 0, i32 14, i32 0, i32 5, i32 7) + ; CHECK: ee.vmul.u16.st.incp + tail call void @llvm.xtensa.ee.vmul.u8(i32 5, i32 1, i32 2) + ; CHECK: ee.vmul.u8 + tail call void @llvm.xtensa.ee.vmul.u8.ld.incp(i32 3, i32 6, i32 4, i32 4, i32 0) + ; CHECK: ee.vmul.u8.ld.incp + tail call void 
@llvm.xtensa.ee.vmul.u8.st.incp(i32 4, i32 13, i32 4, i32 4, i32 2) + ; CHECK: ee.vmul.u8.st.incp + tail call void @llvm.xtensa.ee.vprelu.s16(i32 5, i32 2, i32 5, i32 3) + ; CHECK: ee.vprelu.s16 + tail call void @llvm.xtensa.ee.vprelu.s8(i32 7, i32 7, i32 0, i32 14) + ; CHECK: ee.vprelu.s8 + tail call void @llvm.xtensa.ee.vrelu.s16(i32 3, i32 4, i32 4) + ; CHECK: ee.vrelu.s16 + tail call void @llvm.xtensa.ee.vrelu.s8(i32 5, i32 8, i32 5) + ; CHECK: ee.vrelu.s8 + tail call void @llvm.xtensa.ee.vsl.32(i32 6, i32 6) + ; CHECK: ee.vsl.32 + tail call void @llvm.xtensa.ee.vsmulas.s16.qacc(i32 2, i32 3, i32 5) + ; CHECK: ee.vsmulas.s16.qacc + tail call void @llvm.xtensa.ee.vsmulas.s16.qacc.ld.incp(i32 5, i32 11, i32 7, i32 6, i32 6) + ; CHECK: ee.vsmulas.s16.qacc.ld.incp + tail call void @llvm.xtensa.ee.vsmulas.s8.qacc(i32 6, i32 4, i32 9) + ; CHECK: ee.vsmulas.s8.qacc + tail call void @llvm.xtensa.ee.vsmulas.s8.qacc.ld.incp(i32 3, i32 10, i32 7, i32 1, i32 2) + ; CHECK: ee.vsmulas.s8.qacc.ld.incp + tail call void @llvm.xtensa.ee.vsr.32(i32 6, i32 6) + ; CHECK: ee.vsr.32 + tail call void @llvm.xtensa.ee.vst.128.ip(i32 1, i32 2, i32 160) + ; CHECK: ee.vst.128.ip + tail call void @llvm.xtensa.ee.vst.128.xp(i32 1, i32 12, i32 0) + ; CHECK: ee.vst.128.xp + tail call void @llvm.xtensa.ee.vst.h.64.ip(i32 5, i32 3, i32 400) + ; CHECK: ee.vst.h.64.ip + tail call void @llvm.xtensa.ee.vst.h.64.xp(i32 7, i32 2, i32 0) + ; CHECK: ee.vst.h.64.xp + tail call void @llvm.xtensa.ee.vst.l.64.ip(i32 0, i32 12, i32 -952) + ; CHECK: ee.vst.l.64.ip + tail call void @llvm.xtensa.ee.vst.l.64.xp(i32 2, i32 7, i32 10) + ; CHECK: ee.vst.l.64.xp + tail call void @llvm.xtensa.ee.vsubs.s16(i32 0, i32 3, i32 1) + ; CHECK: ee.vsubs.s16 + tail call void @llvm.xtensa.ee.vsubs.s16.ld.incp(i32 0, i32 13, i32 1, i32 2, i32 1) + ; CHECK: ee.vsubs.s16.ld.incp + tail call void @llvm.xtensa.ee.vsubs.s16.st.incp(i32 4, i32 7, i32 6, i32 6, i32 3) + ; CHECK: ee.vsubs.s16.st.incp + tail call void 
@llvm.xtensa.ee.vsubs.s32(i32 1, i32 6, i32 7) + ; CHECK: ee.vsubs.s32 + tail call void @llvm.xtensa.ee.vsubs.s32.ld.incp(i32 5, i32 11, i32 0, i32 6, i32 7) + ; CHECK: ee.vsubs.s32.ld.incp + tail call void @llvm.xtensa.ee.vsubs.s32.st.incp(i32 4, i32 9, i32 0, i32 0, i32 2) + ; CHECK: ee.vsubs.s32.st.incp + tail call void @llvm.xtensa.ee.vsubs.s8(i32 6, i32 1, i32 0) + ; CHECK: ee.vsubs.s8 + tail call void @llvm.xtensa.ee.vsubs.s8.ld.incp(i32 5, i32 12, i32 5, i32 4, i32 7) + ; CHECK: ee.vsubs.s8.ld.incp + tail call void @llvm.xtensa.ee.vsubs.s8.st.incp(i32 1, i32 11, i32 5, i32 2, i32 3) + ; CHECK: ee.vsubs.s8.st.incp + tail call void @llvm.xtensa.ee.vunzip.16(i32 3, i32 4) + ; CHECK: ee.vunzip.16 + tail call void @llvm.xtensa.ee.vunzip.32(i32 7, i32 3) + ; CHECK: ee.vunzip.32 + tail call void @llvm.xtensa.ee.vunzip.8(i32 4, i32 2) + ; CHECK: ee.vunzip.8 + tail call void @llvm.xtensa.ee.vzip.16(i32 2, i32 0) + ; CHECK: ee.vzip.16 + tail call void @llvm.xtensa.ee.vzip.32(i32 0, i32 3) + ; CHECK: ee.vzip.32 + tail call void @llvm.xtensa.ee.vzip.8(i32 5, i32 6) + ; CHECK: ee.vzip.8 + tail call void @llvm.xtensa.ee.xorq(i32 0, i32 7, i32 7) + ; CHECK: ee.xorq + tail call void @llvm.xtensa.ee.zero.accx() + ; CHECK: ee.zero.accx + tail call void @llvm.xtensa.ee.zero.q(i32 4) + ; CHECK: ee.zero.q + tail call void @llvm.xtensa.ee.zero.qacc() + ; CHECK: ee.zero.qacc + tail call i32 @llvm.xtensa.rur.accx.0() + ; CHECK: rur.accx_0 + tail call i32 @llvm.xtensa.rur.accx.1() + ; CHECK: rur.accx_1 + tail call i32 @llvm.xtensa.rur.fft.bit.width() + ; CHECK: rur.fft_bit_width + tail call i32 @llvm.xtensa.rur.gpio.out() + ; CHECK: rur.gpio_out + tail call i32 @llvm.xtensa.rur.qacc.h.0() + ; CHECK: rur.qacc_h_0 + tail call i32 @llvm.xtensa.rur.qacc.h.1() + ; CHECK: rur.qacc_h_1 + tail call i32 @llvm.xtensa.rur.qacc.h.2() + ; CHECK: rur.qacc_h_2 + tail call i32 @llvm.xtensa.rur.qacc.h.3() + ; CHECK: rur.qacc_h_3 + tail call i32 @llvm.xtensa.rur.qacc.h.4() + ; CHECK: rur.qacc_h_4 + 
tail call i32 @llvm.xtensa.rur.qacc.l.0() + ; CHECK: rur.qacc_l_0 + tail call i32 @llvm.xtensa.rur.qacc.l.1() + ; CHECK: rur.qacc_l_1 + tail call i32 @llvm.xtensa.rur.qacc.l.2() + ; CHECK: rur.qacc_l_2 + tail call i32 @llvm.xtensa.rur.qacc.l.3() + ; CHECK: rur.qacc_l_3 + tail call i32 @llvm.xtensa.rur.qacc.l.4() + ; CHECK: rur.qacc_l_4 + tail call i32 @llvm.xtensa.rur.sar.byte() + ; CHECK: rur.sar_byte + tail call i32 @llvm.xtensa.rur.ua.state.0() + ; CHECK: rur.ua_state_0 + tail call i32 @llvm.xtensa.rur.ua.state.1() + ; CHECK: rur.ua_state_1 + tail call i32 @llvm.xtensa.rur.ua.state.2() + ; CHECK: rur.ua_state_2 + tail call i32 @llvm.xtensa.rur.ua.state.3() + ; CHECK: rur.ua_state_3 + tail call void @llvm.xtensa.wur.accx.0(i32 13) + ; CHECK: wur.accx_0 + tail call void @llvm.xtensa.wur.accx.1(i32 4) + ; CHECK: wur.accx_1 + tail call void @llvm.xtensa.wur.fft.bit.width(i32 7) + ; CHECK: wur.fft_bit_width + tail call void @llvm.xtensa.wur.gpio.out(i32 0) + ; CHECK: wur.gpio_out + tail call void @llvm.xtensa.wur.qacc.h.0(i32 8) + ; CHECK: wur.qacc_h_0 + tail call void @llvm.xtensa.wur.qacc.h.1(i32 6) + ; CHECK: wur.qacc_h_1 + tail call void @llvm.xtensa.wur.qacc.h.2(i32 12) + ; CHECK: wur.qacc_h_2 + tail call void @llvm.xtensa.wur.qacc.h.3(i32 7) + ; CHECK: wur.qacc_h_3 + tail call void @llvm.xtensa.wur.qacc.h.4(i32 2) + ; CHECK: wur.qacc_h_4 + tail call void @llvm.xtensa.wur.qacc.l.0(i32 0) + ; CHECK: wur.qacc_l_0 + tail call void @llvm.xtensa.wur.qacc.l.1(i32 6) + ; CHECK: wur.qacc_l_1 + tail call void @llvm.xtensa.wur.qacc.l.2(i32 10) + ; CHECK: wur.qacc_l_2 + tail call void @llvm.xtensa.wur.qacc.l.3(i32 2) + ; CHECK: wur.qacc_l_3 + tail call void @llvm.xtensa.wur.qacc.l.4(i32 6) + ; CHECK: wur.qacc_l_4 + tail call void @llvm.xtensa.wur.sar.byte(i32 13) + ; CHECK: wur.sar_byte + tail call void @llvm.xtensa.wur.ua.state.0(i32 0) + ; CHECK: wur.ua_state_0 + tail call void @llvm.xtensa.wur.ua.state.1(i32 9) + ; CHECK: wur.ua_state_1 + tail call void 
@llvm.xtensa.wur.ua.state.2(i32 3) + ; CHECK: wur.ua_state_2 + tail call void @llvm.xtensa.wur.ua.state.3(i32 3) + ; CHECK: wur.ua_state_3 + tail call void @llvm.xtensa.mv.qr(i32 0, i32 1) + ; CHECK: mv.qr + ret void +} + +declare void @llvm.xtensa.ee.andq(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.bitrev(i32, i32) nounwind +declare void @llvm.xtensa.ee.cmul.s16(i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.cmul.s16.ld.incp(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.cmul.s16.st.incp(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.fft.ams.s16.ld.incp(i32, i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.fft.ams.s16.ld.incp.uaup(i32, i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.fft.ams.s16.ld.r32.decp(i32, i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.fft.ams.s16.st.incp(i32, i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.fft.cmul.s16.ld.xp(i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.fft.cmul.s16.st.xp(i32, i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.fft.r2bf.s16(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.fft.r2bf.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.fft.vst.r32.decp(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.ldf.128.ip(float, float, float, float, i32, i32) nounwind +declare void @llvm.xtensa.ee.ldf.128.xp(float, float, float, float, i32, i32) nounwind +declare void @llvm.xtensa.ee.ldf.64.ip(float, float, i32, i32) nounwind +declare void @llvm.xtensa.ee.ldf.64.xp(float, float, i32, i32) nounwind +declare void @llvm.xtensa.ee.ldqa.s16.128.ip(i32, i32) nounwind +declare void @llvm.xtensa.ee.ldqa.s16.128.xp(i32, i32) nounwind +declare void @llvm.xtensa.ee.ldqa.s8.128.ip(i32, i32) nounwind +declare void @llvm.xtensa.ee.ldqa.s8.128.xp(i32, i32) nounwind +declare void 
@llvm.xtensa.ee.ldqa.u16.128.ip(i32, i32) nounwind +declare void @llvm.xtensa.ee.ldqa.u16.128.xp(i32, i32) nounwind +declare void @llvm.xtensa.ee.ldqa.u8.128.ip(i32, i32) nounwind +declare void @llvm.xtensa.ee.ldqa.u8.128.xp(i32, i32) nounwind +declare void @llvm.xtensa.ee.ldxq.32(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.ld.128.usar.ip(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.ld.128.usar.xp(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.ld.accx.ip(i32, i32) nounwind +declare void @llvm.xtensa.ee.ld.qacc.h.h.32.ip(i32, i32) nounwind +declare void @llvm.xtensa.ee.ld.qacc.h.l.128.ip(i32, i32) nounwind +declare void @llvm.xtensa.ee.ld.qacc.l.h.32.ip(i32, i32) nounwind +declare void @llvm.xtensa.ee.ld.qacc.l.l.128.ip(i32, i32) nounwind +declare void @llvm.xtensa.ee.ld.ua.state.ip(i32, i32) nounwind +declare void @llvm.xtensa.ee.movi.32.a(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.movi.32.q(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.mov.s16.qacc(i32) nounwind +declare void @llvm.xtensa.ee.mov.s8.qacc(i32) nounwind +declare void @llvm.xtensa.ee.mov.u16.qacc(i32) nounwind +declare void @llvm.xtensa.ee.mov.u8.qacc(i32) nounwind +declare void @llvm.xtensa.ee.notq(i32, i32) nounwind +declare void @llvm.xtensa.ee.orq(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.slci.2q(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.slcxxp.2q(i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.srci.2q(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.srcmb.s16.qacc(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.srcmb.s8.qacc(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.srcq.128.st.incp(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.srcxxp.2q(i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.src.q(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.src.q.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.src.q.ld.xp(i32, i32, i32, i32, i32) nounwind 
+declare void @llvm.xtensa.ee.src.q.qup(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.srs.accx(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.stf.128.ip(float, float, float, float, i32, i32) nounwind +declare void @llvm.xtensa.ee.stf.128.xp(float, float, float, float, i32, i32) nounwind +declare void @llvm.xtensa.ee.stf.64.ip(float, float, i32, i32) nounwind +declare void @llvm.xtensa.ee.stf.64.xp(float, float, i32, i32) nounwind +declare void @llvm.xtensa.ee.stxq.32(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.st.accx.ip(i32, i32) nounwind +declare void @llvm.xtensa.ee.st.qacc.h.h.32.ip(i32, i32) nounwind +declare void @llvm.xtensa.ee.st.qacc.h.l.128.ip(i32, i32) nounwind +declare void @llvm.xtensa.ee.st.qacc.l.h.32.ip(i32, i32) nounwind +declare void @llvm.xtensa.ee.st.qacc.l.l.128.ip(i32, i32) nounwind +declare void @llvm.xtensa.ee.st.ua.state.ip(i32, i32) nounwind +declare void @llvm.xtensa.ee.vadds.s16(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vadds.s16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vadds.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vadds.s32(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vadds.s32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vadds.s32.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vadds.s8(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vadds.s8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vadds.s8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vcmp.eq.s16(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vcmp.eq.s32(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vcmp.eq.s8(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vcmp.gt.s16(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vcmp.gt.s32(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vcmp.gt.s8(i32, i32, i32) nounwind +declare 
void @llvm.xtensa.ee.vcmp.lt.s16(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vcmp.lt.s32(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vcmp.lt.s8(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vldbc.16(i32, i32) nounwind +declare void @llvm.xtensa.ee.vldbc.16.ip(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vldbc.16.xp(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vldbc.32(i32, i32) nounwind +declare void @llvm.xtensa.ee.vldbc.32.ip(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vldbc.32.xp(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vldbc.8(i32, i32) nounwind +declare void @llvm.xtensa.ee.vldbc.8.ip(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vldbc.8.xp(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vldhbc.16.incp(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vld.128.ip(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vld.128.xp(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vld.h.64.ip(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vld.h.64.xp(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vld.l.64.ip(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vld.l.64.xp(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmax.s16(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmax.s16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmax.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmax.s32(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmax.s32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmax.s32.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmax.s8(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmax.s8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmax.s8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmin.s16(i32, i32, i32) nounwind +declare void 
@llvm.xtensa.ee.vmin.s16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmin.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmin.s32(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmin.s32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmin.s32.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmin.s8(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmin.s8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmin.s8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s16.accx(i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s16.accx.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s16.accx.ld.ip.qup(i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s16.accx.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s16.accx.ld.xp.qup(i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s16.qacc(i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s16.qacc.ldbc.incp(i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s16.qacc.ldbc.incp.qup(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s16.qacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s16.qacc.ld.ip.qup(i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s16.qacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s16.qacc.ld.xp.qup(i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s8.accx(i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s8.accx.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s8.accx.ld.ip.qup(i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s8.accx.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void 
@llvm.xtensa.ee.vmulas.s8.accx.ld.xp.qup(i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s8.qacc(i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s8.qacc.ldbc.incp(i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s8.qacc.ldbc.incp.qup(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s8.qacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s8.qacc.ld.ip.qup(i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s8.qacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.s8.qacc.ld.xp.qup(i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u16.accx(i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u16.accx.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u16.accx.ld.ip.qup(i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u16.accx.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u16.accx.ld.xp.qup(i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u16.qacc(i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u16.qacc.ldbc.incp(i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u16.qacc.ldbc.incp.qup(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u16.qacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u16.qacc.ld.ip.qup(i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u16.qacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u16.qacc.ld.xp.qup(i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u8.accx(i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u8.accx.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u8.accx.ld.ip.qup(i32, i32, i32, i32, i32, i32, i32) 
nounwind +declare void @llvm.xtensa.ee.vmulas.u8.accx.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u8.accx.ld.xp.qup(i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u8.qacc(i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u8.qacc.ldbc.incp(i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u8.qacc.ldbc.incp.qup(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u8.qacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u8.qacc.ld.ip.qup(i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u8.qacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmulas.u8.qacc.ld.xp.qup(i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmul.s16(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmul.s16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmul.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmul.s8(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmul.s8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmul.s8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmul.u16(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmul.u16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmul.u16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmul.u8(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmul.u8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vmul.u8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vprelu.s16(i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vprelu.s8(i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vrelu.s16(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vrelu.s8(i32, i32, i32) nounwind +declare void 
@llvm.xtensa.ee.vsl.32(i32, i32) nounwind +declare void @llvm.xtensa.ee.vsmulas.s16.qacc(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vsmulas.s16.qacc.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vsmulas.s8.qacc(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vsmulas.s8.qacc.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vsr.32(i32, i32) nounwind +declare void @llvm.xtensa.ee.vst.128.ip(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vst.128.xp(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vst.h.64.ip(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vst.h.64.xp(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vst.l.64.ip(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vst.l.64.xp(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vsubs.s16(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vsubs.s16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vsubs.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vsubs.s32(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vsubs.s32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vsubs.s32.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vsubs.s8(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vsubs.s8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vsubs.s8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.vunzip.16(i32, i32) nounwind +declare void @llvm.xtensa.ee.vunzip.32(i32, i32) nounwind +declare void @llvm.xtensa.ee.vunzip.8(i32, i32) nounwind +declare void @llvm.xtensa.ee.vzip.16(i32, i32) nounwind +declare void @llvm.xtensa.ee.vzip.32(i32, i32) nounwind +declare void @llvm.xtensa.ee.vzip.8(i32, i32) nounwind +declare void @llvm.xtensa.ee.xorq(i32, i32, i32) nounwind +declare void @llvm.xtensa.ee.zero.accx() nounwind +declare void @llvm.xtensa.ee.zero.q(i32) nounwind 
+declare void @llvm.xtensa.ee.zero.qacc() nounwind +declare i32 @llvm.xtensa.rur.accx.0() nounwind +declare i32 @llvm.xtensa.rur.accx.1() nounwind +declare i32 @llvm.xtensa.rur.fft.bit.width() nounwind +declare i32 @llvm.xtensa.rur.gpio.out() nounwind +declare i32 @llvm.xtensa.rur.qacc.h.0() nounwind +declare i32 @llvm.xtensa.rur.qacc.h.1() nounwind +declare i32 @llvm.xtensa.rur.qacc.h.2() nounwind +declare i32 @llvm.xtensa.rur.qacc.h.3() nounwind +declare i32 @llvm.xtensa.rur.qacc.h.4() nounwind +declare i32 @llvm.xtensa.rur.qacc.l.0() nounwind +declare i32 @llvm.xtensa.rur.qacc.l.1() nounwind +declare i32 @llvm.xtensa.rur.qacc.l.2() nounwind +declare i32 @llvm.xtensa.rur.qacc.l.3() nounwind +declare i32 @llvm.xtensa.rur.qacc.l.4() nounwind +declare i32 @llvm.xtensa.rur.sar.byte() nounwind +declare i32 @llvm.xtensa.rur.ua.state.0() nounwind +declare i32 @llvm.xtensa.rur.ua.state.1() nounwind +declare i32 @llvm.xtensa.rur.ua.state.2() nounwind +declare i32 @llvm.xtensa.rur.ua.state.3() nounwind +declare void @llvm.xtensa.wur.accx.0(i32) nounwind +declare void @llvm.xtensa.wur.accx.1(i32) nounwind +declare void @llvm.xtensa.wur.fft.bit.width(i32) nounwind +declare void @llvm.xtensa.wur.gpio.out(i32) nounwind +declare void @llvm.xtensa.wur.qacc.h.0(i32) nounwind +declare void @llvm.xtensa.wur.qacc.h.1(i32) nounwind +declare void @llvm.xtensa.wur.qacc.h.2(i32) nounwind +declare void @llvm.xtensa.wur.qacc.h.3(i32) nounwind +declare void @llvm.xtensa.wur.qacc.h.4(i32) nounwind +declare void @llvm.xtensa.wur.qacc.l.0(i32) nounwind +declare void @llvm.xtensa.wur.qacc.l.1(i32) nounwind +declare void @llvm.xtensa.wur.qacc.l.2(i32) nounwind +declare void @llvm.xtensa.wur.qacc.l.3(i32) nounwind +declare void @llvm.xtensa.wur.qacc.l.4(i32) nounwind +declare void @llvm.xtensa.wur.sar.byte(i32) nounwind +declare void @llvm.xtensa.wur.ua.state.0(i32) nounwind +declare void @llvm.xtensa.wur.ua.state.1(i32) nounwind +declare void @llvm.xtensa.wur.ua.state.2(i32) nounwind +declare 
void @llvm.xtensa.wur.ua.state.3(i32) nounwind +declare void @llvm.xtensa.mv.qr(i32, i32) nounwind + diff --git a/llvm/test/MC/Xtensa/xtensa-esp32s3-valid.s b/llvm/test/MC/Xtensa/xtensa-esp32s3-valid.s index 50037ea38df15..ee37f4bca7b1b 100644 --- a/llvm/test/MC/Xtensa/xtensa-esp32s3-valid.s +++ b/llvm/test/MC/Xtensa/xtensa-esp32s3-valid.s @@ -1,4 +1,4 @@ -# RUN: llvm-mc %s -triple=xtensa -mattr=+esp32s3 -show-encoding \ +# RUN: llvm-mc %s -triple=xtensa -mcpu=esp32s3 -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s .align 4 @@ -19,3 +19,509 @@ ee.set_bit_gpio_out 18 # CHECK-INST: ee.wr_mask_gpio_out a3, a2 # CHECK: encoding: [0x34,0x42,0x72] ee.wr_mask_gpio_out a3, a2 + +ee.andq q5, q6, q4 +# CHECK: ee.andq q5, q6, q4 # encoding: [0xc4,0xb8,0xed] +ee.bitrev q2, a6 +# CHECK: ee.bitrev q2, a6 # encoding: [0x64,0x7b,0xdd] +ee.cmul.s16 q3, q6, q2, 3 +# CHECK: ee.cmul.s16 q3, q6, q2, 3 # encoding: [0x34,0x96,0x9e] +ee.cmul.s16.ld.incp q2, a7, q5, q1, q4, 2 +# CHECK: ee.cmul.s16.ld.incp q2, a7, q5, q1, q4, 2 # encoding: [0xe7,0x60,0x2a,0x1c] +ee.cmul.s16.st.incp q7, a11, q1, q5, q2, 3 +# CHECK: ee.cmul.s16.st.incp q7, a11, q1, q5, q2, 3 # encoding: [0x3b,0x57,0x83,0x1c] +ee.fft.ams.s16.ld.incp q5, a5, q3, q1, q1, q2, q5, 1 +# CHECK: ee.fft.ams.s16.ld.incp q5, a5, q3, q1, q1, q2, q5, 1 # encoding: [0xd5,0x5a,0x4a,0x1a] +ee.fft.ams.s16.ld.incp.uaup q7, a12, q4, q1, q5, q6, q3, 0 +# CHECK: ee.fft.ams.s16.ld.incp.uaup q7, a12, q4, q1, q5, q6, q3, 0 # encoding: [0xfc,0x66,0x87,0x1a] +ee.fft.ams.s16.ld.r32.decp q6, a5, q0, q2, q7, q2, q0, 0 +# CHECK: ee.fft.ams.s16.ld.r32.decp q6, a5, q0, q2, q7, q2, q0, 0 # encoding: [0x65,0xc2,0x11,0x1b] +ee.fft.ams.s16.st.incp q3, q6, a7, a6, q5, q5, q1, 1 +# CHECK: ee.fft.ams.s16.st.incp q3, q6, a7, a6, q5, q5, q1, 1 # encoding: [0x67,0x4b,0xeb,0x14] +ee.fft.cmul.s16.ld.xp q3, a12, a6, q7, q0, q7, 2 +# CHECK: ee.fft.cmul.s16.ld.xp q3, a12, a6, q7, q0, q7, 2 # encoding: [0x6c,0x1f,0xae,0x1b] +ee.fft.cmul.s16.st.xp 
q4, q0, q0, a2, a8, 6, 1, 1 +# CHECK: ee.fft.cmul.s16.st.xp q4, q0, q0, a2, a8, 6, 1, 1 # encoding: [0x82,0x30,0x51,0x15] +ee.fft.r2bf.s16 q7, q1, q3, q6, 1 +# CHECK: ee.fft.r2bf.s16 q7, q1, q3, q6, 1 # encoding: [0x54,0x9d,0xfc] +ee.fft.r2bf.s16.st.incp q7, q3, q7, a2, 2 +# CHECK: ee.fft.r2bf.s16.st.incp q7, q3, q7, a2, 2 # encoding: [0x42,0xd7,0x1e,0x1d] +ee.fft.vst.r32.decp q3, a14, 0 +# CHECK: ee.fft.vst.r32.decp q3, a14, 0 # encoding: [0xe4,0xb3,0xdd] +ee.ldf.128.ip f3, f5, f8, f0, a13, 64 +# CHECK: ee.ldf.128.ip f3, f5, f8, f0, a13, 64 # encoding: [0x0d,0x80,0x35,0x10] +ee.ldf.128.xp f5, f2, f4, f4, a7, a8 +# CHECK: ee.ldf.128.xp f5, f2, f4, f4, a7, a8 # encoding: [0x87,0x44,0x52,0x11] +ee.ldf.64.ip f6, f5, a1, 488 +# CHECK: ee.ldf.64.ip f6, f5, a1, 488 # encoding: [0x41,0x65,0x74,0x1c] +ee.ldf.64.xp f0, f6, a3, a8 +# CHECK: ee.ldf.64.xp f0, f6, a3, a8 # encoding: [0x30,0x08,0x66] +ee.ldqa.s16.128.ip a11, 1904 +# CHECK: ee.ldqa.s16.128.ip a11, 1904 # encoding: [0xb4,0x70,0x01] +ee.ldqa.s16.128.xp a6, a2 +# CHECK: ee.ldqa.s16.128.xp a6, a2 # encoding: [0x64,0x42,0x7e] +ee.ldqa.s8.128.ip a8, 320 +# CHECK: ee.ldqa.s8.128.ip a8, 320 # encoding: [0x84,0x40,0x11] +ee.ldqa.s8.128.xp a6, a11 +# CHECK: ee.ldqa.s8.128.xp a6, a11 # encoding: [0x64,0x4b,0x71] +ee.ldqa.u16.128.ip a2, -1424 +# CHECK: ee.ldqa.u16.128.ip a2, -1424 # encoding: [0x24,0x70,0x05] +ee.ldqa.u16.128.xp a3, a4 +# CHECK: ee.ldqa.u16.128.xp a3, a4 # encoding: [0x34,0x44,0x7a] +ee.ldqa.u8.128.ip a4, 784 +# CHECK: ee.ldqa.u8.128.ip a4, 784 # encoding: [0x44,0x10,0x15] +ee.ldqa.u8.128.xp a4, a9 +# CHECK: ee.ldqa.u8.128.xp a4, a9 # encoding: [0x44,0x49,0x70] +ee.ldxq.32 q2, q6, a11, 2, 1 +# CHECK: ee.ldxq.32 q2, q6, a11, 2, 1 # encoding: [0xdb,0x8f,0x25,0x1c] +ee.ld.128.usar.ip q4, a8, -592 +# CHECK: ee.ld.128.usar.ip q4, a8, -592 # encoding: [0x84,0x30,0xe1] +ee.ld.128.usar.xp q1, a9, a7 +# CHECK: ee.ld.128.usar.xp q1, a9, a7 # encoding: [0x94,0x87,0x8d] +ee.ld.accx.ip a2, 720 +# CHECK: ee.ld.accx.ip a2, 
720 # encoding: [0x24,0x50,0x4e] +ee.ld.qacc_h.h.32.ip a6, -292 +# CHECK: ee.ld.qacc_h.h.32.ip a6, -292 # encoding: [0x64,0x5c,0x5e] +ee.ld.qacc_h.l.128.ip a14, 96 +# CHECK: ee.ld.qacc_h.l.128.ip a14, 96 # encoding: [0xe4,0x60,0x06] +ee.ld.qacc_l.h.32.ip a0, -184 +# CHECK: ee.ld.qacc_l.h.32.ip a0, -184 # encoding: [0x04,0x48,0x16] +ee.ld.qacc_l.l.128.ip a5, -352 +# CHECK: ee.ld.qacc_l.l.128.ip a5, -352 # encoding: [0x54,0x20,0x40] +ee.ld.ua_state.ip a3, 864 +# CHECK: ee.ld.ua_state.ip a3, 864 # encoding: [0x34,0x60,0x10] +ee.movi.32.a q7, a0, 1 +# CHECK: ee.movi.32.a q7, a0, 1 # encoding: [0x04,0xf5,0xfd] +ee.movi.32.q q5, a5, 3 +# CHECK: ee.movi.32.q q5, a5, 3 # encoding: [0x54,0xbe,0xed] +ee.mov.s16.qacc q1 +# CHECK: ee.mov.s16.qacc q1 # encoding: [0x24,0xff,0xcd] +ee.mov.s8.qacc q7 +# CHECK: ee.mov.s8.qacc q7 # encoding: [0x34,0xff,0xfd] +ee.mov.u16.qacc q2 +# CHECK: ee.mov.u16.qacc q2 # encoding: [0x64,0x7f,0xdd] +ee.mov.u8.qacc q2 +# CHECK: ee.mov.u8.qacc q2 # encoding: [0x74,0x7f,0xdd] +ee.notq q7, q0 +# CHECK: ee.notq q7, q0 # encoding: [0x04,0xff,0xfd] +ee.orq q1, q5, q3 +# CHECK: ee.orq q1, q5, q3 # encoding: [0xb4,0xf4,0xcd] +ee.slci.2q q7, q4, 2 +# CHECK: ee.slci.2q q7, q4, 2 # encoding: [0x24,0xc6,0xfc] +ee.slcxxp.2q q6, q7, a11, a4 +# CHECK: ee.slcxxp.2q q6, q7, a11, a4 # encoding: [0xb4,0x74,0xb6] +ee.srci.2q q6, q6, 14 +# CHECK: ee.srci.2q q6, q6, 14 # encoding: [0xe4,0x6a,0xfc] +ee.srcmb.s16.qacc q3, a7, 0 +# CHECK: ee.srcmb.s16.qacc q3, a7, 0 # encoding: [0x74,0xf2,0xdd] +ee.srcmb.s8.qacc q4, a1, 1 +# CHECK: ee.srcmb.s8.qacc q4, a1, 1 # encoding: [0x14,0x7e,0xed] +ee.srcq.128.st.incp q1, q4, a6 +# CHECK: ee.srcq.128.st.incp q1, q4, a6 # encoding: [0x64,0x1e,0xec] +ee.srcxxp.2q q6, q0, a2, a14 +# CHECK: ee.srcxxp.2q q6, q0, a2, a14 # encoding: [0x24,0x0e,0xf6] +ee.src.q q6, q7, q5 +# CHECK: ee.src.q q6, q7, q5 # encoding: [0x64,0xf3,0xec] +ee.src.q.ld.ip q2, a2, 1792, q6, q7 +# CHECK: ee.src.q.ld.ip q2, a2, 1792, q6, q7 # encoding: 
[0x02,0x87,0x05,0x1c] +ee.src.q.ld.xp q2, a4, a9, q1, q7 +# CHECK: ee.src.q.ld.xp q2, a4, a9, q1, q7 # encoding: [0x94,0x47,0x04,0x1d] +ee.src.q.qup q4, q3, q7 +# CHECK: ee.src.q.qup q4, q3, q7 # encoding: [0x44,0xb7,0xfc] +ee.srs.accx a12, a1, 0 +# CHECK: ee.srs.accx a12, a1, 0 # encoding: [0x14,0x1c,0x7e] +ee.stf.128.ip f4, f3, f8, f2, a4, -128 +# CHECK: ee.stf.128.ip f4, f3, f8, f2, a4, -128 # encoding: [0x04,0x82,0x43,0x12] +ee.stf.128.xp f2, f0, f5, f8, a11, a5 +# CHECK: ee.stf.128.xp f2, f0, f5, f8, a11, a5 # encoding: [0x5b,0x58,0x20,0x13] +ee.stf.64.ip f3, f6, a10, -848 +# CHECK: ee.stf.64.ip f3, f6, a10, -848 # encoding: [0x6a,0x36,0x58,0x1c] +ee.stf.64.xp f2, f1, a1, a14 +# CHECK: ee.stf.64.xp f2, f1, a1, a14 # encoding: [0x10,0x2e,0x17] +ee.stxq.32 q5, q2, a5, 0, 1 +# CHECK: ee.stxq.32 q5, q2, a5, 0, 1 # encoding: [0x05,0x8d,0xc0,0x1c] +ee.st.accx.ip a10, 24 +# CHECK: ee.st.accx.ip a10, 24 # encoding: [0xa4,0x18,0x02] +ee.st.qacc_h.h.32.ip a14, 380 +# CHECK: ee.st.qacc_h.h.32.ip a14, 380 # encoding: [0xe4,0x7c,0x12] +ee.st.qacc_h.l.128.ip a7, -624 +# CHECK: ee.st.qacc_h.l.128.ip a7, -624 # encoding: [0x74,0x10,0x4d] +ee.st.qacc_l.h.32.ip a10, -20 +# CHECK: ee.st.qacc_l.h.32.ip a10, -20 # encoding: [0xa4,0x6c,0x5d] +ee.st.qacc_l.l.128.ip a4, 1936 +# CHECK: ee.st.qacc_l.l.128.ip a4, 1936 # encoding: [0x44,0x10,0x4c] +ee.st.ua_state.ip a4, -1728 +# CHECK: ee.st.ua_state.ip a4, -1728 # encoding: [0x44,0x40,0x1c] +ee.vadds.s16 q5, q1, q4 +# CHECK: ee.vadds.s16 q5, q1, q4 # encoding: [0x64,0xc1,0xae] +ee.vadds.s16.ld.incp q6, a6, q1, q3, q1 +# CHECK: ee.vadds.s16.ld.incp q6, a6, q1, q3, q1 # encoding: [0xd6,0xca,0x62,0x1c] +ee.vadds.s16.st.incp q4, a0, q1, q3, q1 +# CHECK: ee.vadds.s16.st.incp q4, a0, q1, q3, q1 # encoding: [0x00,0xcc,0x92,0x1c] +ee.vadds.s32 q3, q5, q2 +# CHECK: ee.vadds.s32 q3, q5, q2 # encoding: [0x74,0x95,0x9e] +ee.vadds.s32.ld.incp q4, a4, q1, q6, q5 +# CHECK: ee.vadds.s32.ld.incp q4, a4, q1, q6, q5 # encoding: [0xd4,0xab,0x43,0x1c] 
+ee.vadds.s32.st.incp q5, a1, q0, q6, q0 +# CHECK: ee.vadds.s32.st.incp q5, a1, q0, q6, q0 # encoding: [0x11,0x85,0x91,0x1c] +ee.vadds.s8 q4, q4, q5 +# CHECK: ee.vadds.s8 q4, q4, q5 # encoding: [0x84,0x4c,0xae] +ee.vadds.s8.ld.incp q2, a14, q0, q3, q3 +# CHECK: ee.vadds.s8.ld.incp q2, a14, q0, q3, q3 # encoding: [0xce,0xd9,0x20,0x1c] +ee.vadds.s8.st.incp q0, a9, q4, q7, q0 +# CHECK: ee.vadds.s8.st.incp q0, a9, q4, q7, q0 # encoding: [0x29,0xc0,0x99,0x1c] +ee.vcmp.eq.s16 q5, q3, q0 +# CHECK: ee.vcmp.eq.s16 q5, q3, q0 # encoding: [0x94,0x83,0xae] +ee.vcmp.eq.s32 q5, q5, q4 +# CHECK: ee.vcmp.eq.s32 q5, q5, q4 # encoding: [0xa4,0xc5,0xae] +ee.vcmp.eq.s8 q0, q4, q2 +# CHECK: ee.vcmp.eq.s8 q0, q4, q2 # encoding: [0xb4,0x14,0x8e] +ee.vcmp.gt.s16 q1, q5, q2 +# CHECK: ee.vcmp.gt.s16 q1, q5, q2 # encoding: [0xc4,0x95,0x8e] +ee.vcmp.gt.s32 q4, q1, q5 +# CHECK: ee.vcmp.gt.s32 q4, q1, q5 # encoding: [0xd4,0x49,0xae] +ee.vcmp.gt.s8 q3, q6, q3 +# CHECK: ee.vcmp.gt.s8 q3, q6, q3 # encoding: [0xe4,0x9e,0x9e] +ee.vcmp.lt.s16 q3, q7, q0 +# CHECK: ee.vcmp.lt.s16 q3, q7, q0 # encoding: [0xf4,0x87,0x9e] +ee.vcmp.lt.s32 q2, q2, q1 +# CHECK: ee.vcmp.lt.s32 q2, q2, q1 # encoding: [0x04,0x2a,0x9e] +ee.vcmp.lt.s8 q7, q1, q6 +# CHECK: ee.vcmp.lt.s8 q7, q1, q6 # encoding: [0x14,0xf1,0xbe] +ee.vldbc.16 q6, a11 +# CHECK: ee.vldbc.16 q6, a11 # encoding: [0xb4,0x73,0xfd] +ee.vldbc.16.ip q6, a4, 124 +# CHECK: ee.vldbc.16.ip q6, a4, 124 # encoding: [0x44,0x7c,0xb5] +ee.vldbc.16.xp q2, a0, a7 +# CHECK: ee.vldbc.16.xp q2, a0, a7 # encoding: [0x04,0x47,0x9d] +ee.vldbc.32 q4, a0 +# CHECK: ee.vldbc.32 q4, a0 # encoding: [0x04,0x77,0xed] +ee.vldbc.32.ip q6, a12, 308 +# CHECK: ee.vldbc.32.ip q6, a12, 308 # encoding: [0xc4,0x34,0xb2] +ee.vldbc.32.xp q1, a11, a0 +# CHECK: ee.vldbc.32.xp q1, a11, a0 # encoding: [0xb4,0x90,0x8d] +ee.vldbc.8 q2, a3 +# CHECK: ee.vldbc.8 q2, a3 # encoding: [0x34,0x3b,0xdd] +ee.vldbc.8.ip q3, a3, 103 +# CHECK: ee.vldbc.8.ip q3, a3, 103 # encoding: [0x34,0xe7,0xd5] +ee.vldbc.8.xp 
q2, a0, a13 +# CHECK: ee.vldbc.8.xp q2, a0, a13 # encoding: [0x04,0x5d,0x9d] +ee.vldhbc.16.incp q5, q5, a14 +# CHECK: ee.vldhbc.16.incp q5, q5, a14 # encoding: [0xe4,0xd2,0xec] +ee.vld.128.ip q3, a14, 1248 +# CHECK: ee.vld.128.ip q3, a14, 1248 # encoding: [0xe4,0xe0,0xd3] +ee.vld.128.xp q5, a10, a12 +# CHECK: ee.vld.128.xp q5, a10, a12 # encoding: [0xa4,0xac,0xad] +ee.vld.h.64.ip q4, a14, 240 +# CHECK: ee.vld.h.64.ip q4, a14, 240 # encoding: [0xe4,0x70,0xe8] +ee.vld.h.64.xp q7, a4, a8 +# CHECK: ee.vld.h.64.xp q7, a4, a8 # encoding: [0x44,0xe8,0xbd] +ee.vld.l.64.ip q1, a8, 8 +# CHECK: ee.vld.l.64.ip q1, a8, 8 # encoding: [0x84,0x88,0x89] +ee.vld.l.64.xp q1, a2, a9 +# CHECK: ee.vld.l.64.xp q1, a2, a9 # encoding: [0x24,0xb9,0x8d] +ee.vmax.s16 q2, q5, q6 +# CHECK: ee.vmax.s16 q2, q5, q6 # encoding: [0x24,0x75,0x9e] +ee.vmax.s16.ld.incp q0, a0, q6, q1, q2 +# CHECK: ee.vmax.s16.ld.incp q0, a0, q6, q1, q2 # encoding: [0xd0,0x51,0x0c,0x1c] +ee.vmax.s16.st.incp q5, a10, q6, q6, q7 +# CHECK: ee.vmax.s16.st.incp q5, a10, q6, q6, q7 # encoding: [0x3a,0xbd,0x9d,0x1c] +ee.vmax.s32 q3, q2, q7 +# CHECK: ee.vmax.s32 q3, q2, q7 # encoding: [0x34,0xfa,0x9e] +ee.vmax.s32.ld.incp q1, a3, q1, q1, q0 +# CHECK: ee.vmax.s32.ld.incp q1, a3, q1, q1, q0 # encoding: [0xe3,0x41,0x12,0x1c] +ee.vmax.s32.st.incp q3, a12, q4, q6, q3 +# CHECK: ee.vmax.s32.st.incp q3, a12, q4, q6, q3 # encoding: [0x0c,0x9b,0xa9,0x1c] +ee.vmax.s8 q4, q1, q6 +# CHECK: ee.vmax.s8 q4, q1, q6 # encoding: [0x44,0x71,0xae] +ee.vmax.s8.ld.incp q3, a10, q5, q1, q5 +# CHECK: ee.vmax.s8.ld.incp q3, a10, q5, q1, q5 # encoding: [0xfa,0x69,0x3a,0x1c] +ee.vmax.s8.st.incp q3, a9, q3, q6, q7 +# CHECK: ee.vmax.s8.st.incp q3, a9, q3, q6, q7 # encoding: [0x09,0xbb,0xb7,0x1c] +ee.vmin.s16 q6, q2, q5 +# CHECK: ee.vmin.s16 q6, q2, q5 # encoding: [0x54,0x6a,0xbe] +ee.vmin.s16.ld.incp q5, a3, q2, q4, q0 +# CHECK: ee.vmin.s16.ld.incp q5, a3, q2, q4, q0 # encoding: [0xe3,0x02,0x55,0x1c] +ee.vmin.s16.st.incp q4, a9, q4, q6, q0 +# CHECK: 
ee.vmin.s16.st.incp q4, a9, q4, q6, q0 # encoding: [0x19,0x84,0xa9,0x1c] +ee.vmin.s32 q1, q1, q6 +# CHECK: ee.vmin.s32 q1, q1, q6 # encoding: [0x64,0xf1,0x8e] +ee.vmin.s32.ld.incp q0, a1, q3, q2, q0 +# CHECK: ee.vmin.s32.ld.incp q0, a1, q3, q2, q0 # encoding: [0xe1,0x83,0x06,0x1c] +ee.vmin.s32.st.incp q0, a12, q4, q4, q3 +# CHECK: ee.vmin.s32.st.incp q0, a12, q4, q4, q3 # encoding: [0x1c,0x18,0xb9,0x1c] +ee.vmin.s8 q7, q6, q0 +# CHECK: ee.vmin.s8 q7, q6, q0 # encoding: [0x74,0xa6,0xbe] +ee.vmin.s8.ld.incp q2, a13, q7, q7, q3 +# CHECK: ee.vmin.s8.ld.incp q2, a13, q7, q7, q3 # encoding: [0xfd,0xda,0x2f,0x1c] +ee.vmin.s8.st.incp q2, a4, q4, q7, q1 +# CHECK: ee.vmin.s8.st.incp q2, a4, q4, q7, q1 # encoding: [0x24,0xca,0xa9,0x1c] +ee.vmulas.s16.accx q0, q7 +# CHECK: ee.vmulas.s16.accx q0, q7 # encoding: [0x84,0x58,0x1a] +ee.vmulas.s16.accx.ld.ip q7, a7, -16, q2, q0 +# CHECK: ee.vmulas.s16.accx.ld.ip q7, a7, -16, q2, q0 # encoding: [0x07,0x80,0xf0,0x1f] +ee.vmulas.s16.accx.ld.ip.qup q5, a14, 32, q0, q2, q0, q2 +# CHECK: ee.vmulas.s16.accx.ld.ip.qup q5, a14, 32, q0, q2, q0, q2 # encoding: [0x0e,0x10,0x54,0x01] +ee.vmulas.s16.accx.ld.xp q1, a0, a1, q2, q6 +# CHECK: ee.vmulas.s16.accx.ld.xp q1, a0, a1, q2, q6 # encoding: [0x10,0xb1,0x10,0x1e] +ee.vmulas.s16.accx.ld.xp.qup q4, a8, a10, q4, q0, q0, q3 +# CHECK: ee.vmulas.s16.accx.ld.xp.qup q4, a8, a10, q4, q0, q0, q3 # encoding: [0xa8,0x00,0x47,0x16] +ee.vmulas.s16.qacc q0, q6 +# CHECK: ee.vmulas.s16.qacc q0, q6 # encoding: [0x84,0x70,0x1a] +ee.vmulas.s16.qacc.ldbc.incp q2, a6, q3, q4 +# CHECK: ee.vmulas.s16.qacc.ldbc.incp q2, a6, q3, q4 # encoding: [0x64,0xc3,0x87] +ee.vmulas.s16.qacc.ldbc.incp.qup q0, a4, q1, q6, q4, q5 +# CHECK: ee.vmulas.s16.qacc.ldbc.incp.qup q0, a4, q1, q6, q4, q5 # encoding: [0x84,0x74,0x0a,0x1c] +ee.vmulas.s16.qacc.ld.ip q7, a7, -64, q7, q7 +# CHECK: ee.vmulas.s16.qacc.ld.ip q7, a7, -64, q7, q7 # encoding: [0x07,0xf8,0x73,0x1e] +ee.vmulas.s16.qacc.ld.ip.qup q0, a10, 48, q3, q6, q3, q6 +# CHECK: 
ee.vmulas.s16.qacc.ld.ip.qup q0, a10, 48, q3, q6, q3, q6 # encoding: [0x0a,0xf3,0x8c,0x03] +ee.vmulas.s16.qacc.ld.xp q3, a11, a4, q4, q5 +# CHECK: ee.vmulas.s16.qacc.ld.xp q3, a11, a4, q4, q5 # encoding: [0x4b,0x29,0x33,0x1e] +ee.vmulas.s16.qacc.ld.xp.qup q2, a9, a1, q3, q2, q1, q7 +# CHECK: ee.vmulas.s16.qacc.ld.xp.qup q2, a9, a1, q3, q2, q1, q7 # encoding: [0x19,0xd1,0xae,0x16] +ee.vmulas.s8.accx q1, q0 +# CHECK: ee.vmulas.s8.accx q1, q0 # encoding: [0xc4,0x01,0x1a] +ee.vmulas.s8.accx.ld.ip q2, a8, 80, q3, q0 +# CHECK: ee.vmulas.s8.accx.ld.ip q2, a8, 80, q3, q0 # encoding: [0x08,0xc0,0xa4,0x1e] +ee.vmulas.s8.accx.ld.ip.qup q2, a9, -80, q1, q2, q6, q3 +# CHECK: ee.vmulas.s8.accx.ld.ip.qup q2, a9, -80, q1, q2, q6, q3 # encoding: [0x09,0x56,0xa6,0x05] +ee.vmulas.s8.accx.ld.xp q3, a3, a4, q4, q7 +# CHECK: ee.vmulas.s8.accx.ld.xp q3, a3, a4, q4, q7 # encoding: [0x43,0x39,0x35,0x1e] +ee.vmulas.s8.accx.ld.xp.qup q0, a3, a1, q4, q5, q3, q3 +# CHECK: ee.vmulas.s8.accx.ld.xp.qup q0, a3, a1, q4, q5, q3, q3 # encoding: [0x13,0x2b,0x07,0x17] +ee.vmulas.s8.qacc q5, q7 +# CHECK: ee.vmulas.s8.qacc q5, q7 # encoding: [0xc4,0x7d,0x1a] +ee.vmulas.s8.qacc.ldbc.incp q7, a1, q6, q1 +# CHECK: ee.vmulas.s8.qacc.ldbc.incp q7, a1, q6, q1 # encoding: [0x14,0xae,0xb7] +ee.vmulas.s8.qacc.ldbc.incp.qup q3, a11, q4, q6, q5, q6 +# CHECK: ee.vmulas.s8.qacc.ldbc.incp.qup q3, a11, q4, q6, q5, q6 # encoding: [0x9b,0x35,0x3d,0x1c] +ee.vmulas.s8.qacc.ld.ip q5, a10, -16, q0, q0 +# CHECK: ee.vmulas.s8.qacc.ld.ip q5, a10, -16, q0, q0 # encoding: [0x0a,0x00,0xd6,0x1f] +ee.vmulas.s8.qacc.ld.ip.qup q7, a9, -48, q6, q2, q1, q2 +# CHECK: ee.vmulas.s8.qacc.ld.ip.qup q7, a9, -48, q6, q2, q1, q2 # encoding: [0x09,0x91,0xf5,0x06] +ee.vmulas.s8.qacc.ld.xp q1, a1, a12, q5, q0 +# CHECK: ee.vmulas.s8.qacc.ld.xp q1, a1, a12, q5, q0 # encoding: [0xc1,0x41,0x17,0x1e] +ee.vmulas.s8.qacc.ld.xp.qup q0, a1, a14, q1, q6, q2, q4 +# CHECK: ee.vmulas.s8.qacc.ld.xp.qup q0, a1, a14, q1, q6, q2, q4 # encoding: 
[0xe1,0x72,0x88,0x17] +ee.vmulas.u16.accx q7, q1 +# CHECK: ee.vmulas.u16.accx q7, q1 # encoding: [0x84,0x0f,0x0a] +ee.vmulas.u16.accx.ld.ip q5, a8, -32, q1, q4 +# CHECK: ee.vmulas.u16.accx.ld.ip q5, a8, -32, q1, q4 # encoding: [0x08,0x60,0x58,0x1f] +ee.vmulas.u16.accx.ld.ip.qup q1, a0, 48, q7, q4, q4, q0 +# CHECK: ee.vmulas.u16.accx.ld.ip.qup q1, a0, 48, q7, q4, q4, q0 # encoding: [0x00,0xe4,0x91,0x09] +ee.vmulas.u16.accx.ld.xp q3, a14, a4, q5, q4 +# CHECK: ee.vmulas.u16.accx.ld.xp q3, a14, a4, q5, q4 # encoding: [0x4e,0x61,0x39,0x1e] +ee.vmulas.u16.accx.ld.xp.qup q4, a3, a7, q6, q2, q4, q4 +# CHECK: ee.vmulas.u16.accx.ld.xp.qup q4, a3, a7, q6, q2, q4, q4 # encoding: [0x73,0x94,0x49,0x18] +ee.vmulas.u16.qacc q5, q5 +# CHECK: ee.vmulas.u16.qacc q5, q5 # encoding: [0x84,0x6d,0x0a] +ee.vmulas.u16.qacc.ldbc.incp q6, a7, q0, q3 +# CHECK: ee.vmulas.u16.qacc.ldbc.incp q6, a7, q0, q3 # encoding: [0x74,0x98,0xd7] +ee.vmulas.u16.qacc.ldbc.incp.qup q0, a12, q6, q3, q2, q0 +# CHECK: ee.vmulas.u16.qacc.ldbc.incp.qup q0, a12, q6, q3, q2, q0 # encoding: [0xac,0x9a,0x01,0x1c] +ee.vmulas.u16.qacc.ld.ip q4, a10, 16, q3, q2 +# CHECK: ee.vmulas.u16.qacc.ld.ip q4, a10, 16, q3, q2 # encoding: [0x0a,0xd0,0xca,0x1e] +ee.vmulas.u16.qacc.ld.ip.qup q2, a4, 0, q5, q4, q2, q6 +# CHECK: ee.vmulas.u16.qacc.ld.ip.qup q2, a4, 0, q5, q4, q2, q6 # encoding: [0x04,0x62,0x2d,0x0a] +ee.vmulas.u16.qacc.ld.xp q6, a14, a2, q4, q0 +# CHECK: ee.vmulas.u16.qacc.ld.xp q6, a14, a2, q4, q0 # encoding: [0x2e,0x01,0x6b,0x1e] +ee.vmulas.u16.qacc.ld.xp.qup q6, a12, a11, q6, q7, q4, q1 +# CHECK: ee.vmulas.u16.qacc.ld.xp.qup q6, a12, a11, q6, q7, q4, q1 # encoding: [0xbc,0xbc,0xe3,0x18] +ee.vmulas.u8.accx q2, q1 +# CHECK: ee.vmulas.u8.accx q2, q1 # encoding: [0xc4,0x0a,0x0a] +ee.vmulas.u8.accx.ld.ip q6, a3, -112, q2, q7 +# CHECK: ee.vmulas.u8.accx.ld.ip q6, a3, -112, q2, q7 # encoding: [0x03,0xb8,0xec,0x1e] +ee.vmulas.u8.accx.ld.ip.qup q7, a3, -32, q3, q3, q7, q5 +# CHECK: ee.vmulas.u8.accx.ld.ip.qup q7, a3, -32, q3, 
q3, q7, q5 # encoding: [0x03,0xdf,0x7a,0x0d] +ee.vmulas.u8.accx.ld.xp q4, a4, a9, q4, q0 +# CHECK: ee.vmulas.u8.accx.ld.xp q4, a4, a9, q4, q0 # encoding: [0x94,0x01,0x4d,0x1e] +ee.vmulas.u8.accx.ld.xp.qup q5, a7, a13, q4, q7, q2, q6 +# CHECK: ee.vmulas.u8.accx.ld.xp.qup q5, a7, a13, q4, q7, q2, q6 # encoding: [0xd7,0x3a,0x5d,0x19] +ee.vmulas.u8.qacc q3, q6 +# CHECK: ee.vmulas.u8.qacc q3, q6 # encoding: [0xc4,0x73,0x0a] +ee.vmulas.u8.qacc.ldbc.incp q4, a1, q0, q5 +# CHECK: ee.vmulas.u8.qacc.ldbc.incp q4, a1, q0, q5 # encoding: [0x14,0x48,0xf7] +ee.vmulas.u8.qacc.ldbc.incp.qup q2, a1, q5, q7, q6, q4 +# CHECK: ee.vmulas.u8.qacc.ldbc.incp.qup q2, a1, q5, q7, q6, q4 # encoding: [0xb1,0x7e,0x29,0x1c] +ee.vmulas.u8.qacc.ld.ip q2, a12, 32, q1, q4 +# CHECK: ee.vmulas.u8.qacc.ld.ip q2, a12, 32, q1, q4 # encoding: [0x0c,0x60,0x2e,0x1f] +ee.vmulas.u8.qacc.ld.ip.qup q0, a6, 48, q0, q0, q6, q0 +# CHECK: ee.vmulas.u8.qacc.ld.ip.qup q0, a6, 48, q0, q0, q6, q0 # encoding: [0x06,0x06,0x80,0x0f] +ee.vmulas.u8.qacc.ld.xp q6, a1, a1, q2, q5 +# CHECK: ee.vmulas.u8.qacc.ld.xp q6, a1, a1, q2, q5 # encoding: [0x11,0xa9,0x6e,0x1e] +ee.vmulas.u8.qacc.ld.xp.qup q1, a8, a10, q3, q7, q1, q3 +# CHECK: ee.vmulas.u8.qacc.ld.xp.qup q1, a8, a10, q3, q7, q1, q3 # encoding: [0xa8,0xf9,0x96,0x19] +ee.vmul.s16 q0, q4, q1 +# CHECK: ee.vmul.s16 q0, q4, q1 # encoding: [0x84,0x2c,0x8e] +ee.vmul.s16.ld.incp q4, a5, q1, q5, q5 +# CHECK: ee.vmul.s16.ld.incp q4, a5, q1, q5, q5 # encoding: [0xf5,0x6b,0x43,0x1c] +ee.vmul.s16.st.incp q4, a4, q2, q5, q0 +# CHECK: ee.vmul.s16.st.incp q4, a4, q2, q5, q0 # encoding: [0x24,0x44,0xb5,0x1c] +ee.vmul.s8 q5, q3, q2 +# CHECK: ee.vmul.s8 q5, q3, q2 # encoding: [0x94,0xb3,0xae] +ee.vmul.s8.ld.incp q6, a11, q3, q6, q4 +# CHECK: ee.vmul.s8.ld.incp q6, a11, q3, q6, q4 # encoding: [0xcb,0xa4,0x67,0x1c] +ee.vmul.s8.st.incp q5, a5, q5, q2, q4 +# CHECK: ee.vmul.s8.st.incp q5, a5, q5, q2, q4 # encoding: [0x35,0xa5,0xaa,0x1c] +ee.vmul.u16 q0, q0, q5 +# CHECK: ee.vmul.u16 q0, q0, q5 # 
encoding: [0xa4,0x68,0x8e] +ee.vmul.u16.ld.incp q4, a2, q0, q1, q1 +# CHECK: ee.vmul.u16.ld.incp q4, a2, q0, q1, q1 # encoding: [0xc2,0x4d,0x40,0x1c] +ee.vmul.u16.st.incp q6, a5, q1, q2, q7 +# CHECK: ee.vmul.u16.st.incp q6, a5, q1, q2, q7 # encoding: [0x35,0xbe,0xb2,0x1c] +ee.vmul.u8 q6, q4, q5 +# CHECK: ee.vmul.u8 q6, q4, q5 # encoding: [0xb4,0x6c,0xbe] +ee.vmul.u8.ld.incp q1, a5, q4, q1, q1 +# CHECK: ee.vmul.u8.ld.incp q1, a5, q4, q1, q1 # encoding: [0xc5,0x4e,0x18,0x1c] +ee.vmul.u8.st.incp q4, a12, q5, q0, q4 +# CHECK: ee.vmul.u8.st.incp q4, a12, q5, q0, q4 # encoding: [0x0c,0x24,0x1a,0x1d] +ee.vprelu.s16 q2, q7, q0, a1 +# CHECK: ee.vprelu.s16 q2, q7, q0, a1 # encoding: [0x14,0x07,0x9c] +ee.vprelu.s8 q5, q6, q5, a13 +# CHECK: ee.vprelu.s8 q5, q6, q5, a13 # encoding: [0xd4,0xee,0xac] +ee.vrelu.s16 q2, a14, a5 +# CHECK: ee.vrelu.s16 q2, a14, a5 # encoding: [0x54,0x1e,0xdd] +ee.vrelu.s8 q4, a14, a1 +# CHECK: ee.vrelu.s8 q4, a14, a1 # encoding: [0x14,0x5e,0xed] +ee.vsl.32 q0, q1 +# CHECK: ee.vsl.32 q0, q1 # encoding: [0x04,0xbf,0xcd] +ee.vsmulas.s16.qacc q2, q7, 2 +# CHECK: ee.vsmulas.s16.qacc q2, q7, 2 # encoding: [0xc4,0x7a,0x9e] +ee.vsmulas.s16.qacc.ld.incp q7, a3, q3, q4, 3 +# CHECK: ee.vsmulas.s16.qacc.ld.incp q7, a3, q3, q4, 3 # encoding: [0xc3,0xe7,0x76,0x1c] +ee.vsmulas.s8.qacc q3, q6, 3 +# CHECK: ee.vsmulas.s8.qacc q3, q6, 3 # encoding: [0x54,0xd3,0x8e] +ee.vsmulas.s8.qacc.ld.incp q1, a8, q1, q1, 4 +# CHECK: ee.vsmulas.s8.qacc.ld.incp q1, a8, q1, q1, 4 # encoding: [0xc8,0x4a,0x14,0x1c] +ee.vsr.32 q4, q3 +# CHECK: ee.vsr.32 q4, q3 # encoding: [0xc4,0xbf,0xdd] +ee.vst.128.ip q3, a6, -816 +# CHECK: ee.vst.128.ip q3, a6, -816 # encoding: [0x64,0xd0,0xda] +ee.vst.128.xp q6, a12, a14 +# CHECK: ee.vst.128.xp q6, a12, a14 # encoding: [0xc4,0x7e,0xbd] +ee.vst.h.64.ip q2, a5, 40 +# CHECK: ee.vst.h.64.ip q2, a5, 40 # encoding: [0x54,0x28,0x9b] +ee.vst.h.64.xp q2, a13, a6 +# CHECK: ee.vst.h.64.xp q2, a13, a6 # encoding: [0xd4,0x06,0xdd] +ee.vst.l.64.ip q5, a8, 16 +# 
CHECK: ee.vst.l.64.ip q5, a8, 16 # encoding: [0x84,0x90,0xa4] +ee.vst.l.64.xp q0, a13, a6 +# CHECK: ee.vst.l.64.xp q0, a13, a6 # encoding: [0xd4,0x46,0xcd] +ee.vsubs.s16 q5, q1, q4 +# CHECK: ee.vsubs.s16 q5, q1, q4 # encoding: [0xd4,0xe1,0xae] +ee.vsubs.s16.ld.incp q1, a4, q6, q0, q1 +# CHECK: ee.vsubs.s16.ld.incp q1, a4, q6, q0, q1 # encoding: [0xd4,0x0c,0x1c,0x1c] +ee.vsubs.s16.st.incp q7, a13, q7, q5, q2 +# CHECK: ee.vsubs.s16.st.incp q7, a13, q7, q5, q2 # encoding: [0x1d,0x57,0x1f,0x1d] +ee.vsubs.s32 q2, q7, q6 +# CHECK: ee.vsubs.s32 q2, q7, q6 # encoding: [0xe4,0x77,0x9e] +ee.vsubs.s32.ld.incp q1, a8, q1, q4, q0 +# CHECK: ee.vsubs.s32.ld.incp q1, a8, q1, q4, q0 # encoding: [0xd8,0x05,0x13,0x1c] +ee.vsubs.s32.st.incp q1, a5, q7, q4, q0 +# CHECK: ee.vsubs.s32.st.incp q1, a5, q7, q4, q0 # encoding: [0x25,0x01,0x1f,0x1d] +ee.vsubs.s8 q7, q1, q5 +# CHECK: ee.vsubs.s8 q7, q1, q5 # encoding: [0xf4,0xe9,0xbe] +ee.vsubs.s8.ld.incp q4, a2, q6, q1, q6 +# CHECK: ee.vsubs.s8.ld.incp q4, a2, q6, q1, q6 # encoding: [0xd2,0x76,0x4c,0x1c] +ee.vsubs.s8.st.incp q6, a1, q6, q2, q3 +# CHECK: ee.vsubs.s8.st.incp q6, a1, q6, q2, q3 # encoding: [0x31,0x9e,0x1c,0x1d] +ee.vunzip.16 q6, q5 +# CHECK: ee.vunzip.16 q6, q5 # encoding: [0x84,0xe3,0xec] +ee.vunzip.32 q0, q6 +# CHECK: ee.vunzip.32 q0, q6 # encoding: [0x94,0x03,0xfc] +ee.vunzip.8 q5, q1 +# CHECK: ee.vunzip.8 q5, q1 # encoding: [0xa4,0xd3,0xcc] +ee.vzip.16 q2, q0 +# CHECK: ee.vzip.16 q2, q0 # encoding: [0xb4,0x23,0xcc] +ee.vzip.32 q0, q3 +# CHECK: ee.vzip.32 q0, q3 # encoding: [0xc4,0x83,0xdc] +ee.vzip.8 q4, q5 +# CHECK: ee.vzip.8 q4, q5 # encoding: [0xd4,0xc3,0xec] +ee.xorq q1, q3, q4 +# CHECK: ee.xorq q1, q3, q4 # encoding: [0x54,0xb9,0xcd] +ee.zero.accx +# CHECK: ee.zero.accx # encoding: [0x04,0x08,0x25] +ee.zero.q q0 +# CHECK: ee.zero.q q0 # encoding: [0xa4,0x7f,0xcd] +ee.zero.qacc +# CHECK: ee.zero.qacc # encoding: [0x44,0x08,0x25] + +rur.accx_0 a11 +# CHECK: rur a11, accx # encoding: [0xd0,0xbe,0xe3] +rur.accx_1 a11 +# 
CHECK: rur a11, accx # encoding: [0xd0,0xbe,0xe3] +rur.fft_bit_width a11 +# CHECK: rur a11, fft_bit_width # encoding: [0xf0,0xbe,0xe3] +rur.gpio_out a3 +# CHECK: rur a3, gpio_out # encoding: [0x00,0x30,0xe3] +rur.qacc_h_0 a1 +# CHECK: rur a1, qacc # encoding: [0xe0,0x1e,0xe3] +rur.qacc_h_1 a10 +# CHECK: rur a10, qacc # encoding: [0xe0,0xae,0xe3] +rur.qacc_h_2 a2 +# CHECK: rur a2, qacc # encoding: [0xe0,0x2e,0xe3] +rur.qacc_h_3 a11 +# CHECK: rur a11, qacc # encoding: [0xe0,0xbe,0xe3] +rur.qacc_h_4 a13 +# CHECK: rur a13, qacc # encoding: [0xe0,0xde,0xe3] +rur.qacc_l_0 a8 +# CHECK: rur a8, qacc # encoding: [0xe0,0x8e,0xe3] +rur.qacc_l_1 a7 +# CHECK: rur a7, qacc # encoding: [0xe0,0x7e,0xe3] +rur.qacc_l_2 a2 +# CHECK: rur a2, qacc # encoding: [0xe0,0x2e,0xe3] +rur.qacc_l_3 a13 +# CHECK: rur a13, qacc # encoding: [0xe0,0xde,0xe3] +rur.qacc_l_4 a7 +# CHECK: rur a7, qacc # encoding: [0xe0,0x7e,0xe3] +rur.sar_byte a9 +# CHECK: rur a9, sar_byte # encoding: [0x00,0x9f,0xe3] +rur.ua_state_0 a12 +# CHECK: rur a12, ua_state # encoding: [0x10,0xcf,0xe3] +rur.ua_state_1 a2 +# CHECK: rur a2, ua_state # encoding: [0x10,0x2f,0xe3] +rur.ua_state_2 a5 +# CHECK: rur a5, ua_state # encoding: [0x10,0x5f,0xe3] +rur.ua_state_3 a3 +# CHECK: rur a3, ua_state # encoding: [0x10,0x3f,0xe3] +wur.accx_0 a6 +# CHECK: wur a6, accx # encoding: [0x60,0xed,0xf3] +wur.accx_1 a6 +# CHECK: wur a6, accx # encoding: [0x60,0xed,0xf3] +wur.fft_bit_width a13 +# CHECK: wur a13, fft_bit_width # encoding: [0xd0,0xef,0xf3] +wur.gpio_out a0 +# CHECK: wur a0, gpio_out # encoding: [0x00,0x00,0xf3] +wur.qacc_h_0 a12 +# CHECK: wur a12, qacc # encoding: [0xc0,0xee,0xf3] +wur.qacc_h_1 a1 +# CHECK: wur a1, qacc # encoding: [0x10,0xee,0xf3] +wur.qacc_h_2 a2 +# CHECK: wur a2, qacc # encoding: [0x20,0xee,0xf3] +wur.qacc_h_3 a12 +# CHECK: wur a12, qacc # encoding: [0xc0,0xee,0xf3] +wur.qacc_h_4 a14 +# CHECK: wur a14, qacc # encoding: [0xe0,0xee,0xf3] +wur.qacc_l_0 a6 +# CHECK: wur a6, qacc # encoding: [0x60,0xee,0xf3] 
+wur.qacc_l_1 a5 +# CHECK: wur a5, qacc # encoding: [0x50,0xee,0xf3] +wur.qacc_l_2 a6 +# CHECK: wur a6, qacc # encoding: [0x60,0xee,0xf3] +wur.qacc_l_3 a6 +# CHECK: wur a6, qacc # encoding: [0x60,0xee,0xf3] +wur.qacc_l_4 a7 +# CHECK: wur a7, qacc # encoding: [0x70,0xee,0xf3] +wur.sar_byte a9 +# CHECK: wur a9, sar_byte # encoding: [0x90,0xf0,0xf3] +wur.ua_state_0 a8 +# CHECK: wur a8, ua_state # encoding: [0x80,0xf1,0xf3] +wur.ua_state_1 a14 +# CHECK: wur a14, ua_state # encoding: [0xe0,0xf1,0xf3] +wur.ua_state_2 a9 +# CHECK: wur a9, ua_state # encoding: [0x90,0xf1,0xf3] +wur.ua_state_3 a10 +# CHECK: wur a10, ua_state # encoding: [0xa0,0xf1,0xf3] +mv.qr q4, q7 +# CHECK: mv.qr q4, q7 # encoding: [0x24,0x0c,0xaf] From 902086bc44bb95391ddc65822f9555c35fdc679f Mon Sep 17 00:00:00 2001 From: Stefan Stipanovic Date: Tue, 8 Aug 2023 19:29:45 +0200 Subject: [PATCH 150/289] [Xtensa][esp32s3] Fix encoding for immediates with step increment --- .../MCTargetDesc/XtensaMCCodeEmitter.cpp | 14 +-- llvm/test/MC/Xtensa/xtensa-esp32s3-valid.s | 90 +++++++++---------- 2 files changed, 52 insertions(+), 52 deletions(-) diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp index 1e960cdce26fd..02d4d91bf1af7 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp @@ -711,7 +711,7 @@ XtensaMCCodeEmitter::getOffset_16_16OpValue(const MCInst &MI, unsigned OpNo, assert(((Res >= -128) && (Res <= 112) && ((Res & 0xf) == 0)) && "Unexpected operand value!"); - return Res; + return Res / 16; } int16_t @@ -724,7 +724,7 @@ XtensaMCCodeEmitter::getOffset_256_8OpValue(const MCInst &MI, unsigned OpNo, assert(((Res >= -1024) && (Res <= 1016) && ((Res & 0x7) == 0)) && "Unexpected operand value!"); - return Res; + return Res / 8; } int16_t @@ -736,8 +736,8 @@ XtensaMCCodeEmitter::getOffset_256_16OpValue(const MCInst &MI, unsigned OpNo, 
assert(((Res >= -2048) && (Res <= 2032) && ((Res & 0xf) == 0)) && "Unexpected operand value!"); - - return Res; + + return Res / 16; } int16_t @@ -750,7 +750,7 @@ XtensaMCCodeEmitter::getOffset_256_4OpValue(const MCInst &MI, unsigned OpNo, assert(((Res >= -512) && (Res <= 508) && ((Res & 0x3) == 0)) && "Unexpected operand value!"); - return Res; + return Res / 4; } uint8_t @@ -763,7 +763,7 @@ XtensaMCCodeEmitter::getOffset_128_2OpValue(const MCInst &MI, unsigned OpNo, assert(((Res >= 0) && (Res <= 254) && ((Res & 0x1) == 0)) && "Unexpected operand value!"); - return Res; + return Res / 2; } uint8_t @@ -788,7 +788,7 @@ XtensaMCCodeEmitter::getOffset_64_16OpValue(const MCInst &MI, unsigned OpNo, assert(((Res >= -512) && (Res <= 496) && ((Res & 0xf) == 0)) && "Unexpected operand value!"); - return Res; + return Res / 16; } #include "XtensaGenMCCodeEmitter.inc" diff --git a/llvm/test/MC/Xtensa/xtensa-esp32s3-valid.s b/llvm/test/MC/Xtensa/xtensa-esp32s3-valid.s index ee37f4bca7b1b..c5ed22386ec0c 100644 --- a/llvm/test/MC/Xtensa/xtensa-esp32s3-valid.s +++ b/llvm/test/MC/Xtensa/xtensa-esp32s3-valid.s @@ -49,47 +49,47 @@ ee.fft.r2bf.s16.st.incp q7, q3, q7, a2, 2 ee.fft.vst.r32.decp q3, a14, 0 # CHECK: ee.fft.vst.r32.decp q3, a14, 0 # encoding: [0xe4,0xb3,0xdd] ee.ldf.128.ip f3, f5, f8, f0, a13, 64 -# CHECK: ee.ldf.128.ip f3, f5, f8, f0, a13, 64 # encoding: [0x0d,0x80,0x35,0x10] +# CHECK: ee.ldf.128.ip f3, f5, f8, f0, a13, 64 # encoding: [0x4d,0x80,0x35,0x10] ee.ldf.128.xp f5, f2, f4, f4, a7, a8 # CHECK: ee.ldf.128.xp f5, f2, f4, f4, a7, a8 # encoding: [0x87,0x44,0x52,0x11] ee.ldf.64.ip f6, f5, a1, 488 -# CHECK: ee.ldf.64.ip f6, f5, a1, 488 # encoding: [0x41,0x65,0x74,0x1c] +# CHECK: ee.ldf.64.ip f6, f5, a1, 488 # encoding: [0x51,0x65,0x1e,0x1c] ee.ldf.64.xp f0, f6, a3, a8 # CHECK: ee.ldf.64.xp f0, f6, a3, a8 # encoding: [0x30,0x08,0x66] ee.ldqa.s16.128.ip a11, 1904 -# CHECK: ee.ldqa.s16.128.ip a11, 1904 # encoding: [0xb4,0x70,0x01] +# CHECK: ee.ldqa.s16.128.ip a11, 1904 # 
encoding: [0xb4,0x77,0x01] ee.ldqa.s16.128.xp a6, a2 # CHECK: ee.ldqa.s16.128.xp a6, a2 # encoding: [0x64,0x42,0x7e] ee.ldqa.s8.128.ip a8, 320 -# CHECK: ee.ldqa.s8.128.ip a8, 320 # encoding: [0x84,0x40,0x11] +# CHECK: ee.ldqa.s8.128.ip a8, 320 # encoding: [0x84,0x14,0x11] ee.ldqa.s8.128.xp a6, a11 # CHECK: ee.ldqa.s8.128.xp a6, a11 # encoding: [0x64,0x4b,0x71] ee.ldqa.u16.128.ip a2, -1424 -# CHECK: ee.ldqa.u16.128.ip a2, -1424 # encoding: [0x24,0x70,0x05] +# CHECK: ee.ldqa.u16.128.ip a2, -1424 # encoding: [0x24,0x27,0x45] ee.ldqa.u16.128.xp a3, a4 # CHECK: ee.ldqa.u16.128.xp a3, a4 # encoding: [0x34,0x44,0x7a] ee.ldqa.u8.128.ip a4, 784 -# CHECK: ee.ldqa.u8.128.ip a4, 784 # encoding: [0x44,0x10,0x15] +# CHECK: ee.ldqa.u8.128.ip a4, 784 # encoding: [0x44,0x31,0x15] ee.ldqa.u8.128.xp a4, a9 # CHECK: ee.ldqa.u8.128.xp a4, a9 # encoding: [0x44,0x49,0x70] ee.ldxq.32 q2, q6, a11, 2, 1 # CHECK: ee.ldxq.32 q2, q6, a11, 2, 1 # encoding: [0xdb,0x8f,0x25,0x1c] ee.ld.128.usar.ip q4, a8, -592 -# CHECK: ee.ld.128.usar.ip q4, a8, -592 # encoding: [0x84,0x30,0xe1] +# CHECK: ee.ld.128.usar.ip q4, a8, -592 # encoding: [0x84,0x5b,0xe1] ee.ld.128.usar.xp q1, a9, a7 # CHECK: ee.ld.128.usar.xp q1, a9, a7 # encoding: [0x94,0x87,0x8d] ee.ld.accx.ip a2, 720 -# CHECK: ee.ld.accx.ip a2, 720 # encoding: [0x24,0x50,0x4e] +# CHECK: ee.ld.accx.ip a2, 720 # encoding: [0x24,0x5a,0x0e] ee.ld.qacc_h.h.32.ip a6, -292 -# CHECK: ee.ld.qacc_h.h.32.ip a6, -292 # encoding: [0x64,0x5c,0x5e] +# CHECK: ee.ld.qacc_h.h.32.ip a6, -292 # encoding: [0x64,0x37,0x5e] ee.ld.qacc_h.l.128.ip a14, 96 -# CHECK: ee.ld.qacc_h.l.128.ip a14, 96 # encoding: [0xe4,0x60,0x06] +# CHECK: ee.ld.qacc_h.l.128.ip a14, 96 # encoding: [0xe4,0x06,0x06] ee.ld.qacc_l.h.32.ip a0, -184 -# CHECK: ee.ld.qacc_l.h.32.ip a0, -184 # encoding: [0x04,0x48,0x16] +# CHECK: ee.ld.qacc_l.h.32.ip a0, -184 # encoding: [0x04,0x52,0x56] ee.ld.qacc_l.l.128.ip a5, -352 -# CHECK: ee.ld.qacc_l.l.128.ip a5, -352 # encoding: [0x54,0x20,0x40] +# CHECK: 
ee.ld.qacc_l.l.128.ip a5, -352 # encoding: [0x54,0x6a,0x40] ee.ld.ua_state.ip a3, 864 -# CHECK: ee.ld.ua_state.ip a3, 864 # encoding: [0x34,0x60,0x10] +# CHECK: ee.ld.ua_state.ip a3, 864 # encoding: [0x34,0x36,0x10] ee.movi.32.a q7, a0, 1 # CHECK: ee.movi.32.a q7, a0, 1 # encoding: [0x04,0xf5,0xfd] ee.movi.32.q q5, a5, 3 @@ -123,7 +123,7 @@ ee.srcxxp.2q q6, q0, a2, a14 ee.src.q q6, q7, q5 # CHECK: ee.src.q q6, q7, q5 # encoding: [0x64,0xf3,0xec] ee.src.q.ld.ip q2, a2, 1792, q6, q7 -# CHECK: ee.src.q.ld.ip q2, a2, 1792, q6, q7 # encoding: [0x02,0x87,0x05,0x1c] +# CHECK: ee.src.q.ld.ip q2, a2, 1792, q6, q7 # encoding: [0x02,0xa7,0x35,0x1c] ee.src.q.ld.xp q2, a4, a9, q1, q7 # CHECK: ee.src.q.ld.xp q2, a4, a9, q1, q7 # encoding: [0x94,0x47,0x04,0x1d] ee.src.q.qup q4, q3, q7 @@ -131,27 +131,27 @@ ee.src.q.qup q4, q3, q7 ee.srs.accx a12, a1, 0 # CHECK: ee.srs.accx a12, a1, 0 # encoding: [0x14,0x1c,0x7e] ee.stf.128.ip f4, f3, f8, f2, a4, -128 -# CHECK: ee.stf.128.ip f4, f3, f8, f2, a4, -128 # encoding: [0x04,0x82,0x43,0x12] +# CHECK: ee.stf.128.ip f4, f3, f8, f2, a4, -128 # encoding: [0x84,0x82,0x43,0x12] ee.stf.128.xp f2, f0, f5, f8, a11, a5 # CHECK: ee.stf.128.xp f2, f0, f5, f8, a11, a5 # encoding: [0x5b,0x58,0x20,0x13] ee.stf.64.ip f3, f6, a10, -848 -# CHECK: ee.stf.64.ip f3, f6, a10, -848 # encoding: [0x6a,0x36,0x58,0x1c] +# CHECK: ee.stf.64.ip f3, f6, a10, -848 # encoding: [0x6a,0x36,0x4b,0x1c] ee.stf.64.xp f2, f1, a1, a14 # CHECK: ee.stf.64.xp f2, f1, a1, a14 # encoding: [0x10,0x2e,0x17] ee.stxq.32 q5, q2, a5, 0, 1 # CHECK: ee.stxq.32 q5, q2, a5, 0, 1 # encoding: [0x05,0x8d,0xc0,0x1c] ee.st.accx.ip a10, 24 -# CHECK: ee.st.accx.ip a10, 24 # encoding: [0xa4,0x18,0x02] +# CHECK: ee.st.accx.ip a10, 24 # encoding: [0xa4,0x03,0x02] ee.st.qacc_h.h.32.ip a14, 380 -# CHECK: ee.st.qacc_h.h.32.ip a14, 380 # encoding: [0xe4,0x7c,0x12] +# CHECK: ee.st.qacc_h.h.32.ip a14, 380 # encoding: [0xe4,0x5f,0x12] ee.st.qacc_h.l.128.ip a7, -624 -# CHECK: ee.st.qacc_h.l.128.ip a7, -624 # 
encoding: [0x74,0x10,0x4d] +# CHECK: ee.st.qacc_h.l.128.ip a7, -624 # encoding: [0x74,0x59,0x4d] ee.st.qacc_l.h.32.ip a10, -20 -# CHECK: ee.st.qacc_l.h.32.ip a10, -20 # encoding: [0xa4,0x6c,0x5d] +# CHECK: ee.st.qacc_l.h.32.ip a10, -20 # encoding: [0xa4,0x7b,0x5d] ee.st.qacc_l.l.128.ip a4, 1936 -# CHECK: ee.st.qacc_l.l.128.ip a4, 1936 # encoding: [0x44,0x10,0x4c] +# CHECK: ee.st.qacc_l.l.128.ip a4, 1936 # encoding: [0x44,0x79,0x0c] ee.st.ua_state.ip a4, -1728 -# CHECK: ee.st.ua_state.ip a4, -1728 # encoding: [0x44,0x40,0x1c] +# CHECK: ee.st.ua_state.ip a4, -1728 # encoding: [0x44,0x14,0x5c] ee.vadds.s16 q5, q1, q4 # CHECK: ee.vadds.s16 q5, q1, q4 # encoding: [0x64,0xc1,0xae] ee.vadds.s16.ld.incp q6, a6, q1, q3, q1 @@ -191,13 +191,13 @@ ee.vcmp.lt.s8 q7, q1, q6 ee.vldbc.16 q6, a11 # CHECK: ee.vldbc.16 q6, a11 # encoding: [0xb4,0x73,0xfd] ee.vldbc.16.ip q6, a4, 124 -# CHECK: ee.vldbc.16.ip q6, a4, 124 # encoding: [0x44,0x7c,0xb5] +# CHECK: ee.vldbc.16.ip q6, a4, 124 # encoding: [0x44,0x3e,0xb5] ee.vldbc.16.xp q2, a0, a7 # CHECK: ee.vldbc.16.xp q2, a0, a7 # encoding: [0x04,0x47,0x9d] ee.vldbc.32 q4, a0 # CHECK: ee.vldbc.32 q4, a0 # encoding: [0x04,0x77,0xed] ee.vldbc.32.ip q6, a12, 308 -# CHECK: ee.vldbc.32.ip q6, a12, 308 # encoding: [0xc4,0x34,0xb2] +# CHECK: ee.vldbc.32.ip q6, a12, 308 # encoding: [0xc4,0x4d,0xb2] ee.vldbc.32.xp q1, a11, a0 # CHECK: ee.vldbc.32.xp q1, a11, a0 # encoding: [0xb4,0x90,0x8d] ee.vldbc.8 q2, a3 @@ -209,15 +209,15 @@ ee.vldbc.8.xp q2, a0, a13 ee.vldhbc.16.incp q5, q5, a14 # CHECK: ee.vldhbc.16.incp q5, q5, a14 # encoding: [0xe4,0xd2,0xec] ee.vld.128.ip q3, a14, 1248 -# CHECK: ee.vld.128.ip q3, a14, 1248 # encoding: [0xe4,0xe0,0xd3] +# CHECK: ee.vld.128.ip q3, a14, 1248 # encoding: [0xe4,0xce,0x93] ee.vld.128.xp q5, a10, a12 # CHECK: ee.vld.128.xp q5, a10, a12 # encoding: [0xa4,0xac,0xad] ee.vld.h.64.ip q4, a14, 240 -# CHECK: ee.vld.h.64.ip q4, a14, 240 # encoding: [0xe4,0x70,0xe8] +# CHECK: ee.vld.h.64.ip q4, a14, 240 # encoding: 
[0xe4,0x1e,0xa8] ee.vld.h.64.xp q7, a4, a8 # CHECK: ee.vld.h.64.xp q7, a4, a8 # encoding: [0x44,0xe8,0xbd] ee.vld.l.64.ip q1, a8, 8 -# CHECK: ee.vld.l.64.ip q1, a8, 8 # encoding: [0x84,0x88,0x89] +# CHECK: ee.vld.l.64.ip q1, a8, 8 # encoding: [0x84,0x81,0x89] ee.vld.l.64.xp q1, a2, a9 # CHECK: ee.vld.l.64.xp q1, a2, a9 # encoding: [0x24,0xb9,0x8d] ee.vmax.s16 q2, q5, q6 @@ -259,9 +259,9 @@ ee.vmin.s8.st.incp q2, a4, q4, q7, q1 ee.vmulas.s16.accx q0, q7 # CHECK: ee.vmulas.s16.accx q0, q7 # encoding: [0x84,0x58,0x1a] ee.vmulas.s16.accx.ld.ip q7, a7, -16, q2, q0 -# CHECK: ee.vmulas.s16.accx.ld.ip q7, a7, -16, q2, q0 # encoding: [0x07,0x80,0xf0,0x1f] +# CHECK: ee.vmulas.s16.accx.ld.ip q7, a7, -16, q2, q0 # encoding: [0xf7,0x80,0xf0,0x1f] ee.vmulas.s16.accx.ld.ip.qup q5, a14, 32, q0, q2, q0, q2 -# CHECK: ee.vmulas.s16.accx.ld.ip.qup q5, a14, 32, q0, q2, q0, q2 # encoding: [0x0e,0x10,0x54,0x01] +# CHECK: ee.vmulas.s16.accx.ld.ip.qup q5, a14, 32, q0, q2, q0, q2 # encoding: [0x2e,0x10,0x54,0x00] ee.vmulas.s16.accx.ld.xp q1, a0, a1, q2, q6 # CHECK: ee.vmulas.s16.accx.ld.xp q1, a0, a1, q2, q6 # encoding: [0x10,0xb1,0x10,0x1e] ee.vmulas.s16.accx.ld.xp.qup q4, a8, a10, q4, q0, q0, q3 @@ -273,9 +273,9 @@ ee.vmulas.s16.qacc.ldbc.incp q2, a6, q3, q4 ee.vmulas.s16.qacc.ldbc.incp.qup q0, a4, q1, q6, q4, q5 # CHECK: ee.vmulas.s16.qacc.ldbc.incp.qup q0, a4, q1, q6, q4, q5 # encoding: [0x84,0x74,0x0a,0x1c] ee.vmulas.s16.qacc.ld.ip q7, a7, -64, q7, q7 -# CHECK: ee.vmulas.s16.qacc.ld.ip q7, a7, -64, q7, q7 # encoding: [0x07,0xf8,0x73,0x1e] +# CHECK: ee.vmulas.s16.qacc.ld.ip q7, a7, -64, q7, q7 # encoding: [0xc7,0xf8,0xf3,0x1f] ee.vmulas.s16.qacc.ld.ip.qup q0, a10, 48, q3, q6, q3, q6 -# CHECK: ee.vmulas.s16.qacc.ld.ip.qup q0, a10, 48, q3, q6, q3, q6 # encoding: [0x0a,0xf3,0x8c,0x03] +# CHECK: ee.vmulas.s16.qacc.ld.ip.qup q0, a10, 48, q3, q6, q3, q6 # encoding: [0x3a,0xf3,0x0c,0x02] ee.vmulas.s16.qacc.ld.xp q3, a11, a4, q4, q5 # CHECK: ee.vmulas.s16.qacc.ld.xp q3, a11, a4, q4, q5 # 
encoding: [0x4b,0x29,0x33,0x1e] ee.vmulas.s16.qacc.ld.xp.qup q2, a9, a1, q3, q2, q1, q7 @@ -283,9 +283,9 @@ ee.vmulas.s16.qacc.ld.xp.qup q2, a9, a1, q3, q2, q1, q7 ee.vmulas.s8.accx q1, q0 # CHECK: ee.vmulas.s8.accx q1, q0 # encoding: [0xc4,0x01,0x1a] ee.vmulas.s8.accx.ld.ip q2, a8, 80, q3, q0 -# CHECK: ee.vmulas.s8.accx.ld.ip q2, a8, 80, q3, q0 # encoding: [0x08,0xc0,0xa4,0x1e] +# CHECK: ee.vmulas.s8.accx.ld.ip q2, a8, 80, q3, q0 # encoding: [0x58,0xc0,0x24,0x1e] ee.vmulas.s8.accx.ld.ip.qup q2, a9, -80, q1, q2, q6, q3 -# CHECK: ee.vmulas.s8.accx.ld.ip.qup q2, a9, -80, q1, q2, q6, q3 # encoding: [0x09,0x56,0xa6,0x05] +# CHECK: ee.vmulas.s8.accx.ld.ip.qup q2, a9, -80, q1, q2, q6, q3 # encoding: [0xb9,0x56,0xa6,0x05] ee.vmulas.s8.accx.ld.xp q3, a3, a4, q4, q7 # CHECK: ee.vmulas.s8.accx.ld.xp q3, a3, a4, q4, q7 # encoding: [0x43,0x39,0x35,0x1e] ee.vmulas.s8.accx.ld.xp.qup q0, a3, a1, q4, q5, q3, q3 @@ -297,9 +297,9 @@ ee.vmulas.s8.qacc.ldbc.incp q7, a1, q6, q1 ee.vmulas.s8.qacc.ldbc.incp.qup q3, a11, q4, q6, q5, q6 # CHECK: ee.vmulas.s8.qacc.ldbc.incp.qup q3, a11, q4, q6, q5, q6 # encoding: [0x9b,0x35,0x3d,0x1c] ee.vmulas.s8.qacc.ld.ip q5, a10, -16, q0, q0 -# CHECK: ee.vmulas.s8.qacc.ld.ip q5, a10, -16, q0, q0 # encoding: [0x0a,0x00,0xd6,0x1f] +# CHECK: ee.vmulas.s8.qacc.ld.ip q5, a10, -16, q0, q0 # encoding: [0xfa,0x00,0xd6,0x1f] ee.vmulas.s8.qacc.ld.ip.qup q7, a9, -48, q6, q2, q1, q2 -# CHECK: ee.vmulas.s8.qacc.ld.ip.qup q7, a9, -48, q6, q2, q1, q2 # encoding: [0x09,0x91,0xf5,0x06] +# CHECK: ee.vmulas.s8.qacc.ld.ip.qup q7, a9, -48, q6, q2, q1, q2 # encoding: [0xd9,0x91,0xf5,0x07] ee.vmulas.s8.qacc.ld.xp q1, a1, a12, q5, q0 # CHECK: ee.vmulas.s8.qacc.ld.xp q1, a1, a12, q5, q0 # encoding: [0xc1,0x41,0x17,0x1e] ee.vmulas.s8.qacc.ld.xp.qup q0, a1, a14, q1, q6, q2, q4 @@ -307,9 +307,9 @@ ee.vmulas.s8.qacc.ld.xp.qup q0, a1, a14, q1, q6, q2, q4 ee.vmulas.u16.accx q7, q1 # CHECK: ee.vmulas.u16.accx q7, q1 # encoding: [0x84,0x0f,0x0a] ee.vmulas.u16.accx.ld.ip q5, a8, -32, 
q1, q4 -# CHECK: ee.vmulas.u16.accx.ld.ip q5, a8, -32, q1, q4 # encoding: [0x08,0x60,0x58,0x1f] +# CHECK: ee.vmulas.u16.accx.ld.ip q5, a8, -32, q1, q4 # encoding: [0xe8,0x60,0xd8,0x1f] ee.vmulas.u16.accx.ld.ip.qup q1, a0, 48, q7, q4, q4, q0 -# CHECK: ee.vmulas.u16.accx.ld.ip.qup q1, a0, 48, q7, q4, q4, q0 # encoding: [0x00,0xe4,0x91,0x09] +# CHECK: ee.vmulas.u16.accx.ld.ip.qup q1, a0, 48, q7, q4, q4, q0 # encoding: [0x30,0xe4,0x11,0x08] ee.vmulas.u16.accx.ld.xp q3, a14, a4, q5, q4 # CHECK: ee.vmulas.u16.accx.ld.xp q3, a14, a4, q5, q4 # encoding: [0x4e,0x61,0x39,0x1e] ee.vmulas.u16.accx.ld.xp.qup q4, a3, a7, q6, q2, q4, q4 @@ -321,7 +321,7 @@ ee.vmulas.u16.qacc.ldbc.incp q6, a7, q0, q3 ee.vmulas.u16.qacc.ldbc.incp.qup q0, a12, q6, q3, q2, q0 # CHECK: ee.vmulas.u16.qacc.ldbc.incp.qup q0, a12, q6, q3, q2, q0 # encoding: [0xac,0x9a,0x01,0x1c] ee.vmulas.u16.qacc.ld.ip q4, a10, 16, q3, q2 -# CHECK: ee.vmulas.u16.qacc.ld.ip q4, a10, 16, q3, q2 # encoding: [0x0a,0xd0,0xca,0x1e] +# CHECK: ee.vmulas.u16.qacc.ld.ip q4, a10, 16, q3, q2 # encoding: [0x1a,0xd0,0x4a,0x1e] ee.vmulas.u16.qacc.ld.ip.qup q2, a4, 0, q5, q4, q2, q6 # CHECK: ee.vmulas.u16.qacc.ld.ip.qup q2, a4, 0, q5, q4, q2, q6 # encoding: [0x04,0x62,0x2d,0x0a] ee.vmulas.u16.qacc.ld.xp q6, a14, a2, q4, q0 @@ -331,9 +331,9 @@ ee.vmulas.u16.qacc.ld.xp.qup q6, a12, a11, q6, q7, q4, q1 ee.vmulas.u8.accx q2, q1 # CHECK: ee.vmulas.u8.accx q2, q1 # encoding: [0xc4,0x0a,0x0a] ee.vmulas.u8.accx.ld.ip q6, a3, -112, q2, q7 -# CHECK: ee.vmulas.u8.accx.ld.ip q6, a3, -112, q2, q7 # encoding: [0x03,0xb8,0xec,0x1e] +# CHECK: ee.vmulas.u8.accx.ld.ip q6, a3, -112, q2, q7 # encoding: [0x93,0xb8,0xec,0x1f] ee.vmulas.u8.accx.ld.ip.qup q7, a3, -32, q3, q3, q7, q5 -# CHECK: ee.vmulas.u8.accx.ld.ip.qup q7, a3, -32, q3, q3, q7, q5 # encoding: [0x03,0xdf,0x7a,0x0d] +# CHECK: ee.vmulas.u8.accx.ld.ip.qup q7, a3, -32, q3, q3, q7, q5 # encoding: [0xe3,0xdf,0xfa,0x0d] ee.vmulas.u8.accx.ld.xp q4, a4, a9, q4, q0 # CHECK: ee.vmulas.u8.accx.ld.xp q4, 
a4, a9, q4, q0 # encoding: [0x94,0x01,0x4d,0x1e] ee.vmulas.u8.accx.ld.xp.qup q5, a7, a13, q4, q7, q2, q6 @@ -345,9 +345,9 @@ ee.vmulas.u8.qacc.ldbc.incp q4, a1, q0, q5 ee.vmulas.u8.qacc.ldbc.incp.qup q2, a1, q5, q7, q6, q4 # CHECK: ee.vmulas.u8.qacc.ldbc.incp.qup q2, a1, q5, q7, q6, q4 # encoding: [0xb1,0x7e,0x29,0x1c] ee.vmulas.u8.qacc.ld.ip q2, a12, 32, q1, q4 -# CHECK: ee.vmulas.u8.qacc.ld.ip q2, a12, 32, q1, q4 # encoding: [0x0c,0x60,0x2e,0x1f] +# CHECK: ee.vmulas.u8.qacc.ld.ip q2, a12, 32, q1, q4 # encoding: [0x2c,0x60,0x2e,0x1e] ee.vmulas.u8.qacc.ld.ip.qup q0, a6, 48, q0, q0, q6, q0 -# CHECK: ee.vmulas.u8.qacc.ld.ip.qup q0, a6, 48, q0, q0, q6, q0 # encoding: [0x06,0x06,0x80,0x0f] +# CHECK: ee.vmulas.u8.qacc.ld.ip.qup q0, a6, 48, q0, q0, q6, q0 # encoding: [0x36,0x06,0x00,0x0e] ee.vmulas.u8.qacc.ld.xp q6, a1, a1, q2, q5 # CHECK: ee.vmulas.u8.qacc.ld.xp q6, a1, a1, q2, q5 # encoding: [0x11,0xa9,0x6e,0x1e] ee.vmulas.u8.qacc.ld.xp.qup q1, a8, a10, q3, q7, q1, q3 @@ -397,15 +397,15 @@ ee.vsmulas.s8.qacc.ld.incp q1, a8, q1, q1, 4 ee.vsr.32 q4, q3 # CHECK: ee.vsr.32 q4, q3 # encoding: [0xc4,0xbf,0xdd] ee.vst.128.ip q3, a6, -816 -# CHECK: ee.vst.128.ip q3, a6, -816 # encoding: [0x64,0xd0,0xda] +# CHECK: ee.vst.128.ip q3, a6, -816 # encoding: [0x64,0xcd,0xda] ee.vst.128.xp q6, a12, a14 # CHECK: ee.vst.128.xp q6, a12, a14 # encoding: [0xc4,0x7e,0xbd] ee.vst.h.64.ip q2, a5, 40 -# CHECK: ee.vst.h.64.ip q2, a5, 40 # encoding: [0x54,0x28,0x9b] +# CHECK: ee.vst.h.64.ip q2, a5, 40 # encoding: [0x54,0x05,0x9b] ee.vst.h.64.xp q2, a13, a6 # CHECK: ee.vst.h.64.xp q2, a13, a6 # encoding: [0xd4,0x06,0xdd] ee.vst.l.64.ip q5, a8, 16 -# CHECK: ee.vst.l.64.ip q5, a8, 16 # encoding: [0x84,0x90,0xa4] +# CHECK: ee.vst.l.64.ip q5, a8, 16 # encoding: [0x84,0x82,0xa4] ee.vst.l.64.xp q0, a13, a6 # CHECK: ee.vst.l.64.xp q0, a13, a6 # encoding: [0xd4,0x46,0xcd] ee.vsubs.s16 q5, q1, q4 From d1742048d59a7a6e262272e792fd5821dd751c6c Mon Sep 17 00:00:00 2001 From: Ronen Ulanovsky Date: Thu, 13 Jul 
2023 19:57:30 +0300 Subject: [PATCH 151/289] [Xtensa] Fix FP mul-sub fusion MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit `performMADD_MSUBCombine` treats `x * y ± z` and `z ± x * y` interchangeably which is wrong for `msub.s` which expects the latter. Added a check to determine that the orientation is correct, and if not, negate the result. --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 27 +++++++++++-------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index cfa090eeea273..38e4c5c1220ba 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -485,32 +485,37 @@ void XtensaTargetLowering::LowerAsmOperandForConstraint( static SDValue performMADD_MSUBCombine(SDNode *ROOTNode, SelectionDAG &CurDAG, const XtensaSubtarget &Subtarget) { - if (ROOTNode->getOperand(0).getValueType() != MVT::f32) - return SDValue(); + SDValue LHS = ROOTNode->getOperand(0); + SDValue RHS = ROOTNode->getOperand(1); - if (ROOTNode->getOperand(0).getOpcode() != ISD::FMUL && - ROOTNode->getOperand(1).getOpcode() != ISD::FMUL) + if (LHS.getValueType() != MVT::f32) return SDValue(); - SDValue Mult = ROOTNode->getOperand(0).getOpcode() == ISD::FMUL - ? ROOTNode->getOperand(0) - : ROOTNode->getOperand(1); + bool IsAdd = ROOTNode->getOpcode() == ISD::FADD; + + SDValue Mult = LHS, AddOperand = RHS; + bool NegRes = !IsAdd; - SDValue AddOperand = ROOTNode->getOperand(0).getOpcode() == ISD::FMUL - ? ROOTNode->getOperand(1) - : ROOTNode->getOperand(0); + if (LHS.getOpcode() != ISD::FMUL && RHS.getOpcode() != ISD::FMUL) + return SDValue(); + else if (RHS.getOpcode() == ISD::FMUL) { + Mult = RHS; + AddOperand = LHS; + NegRes = false; + } if (!Mult.hasOneUse()) return SDValue(); SDLoc DL(ROOTNode); - bool IsAdd = ROOTNode->getOpcode() == ISD::FADD; unsigned Opcode = IsAdd ? 
XtensaISD::MADD : XtensaISD::MSUB; SDValue MAddOps[3] = {AddOperand, Mult->getOperand(0), Mult->getOperand(1)}; EVT VTs[3] = {MVT::f32, MVT::f32, MVT::f32}; SDValue MAdd = CurDAG.getNode(Opcode, DL, VTs, MAddOps); + if (NegRes) + return CurDAG.getNode(ISD::FNEG, DL, MVT::f32, MAdd); return MAdd; } From 03a7ede7225d9e7811c84e625531cc6147dff4f2 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 23:35:11 +0300 Subject: [PATCH 152/289] [Xtensa] Add more valid FMA patterns and tests 1. Prefer `fneg.s` to `l32r ar, 0x80000000; xor ar, as, ar` when `wfr/rfr` are used anyway 2. Add Patterns for fma/madd/msub to automatically generate msub when it's the better choice 3. XtensaISelLowering.cpp: Rely on LLVM to lower FMA to madd.s/msub.s instead of hardcoding 4. Add float-fma.ll with various fused multiply add/subtract permutations --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 49 ++++-- llvm/lib/Target/Xtensa/XtensaISelLowering.h | 2 + llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 7 +- llvm/test/CodeGen/Xtensa/float-fma.ll | 146 ++++++++++++++++++ 4 files changed, 186 insertions(+), 18 deletions(-) create mode 100644 llvm/test/CodeGen/Xtensa/float-fma.ll diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 38e4c5c1220ba..8b6c5819ee2d9 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -347,6 +347,20 @@ MVT XtensaTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context, return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT); } +bool XtensaTargetLowering::isFNegFree(EVT VT) const { + if (!VT.isSimple()) + return false; + + switch (VT.getSimpleVT().SimpleTy) { + case MVT::f32: + return Subtarget.hasSingleFloat(); + default: + break; + } + + return false; +} + bool XtensaTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const { if (!VT.isSimple()) @@ -488,35 +502,36 @@ static SDValue 
performMADD_MSUBCombine(SDNode *ROOTNode, SelectionDAG &CurDAG, SDValue LHS = ROOTNode->getOperand(0); SDValue RHS = ROOTNode->getOperand(1); - if (LHS.getValueType() != MVT::f32) + if (LHS.getValueType() != MVT::f32 || (LHS.getOpcode() != ISD::FMUL && RHS.getOpcode() != ISD::FMUL)) return SDValue(); + SDLoc DL(ROOTNode); bool IsAdd = ROOTNode->getOpcode() == ISD::FADD; - SDValue Mult = LHS, AddOperand = RHS; - bool NegRes = !IsAdd; + SDValue Mult, AddOperand; + bool Inverted; - if (LHS.getOpcode() != ISD::FMUL && RHS.getOpcode() != ISD::FMUL) - return SDValue(); - else if (RHS.getOpcode() == ISD::FMUL) { - Mult = RHS; - AddOperand = LHS; - NegRes = false; - } + if (LHS.getOpcode() == ISD::FMUL) + Mult = LHS, AddOperand = RHS, Inverted = false; + else + Mult = RHS, AddOperand = LHS, Inverted = true; if (!Mult.hasOneUse()) return SDValue(); - SDLoc DL(ROOTNode); + SDValue MultOperand0 = Mult->getOperand(0), MultOperand1 = Mult->getOperand(1); + + if (!IsAdd) { + if (Inverted) + MultOperand0 = CurDAG.getNode(ISD::FNEG, DL, MVT::f32, MultOperand0); + else + AddOperand = CurDAG.getNode(ISD::FNEG, DL, MVT::f32, AddOperand); + } - unsigned Opcode = IsAdd ? 
XtensaISD::MADD : XtensaISD::MSUB; - SDValue MAddOps[3] = {AddOperand, Mult->getOperand(0), Mult->getOperand(1)}; + SDValue FMAOps[3] = {MultOperand0, MultOperand1, AddOperand}; EVT VTs[3] = {MVT::f32, MVT::f32, MVT::f32}; - SDValue MAdd = CurDAG.getNode(Opcode, DL, VTs, MAddOps); - if (NegRes) - return CurDAG.getNode(ISD::FNEG, DL, MVT::f32, MAdd); - return MAdd; + return CurDAG.getNode(ISD::FMA, DL, VTs, FMAOps); } static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG, diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index 84f6c1245252d..2340323ae9e23 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -119,6 +119,8 @@ class XtensaTargetLowering : public TargetLowering { bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override; + bool isFNegFree(EVT VT) const override; + /// If a physical register, this returns the register that receives the /// exception address on entry to an EH pad. 
Register diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index a66ba66431d76..63cc8a14942cd 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1059,9 +1059,10 @@ def FLOOR_S : RRR_Inst<0x00, 0x0A, 0x0A, (outs AR:$r), (ins FPR:$s, uimm4:$imm), let t = imm; } -def MADDN_S : RRR_Inst<0x00, 0x0A, 0x06, (outs FPR:$r), (ins FPR:$s, FPR:$t), +def MADDN_S : RRR_Inst<0x00, 0x0A, 0x06, (outs FPR:$r), (ins FPR:$a, FPR:$s, FPR:$t), "maddn.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]> { let isCommutable = 0; + let Constraints = "$r = $a"; } // FP multipy-add @@ -1122,6 +1123,10 @@ def MSUB_S : RRR_Inst<0x00, 0x0A, 0x05, (outs FPR:$r), (ins FPR:$a, FPR:$s, FPR: let Constraints = "$r = $a"; } +// fmsub: -r1 * r2 + r3 +def : Pat<(fma (fneg FPR:$r1), FPR:$r2, FPR:$r3), + (MSUB_S $r3, $r1, $r2)>; + def NEXP01_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), "nexp01.s\t$r, $s", []>, Requires<[HasSingleFloat]> { let t = 0x0B; diff --git a/llvm/test/CodeGen/Xtensa/float-fma.ll b/llvm/test/CodeGen/Xtensa/float-fma.ll new file mode 100644 index 0000000000000..484b2705a18c6 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/float-fma.ll @@ -0,0 +1,146 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=XTENSA %s + +define float @fmadd_s(float %a, float %b, float %c) nounwind { +; XTENSA-LABEL: fmadd_s: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: wfr f8, a3 +; XTENSA-NEXT: wfr f9, a2 +; XTENSA-NEXT: wfr f10, a4 +; XTENSA-NEXT: madd.s f10, f9, f8 +; XTENSA-NEXT: rfr a2, f10 +; XTENSA-NEXT: retw.n + %mul = fmul float %a, %b + %add = fadd float %mul, %c + ret float %add +} + +define float @fmsub_s(float %a, float %b, float %c) nounwind { +; XTENSA-LABEL: fmsub_s: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: 
wfr f8, a3 +; XTENSA-NEXT: wfr f9, a2 +; XTENSA-NEXT: wfr f10, a4 +; XTENSA-NEXT: neg.s f10, f10 +; XTENSA-NEXT: madd.s f10, f9, f8 +; XTENSA-NEXT: rfr a2, f10 +; XTENSA-NEXT: retw.n + %mul = fmul float %a, %b + %sub = fsub float %mul, %c + ret float %sub +} + +define float @fnmadd_s(float %a, float %b, float %c) nounwind { +; XTENSA-LABEL: fnmadd_s: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: wfr f8, a3 +; XTENSA-NEXT: wfr f9, a2 +; XTENSA-NEXT: wfr f10, a4 +; XTENSA-NEXT: madd.s f10, f9, f8 +; XTENSA-NEXT: neg.s f8, f10 +; XTENSA-NEXT: rfr a2, f8 +; XTENSA-NEXT: retw.n + %mul = fmul float %a, %b + %add = fadd float %mul, %c + %negadd = fneg float %add + ret float %negadd +} + + +define float @fnmsub_s(float %a, float %b, float %c) nounwind { +; XTENSA-LABEL: fnmsub_s: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: wfr f8, a3 +; XTENSA-NEXT: wfr f9, a2 +; XTENSA-NEXT: wfr f10, a4 +; XTENSA-NEXT: msub.s f10, f9, f8 +; XTENSA-NEXT: rfr a2, f10 +; XTENSA-NEXT: retw.n + %nega = fneg float %a + %mul = fmul float %nega, %b + %add = fadd float %mul, %c + ret float %add +} + +declare float @llvm.fma.f32(float, float, float) + +define float @fmadd_s_intrinsics(float %a, float %b, float %c) nounwind { +; XTENSA-LABEL: fmadd_s_intrinsics: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: wfr f8, a3 +; XTENSA-NEXT: wfr f9, a2 +; XTENSA-NEXT: wfr f10, a4 +; XTENSA-NEXT: madd.s f10, f9, f8 +; XTENSA-NEXT: rfr a2, f10 +; XTENSA-NEXT: retw.n + %fma = call float @llvm.fma.f32(float %a, float %b, float %c) + ret float %fma +} + +define float @fmsub_s_intrinsics(float %a, float %b, float %c) nounwind { +; XTENSA-LABEL: fmsub_s_intrinsics: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: wfr f8, a3 +; XTENSA-NEXT: wfr f9, a2 +; XTENSA-NEXT: wfr f10, a4 +; XTENSA-NEXT: neg.s f10, f10 +; XTENSA-NEXT: madd.s f10, f9, f8 +; XTENSA-NEXT: rfr a2, f10 +; XTENSA-NEXT: retw.n + %negc = fneg float %c + %fma = call float 
@llvm.fma.f32(float %a, float %b, float %negc) + ret float %fma +} + +define float @fnmadd_s_intrinsics(float %a, float %b, float %c) nounwind { +; XTENSA-LABEL: fnmadd_s_intrinsics: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: wfr f8, a3 +; XTENSA-NEXT: wfr f9, a2 +; XTENSA-NEXT: wfr f10, a4 +; XTENSA-NEXT: madd.s f10, f9, f8 +; XTENSA-NEXT: neg.s f8, f10 +; XTENSA-NEXT: rfr a2, f8 +; XTENSA-NEXT: retw.n + %fma = call float @llvm.fma.f32(float %a, float %b, float %c) + %neg = fneg float %fma + ret float %neg +} + +define float @fnmsub_s_intrinsics(float %a, float %b, float %c) nounwind { +; XTENSA-LABEL: fnmsub_s_intrinsics: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: wfr f8, a3 +; XTENSA-NEXT: wfr f9, a2 +; XTENSA-NEXT: wfr f10, a4 +; XTENSA-NEXT: msub.s f10, f9, f8 +; XTENSA-NEXT: rfr a2, f10 +; XTENSA-NEXT: retw.n + %nega = fneg float %a + %fma = call float @llvm.fma.f32(float %nega, float %b, float %c) + ret float %fma +} + +define float @fnmsub_s_swap_intrinsics(float %a, float %b, float %c) nounwind { +; XTENSA-LABEL: fnmsub_s_swap_intrinsics: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: wfr f8, a2 +; XTENSA-NEXT: wfr f9, a3 +; XTENSA-NEXT: wfr f10, a4 +; XTENSA-NEXT: msub.s f10, f9, f8 +; XTENSA-NEXT: rfr a2, f10 +; XTENSA-NEXT: retw.n + %negb = fneg float %b + %fma = call float @llvm.fma.f32(float %a, float %negb, float %c) + ret float %fma +} From bbb862ce87fc5bbc3723564a061ec0e4df6e6195 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 23:36:47 +0300 Subject: [PATCH 153/289] [Xtensa] Add MINMAX feature --- llvm/lib/Target/Xtensa/Xtensa.td | 13 ++++- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 13 +++++ llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 9 +++ llvm/lib/Target/Xtensa/XtensaSubtarget.cpp | 1 + llvm/lib/Target/Xtensa/XtensaSubtarget.h | 5 ++ llvm/test/CodeGen/Xtensa/minmax-intrinsics.ll | 55 +++++++++++++++++++ llvm/test/MC/Xtensa/xtensa-valid-minmax.s | 22 ++++++++ 
7 files changed, 115 insertions(+), 3 deletions(-) create mode 100644 llvm/test/CodeGen/Xtensa/minmax-intrinsics.ll create mode 100644 llvm/test/MC/Xtensa/xtensa-valid-minmax.s diff --git a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td index abf189cf43e97..b84809ec51748 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -52,6 +52,11 @@ def FeatureNSA : SubtargetFeature<"nsa", "HasNSA", "true", def HasNSA : Predicate<"Subtarget->hasNSA()">, AssemblerPredicate<(all_of FeatureNSA)>; +def FeatureMINMAX : SubtargetFeature<"minmax", "HasMINMAX", "true", + "Enable Xtensa MINMAX option">; +def HasMINMAX : Predicate<"Subtarget->hasMINMAX()">, + AssemblerPredicate<(all_of FeatureMINMAX)>; + def FeatureMul16 : SubtargetFeature<"mul16", "HasMul16", "true", "Enable Xtensa Mul16 option">; def HasMul16 : Predicate<"Subtarget->hasMul16()">, @@ -179,20 +184,22 @@ def : Proc<"generic", []>; def : Proc<"esp32", [FeatureDensity, FeatureSingleFloat, FeatureLoop, FeatureMAC16, FeatureWindowed, FeatureBoolean, FeatureSEXT, FeatureNSA, FeatureMul16, FeatureMul32, FeatureMul32High, FeatureDFPAccel, FeatureS32C1I, FeatureTHREADPTR, FeatureDiv32, FeatureATOMCTL, FeatureMEMCTL, FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, - FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR]>; + FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR, + FeatureMINMAX]>; def : Proc<"esp8266", [FeatureDensity, FeatureNSA, FeatureMul16, FeatureMul32, FeatureExtendedL32R, FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeatureRegionProtection, FeaturePRID]>; def : Proc<"esp32s2", [FeatureDensity, FeatureWindowed, FeatureSEXT, FeatureNSA, FeatureMul16, FeatureMul32, FeatureMul32High, FeatureTHREADPTR, FeatureDiv32, FeatureMEMCTL, 
FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, FeatureInterrupt, - FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR, FeatureESP32S2Ops]>; + FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR, FeatureMINMAX, + FeatureESP32S2Ops]>; def : Proc<"esp32s3", [FeatureDensity, FeatureSingleFloat, FeatureLoop, FeatureMAC16, FeatureWindowed, FeatureBoolean, FeatureSEXT, FeatureNSA, FeatureMul16, FeatureMul32, FeatureMul32High, FeatureDFPAccel, FeatureS32C1I, FeatureTHREADPTR, FeatureDiv32, FeatureATOMCTL, FeatureMEMCTL, FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR, - FeatureESP32S3Ops]>; + FeatureMINMAX, FeatureESP32S3Ops]>; //===----------------------------------------------------------------------===// // Register File Description diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 8b6c5819ee2d9..11a9a70eeb9a6 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -179,6 +179,19 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand); + setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, + MVT::i32, Subtarget.hasMINMAX() ? 
Legal : Expand); + + setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); + setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); + setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); + setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); + + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand); + // Handle floating-point types. for (unsigned I = MVT::FIRST_FP_VALUETYPE; I <= MVT::LAST_FP_VALUETYPE; ++I) { MVT VT = MVT::SimpleValueType(I); diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 63cc8a14942cd..17be97f0d5dde 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1302,6 +1302,15 @@ def NSAU : RRR_Inst<0x00, 0x00, 0x04, (outs AR:$t), (ins AR:$s), let r = 0xF; } +//===----------------------------------------------------------------------===// +// MINMAX Instructions +//===----------------------------------------------------------------------===// + +def MIN : ArithLogic_RRR<0x04, 0x03, "min", smin, 1>, Requires<[HasMINMAX]>; +def MAX : ArithLogic_RRR<0x05, 0x03, "max", smax, 1>, Requires<[HasMINMAX]>; +def MINU : ArithLogic_RRR<0x06, 0x03, "minu", umin, 1>, Requires<[HasMINMAX]>; +def MAXU : ArithLogic_RRR<0x07, 0x03, "maxu", umax, 1>, Requires<[HasMINMAX]>; + //===----------------------------------------------------------------------===// // Mul16 Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp index 45953ff2ad451..6ea6e500c07b9 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp @@ -50,6 +50,7 @@ XtensaSubtarget::initializeSubtargetDependencies(StringRef 
CPU, StringRef FS) { HasLoop = false; HasSEXT = false; HasNSA = false; + HasMINMAX = false; HasMul16 = false; HasMul32 = false; HasMul32High = false; diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.h b/llvm/lib/Target/Xtensa/XtensaSubtarget.h index 1db34d354d575..13be4d5280302 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.h +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.h @@ -57,6 +57,9 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { // Enable Xtensa NSA option bool HasNSA; + // Enable Xtensa MINMAX option + bool HasMINMAX; + // Enable Xtensa Mul16 option bool HasMul16; @@ -165,6 +168,8 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { bool hasNSA() const { return HasNSA; } + bool hasMINMAX() const { return HasMINMAX; } + bool hasMul16() const { return HasMul16; } bool hasMul32() const { return HasMul32; } diff --git a/llvm/test/CodeGen/Xtensa/minmax-intrinsics.ll b/llvm/test/CodeGen/Xtensa/minmax-intrinsics.ll new file mode 100644 index 0000000000000..e6faa89c0ec59 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/minmax-intrinsics.ll @@ -0,0 +1,55 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=XTENSA %s + +declare i32 @llvm.smin.i32(i32, i32) + +define i32 @smin_i32(i32 %a, i32 %b) { +; XTENSA-LABEL: smin_i32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: .cfi_def_cfa_offset 32 +; XTENSA-NEXT: min a2, a2, a3 +; XTENSA-NEXT: retw.n + %1 = tail call i32 @llvm.smin.i32(i32 %a, i32 %b) + ret i32 %1 +} + +declare i32 @llvm.smax.i32(i32, i32) + +define i32 @smax_i32(i32 %a, i32 %b) { +; XTENSA-LABEL: smax_i32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: .cfi_def_cfa_offset 32 +; XTENSA-NEXT: max a2, a2, a3 +; XTENSA-NEXT: retw.n + %1 = tail call i32 @llvm.smax.i32(i32 %a, i32 %b) + ret i32 %1 +} + +declare i32 @llvm.umin.i32(i32, i32) + +define i32 @umin_i32(i32 %a, i32 
%b) { +; XTENSA-LABEL: umin_i32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: .cfi_def_cfa_offset 32 +; XTENSA-NEXT: minu a2, a2, a3 +; XTENSA-NEXT: retw.n + %1 = tail call i32 @llvm.umin.i32(i32 %a, i32 %b) + ret i32 %1 +} + +declare i32 @llvm.umax.i32(i32, i32) + +define i32 @umax_i32(i32 %a, i32 %b) { +; XTENSA-LABEL: umax_i32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: .cfi_def_cfa_offset 32 +; XTENSA-NEXT: maxu a2, a2, a3 +; XTENSA-NEXT: retw.n + %1 = tail call i32 @llvm.umax.i32(i32 %a, i32 %b) + ret i32 %1 +} diff --git a/llvm/test/MC/Xtensa/xtensa-valid-minmax.s b/llvm/test/MC/Xtensa/xtensa-valid-minmax.s new file mode 100644 index 0000000000000..7fa7b6c31dbb3 --- /dev/null +++ b/llvm/test/MC/Xtensa/xtensa-valid-minmax.s @@ -0,0 +1,22 @@ +# RUN: llvm-mc %s -triple=xtensa -mattr=+minmax -show-encoding \ +# RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s + +# Instruction format RRR +# CHECK-INST: min a2, a3, a4 +# CHECK: encoding: [0x40,0x23,0x43] +min a2, a3, a4 + +# Instruction format RRR +# CHECK-INST: max a2, a3, a4 +# CHECK: encoding: [0x40,0x23,0x53] +max a2, a3, a4 + +# Instruction format RRR +# CHECK-INST: minu a2, a3, a4 +# CHECK: encoding: [0x40,0x23,0x63] +minu a2, a3, a4 + +# Instruction format RRR +# CHECK-INST: maxu a2, a3, a4 +# CHECK: encoding: [0x40,0x23,0x73] +maxu a2, a3, a4 From 45694d107bce6fbd8168121d653746daf063b12a Mon Sep 17 00:00:00 2001 From: Ronen Ulanovsky Date: Sun, 16 Jul 2023 23:06:43 +0300 Subject: [PATCH 154/289] [Xtensa] Add CLAMPS feature --- .../lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp | 4 ++-- .../Xtensa/Disassembler/XtensaDisassembler.cpp | 6 +++--- .../Xtensa/MCTargetDesc/XtensaInstPrinter.cpp | 4 ++-- .../Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h | 2 +- .../Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp | 12 ++++++------ llvm/lib/Target/Xtensa/Xtensa.td | 11 ++++++++--- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 13 ++++++++++++- 
llvm/lib/Target/Xtensa/XtensaOperands.td | 10 +++++----- llvm/lib/Target/Xtensa/XtensaSubtarget.cpp | 1 + llvm/lib/Target/Xtensa/XtensaSubtarget.h | 5 +++++ llvm/test/MC/Xtensa/xtensa-clamps-invalid.s | 9 +++++++++ llvm/test/MC/Xtensa/xtensa-clamps-valid.s | 12 ++++++++++++ llvm/test/MC/Xtensa/xtensa-sext-invalid.s | 9 +++++++++ llvm/test/MC/Xtensa/xtensa-sext-valid.s | 12 ++++++++++++ 14 files changed, 87 insertions(+), 23 deletions(-) create mode 100644 llvm/test/MC/Xtensa/xtensa-clamps-invalid.s create mode 100644 llvm/test/MC/Xtensa/xtensa-clamps-valid.s create mode 100644 llvm/test/MC/Xtensa/xtensa-sext-invalid.s create mode 100644 llvm/test/MC/Xtensa/xtensa-sext-valid.s diff --git a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp index 7e3f8d16678a0..9c79b18965ee6 100644 --- a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp +++ b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp @@ -371,7 +371,7 @@ struct XtensaOperand : public MCParsedAsmOperand { return false; } - bool isseimm7_22() const { return isImm(7, 22); } + bool isimm7_22() const { return isImm(7, 22); } bool isSelect_2() const { return isImm(0, 1); } @@ -729,7 +729,7 @@ bool XtensaAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, case Match_Invalidentry_imm12: return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), "expected immediate in range [0, 32760]"); - case Match_Invalidseimm7_22: + case Match_Invalidimm7_22: return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), "expected immediate in range [7, 22]"); case Match_InvalidSelect_2: diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index 0c78d30ea1ab8..e0c9186bbec8c 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -576,9 +576,9 @@ static DecodeStatus decodeShimm1_31Operand(MCInst &Inst, 
uint64_t Imm, return MCDisassembler::Success; } -static DecodeStatus decodeSeimm7_22Operand(MCInst &Inst, uint64_t Imm, - int64_t Address, - const void *Decoder) { +static DecodeStatus decodeImm7_22Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { assert(isUInt<4>(Imm) && "Invalid immediate"); Inst.addOperand(MCOperand::createImm(Imm + 7)); return MCDisassembler::Success; diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp index 5919ef5401431..a8c1aaed10b1a 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp @@ -438,8 +438,8 @@ void XtensaInstPrinter::printB4constu_AsmOperand(const MCInst *MI, int OpNum, printOperand(MI, OpNum, O); } -void XtensaInstPrinter::printSeimm7_22_AsmOperand(const MCInst *MI, int OpNum, - raw_ostream &O) { +void XtensaInstPrinter::printImm7_22_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { if (MI->getOperand(OpNum).isImm()) { int64_t Value = MI->getOperand(OpNum).getImm(); assert((Value >= 7 && Value <= 22) && diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h index a8416279d6ae0..9224d0a98c14b 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h @@ -69,7 +69,7 @@ class XtensaInstPrinter : public MCInstPrinter { void printEntry_Imm12_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printB4const_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printB4constu_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); - void printSeimm7_22_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printImm7_22_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printSelect_2_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void 
printSelect_4_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printSelect_8_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp index 02d4d91bf1af7..185507a93c410 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp @@ -139,9 +139,9 @@ class XtensaMCCodeEmitter : public MCCodeEmitter { SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; - uint32_t getSeimm7_22OpValue(const MCInst &MI, unsigned OpNo, - SmallVectorImpl &Fixups, - const MCSubtargetInfo &STI) const; + uint32_t getImm7_22OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; uint8_t getSelect_2OpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, @@ -629,9 +629,9 @@ XtensaMCCodeEmitter::getB4constuOpValue(const MCInst &MI, unsigned OpNo, } uint32_t -XtensaMCCodeEmitter::getSeimm7_22OpValue(const MCInst &MI, unsigned OpNo, - SmallVectorImpl &Fixups, - const MCSubtargetInfo &STI) const { +XtensaMCCodeEmitter::getImm7_22OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { const MCOperand &MO = MI.getOperand(OpNo); uint32_t res = static_cast(MO.getImm()); diff --git a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td index b84809ec51748..572c76cc7c1ff 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -47,6 +47,11 @@ def FeatureSEXT : SubtargetFeature<"sext", "HasSEXT", "true", def HasSEXT : Predicate<"Subtarget->hasSEXT()">, AssemblerPredicate<(all_of FeatureSEXT)>; +def FeatureCLAMPS : SubtargetFeature<"clamps", "HasCLAMPS", "true", + "Enable Xtensa CLAMPS option">; +def HasCLAMPS : Predicate<"Subtarget->hasCLAMPS()">, + AssemblerPredicate<(all_of FeatureCLAMPS)>; + def FeatureNSA : 
SubtargetFeature<"nsa", "HasNSA", "true", "Enable Xtensa NSA option">; def HasNSA : Predicate<"Subtarget->hasNSA()">, @@ -185,7 +190,7 @@ def : Proc<"esp32", [FeatureDensity, FeatureSingleFloat, FeatureLoop, FeatureMAC FeatureNSA, FeatureMul16, FeatureMul32, FeatureMul32High, FeatureDFPAccel, FeatureS32C1I, FeatureTHREADPTR, FeatureDiv32, FeatureATOMCTL, FeatureMEMCTL, FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR, - FeatureMINMAX]>; + FeatureMINMAX, FeatureCLAMPS]>; def : Proc<"esp8266", [FeatureDensity, FeatureNSA, FeatureMul16, FeatureMul32, FeatureExtendedL32R, FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeatureRegionProtection, FeaturePRID]>; @@ -193,13 +198,13 @@ def : Proc<"esp8266", [FeatureDensity, FeatureNSA, FeatureMul16, FeatureMul32, F def : Proc<"esp32s2", [FeatureDensity, FeatureWindowed, FeatureSEXT, FeatureNSA, FeatureMul16, FeatureMul32, FeatureMul32High, FeatureTHREADPTR, FeatureDiv32, FeatureMEMCTL, FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR, FeatureMINMAX, - FeatureESP32S2Ops]>; + FeatureCLAMPS, FeatureESP32S2Ops]>; def : Proc<"esp32s3", [FeatureDensity, FeatureSingleFloat, FeatureLoop, FeatureMAC16, FeatureWindowed, FeatureBoolean, FeatureSEXT, FeatureNSA, FeatureMul16, FeatureMul32, FeatureMul32High, FeatureDFPAccel, FeatureS32C1I, FeatureTHREADPTR, FeatureDiv32, FeatureATOMCTL, FeatureMEMCTL, FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR, - FeatureMINMAX, FeatureESP32S3Ops]>; + FeatureMINMAX, FeatureCLAMPS, FeatureESP32S3Ops]>; 
//===----------------------------------------------------------------------===// // Register File Description diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 17be97f0d5dde..c766b57451ed0 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1281,13 +1281,24 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 0, Size = // SEXT Instructions //===----------------------------------------------------------------------===// -def SEXT : RRR_Inst<0x00, 0x03, 0x02, (outs AR:$r), (ins AR:$s, seimm7_22:$imm), +def SEXT : RRR_Inst<0x00, 0x03, 0x02, (outs AR:$r), (ins AR:$s, imm7_22:$imm), "sext\t$r, $s, $imm", []>, Requires<[HasSEXT]> { bits<4> imm; let t = imm; } +//===----------------------------------------------------------------------===// +// CLAMPS Instructions +//===----------------------------------------------------------------------===// + +def CLAMPS : RRR_Inst<0x00, 0x03, 0x03, (outs AR:$r), (ins AR:$s, imm7_22:$imm), + "clamps\t$r, $s, $imm", []>, Requires<[HasSEXT]> { + bits<4> imm; + + let t = imm; +} + //===----------------------------------------------------------------------===// // NSA Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaOperands.td b/llvm/lib/Target/Xtensa/XtensaOperands.td index 7810d0a36a354..6a9bf514be8d3 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperands.td +++ b/llvm/lib/Target/Xtensa/XtensaOperands.td @@ -168,11 +168,11 @@ def b4constu: Immediate; -def seimm7_22: Immediate= 7 && Imm <= 22; }], "Seimm7_22_AsmOperand"> { - let EncoderMethod = "getSeimm7_22OpValue"; - let DecoderMethod = "decodeSeimm7_22Operand"; +// imm7_22 predicate - Immediate in the range [7,22] for sign extend and clamps +def Imm7_22_AsmOperand: ImmAsmOperand<"imm7_22">; +def imm7_22: Immediate= 7 && Imm <= 22; }], "Imm7_22_AsmOperand"> { + let 
EncoderMethod = "getImm7_22OpValue"; + let DecoderMethod = "decodeImm7_22Operand"; } // select_256 predicate - Immediate in the range [0,255] diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp index 6ea6e500c07b9..64d61b7bbd83d 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp @@ -49,6 +49,7 @@ XtensaSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) { HasBoolean = false; HasLoop = false; HasSEXT = false; + HasCLAMPS = false; HasNSA = false; HasMINMAX = false; HasMul16 = false; diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.h b/llvm/lib/Target/Xtensa/XtensaSubtarget.h index 13be4d5280302..32773eb6a23ae 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.h +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.h @@ -54,6 +54,9 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { // Enable Xtensa Sign Extend option bool HasSEXT; + // Enable Xtensa CLAMPS option + bool HasCLAMPS; + // Enable Xtensa NSA option bool HasNSA; @@ -166,6 +169,8 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { bool hasSEXT() const { return HasSEXT; } + bool hasCLAMPS() const { return HasCLAMPS; } + bool hasNSA() const { return HasNSA; } bool hasMINMAX() const { return HasMINMAX; } diff --git a/llvm/test/MC/Xtensa/xtensa-clamps-invalid.s b/llvm/test/MC/Xtensa/xtensa-clamps-invalid.s new file mode 100644 index 0000000000000..524880a638afb --- /dev/null +++ b/llvm/test/MC/Xtensa/xtensa-clamps-invalid.s @@ -0,0 +1,9 @@ +# RUN: not llvm-mc %s -triple=xtensa -mattr=+clamps 2>&1 | FileCheck %s + +# imm7_22 +clamps a3, a2, 6 +# CHECK: :[[#@LINE-1]]:16: error: expected immediate in range [7, 22] + +# imm7_22 +clamps a3, a2, 23 +# CHECK: :[[#@LINE-1]]:16: error: expected immediate in range [7, 22] diff --git a/llvm/test/MC/Xtensa/xtensa-clamps-valid.s b/llvm/test/MC/Xtensa/xtensa-clamps-valid.s new file mode 100644 index 0000000000000..62b2858f04122 --- /dev/null +++ 
b/llvm/test/MC/Xtensa/xtensa-clamps-valid.s @@ -0,0 +1,12 @@ +# RUN: llvm-mc %s -triple=xtensa -mattr=+clamps -show-encoding \ +# RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s + +# Instruction format RRR +# CHECK-INST: clamps a3, a2, 7 +# CHECK: encoding: [0x00,0x32,0x33] +clamps a3, a2, 7 + +# Instruction format RRR +# CHECK-INST: clamps a3, a2, 22 +# CHECK: encoding: [0xf0,0x32,0x33] +clamps a3, a2, 22 diff --git a/llvm/test/MC/Xtensa/xtensa-sext-invalid.s b/llvm/test/MC/Xtensa/xtensa-sext-invalid.s new file mode 100644 index 0000000000000..aaf99e40549d1 --- /dev/null +++ b/llvm/test/MC/Xtensa/xtensa-sext-invalid.s @@ -0,0 +1,9 @@ +# RUN: not llvm-mc %s -triple=xtensa -mattr=+sext 2>&1 | FileCheck %s + +# imm7_22 +sext a3, a2, 6 +# CHECK: :[[#@LINE-1]]:14: error: expected immediate in range [7, 22] + +# imm7_22 +sext a3, a2, 23 +# CHECK: :[[#@LINE-1]]:14: error: expected immediate in range [7, 22] diff --git a/llvm/test/MC/Xtensa/xtensa-sext-valid.s b/llvm/test/MC/Xtensa/xtensa-sext-valid.s new file mode 100644 index 0000000000000..34111d5dadb9b --- /dev/null +++ b/llvm/test/MC/Xtensa/xtensa-sext-valid.s @@ -0,0 +1,12 @@ +# RUN: llvm-mc %s -triple=xtensa -mattr=+sext -show-encoding \ +# RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s + +# Instruction format RRR +# CHECK-INST: sext a3, a2, 7 +# CHECK: encoding: [0x00,0x32,0x23] +sext a3, a2, 7 + +# Instruction format RRR +# CHECK-INST: sext a3, a2, 22 +# CHECK: encoding: [0xf0,0x32,0x23] +sext a3, a2, 22 From ace4e75a0af753a36d6c5c00e4ac05766ed6aab8 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 23:38:32 +0300 Subject: [PATCH 155/289] [Xtensa] Connect `abs` to `llvm.abs` --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 17 ++++++++++++ llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 2 +- llvm/test/CodeGen/Xtensa/arith-intrinsics.ll | 27 +++++++++++++++++++ 3 files changed, 45 insertions(+), 1 deletion(-) create mode 100644 llvm/test/CodeGen/Xtensa/arith-intrinsics.ll diff --git 
a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 11a9a70eeb9a6..3b4dbfe8e00e4 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -161,6 +161,23 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setOperationAction(ISD::SDIVREM, MVT::i32, Expand); setOperationAction(ISD::UDIVREM, MVT::i32, Expand); + setOperationAction(ISD::SDIV, MVT::i64, Expand); + setOperationAction(ISD::UDIV, MVT::i64, Expand); + setOperationAction(ISD::SREM, MVT::i64, Expand); + setOperationAction(ISD::UREM, MVT::i64, Expand); + + // Xtensa doesn't support [ADD,SUB][E,C] + setOperationAction(ISD::ADDC, MVT::i32, Expand); + setOperationAction(ISD::ADDE, MVT::i32, Expand); + setOperationAction(ISD::SUBC, MVT::i32, Expand); + setOperationAction(ISD::SUBE, MVT::i32, Expand); + + setOperationAction(ISD::ABS, MVT::i32, Legal); + + setOperationAction(ISD::ADD, MVT::i64, Expand); + setOperationAction(ISD::SUB, MVT::i64, Expand); + + // Xtensa doesn't support s[hl,rl,ra]_parts setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index c766b57451ed0..da6e009e74f24 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -52,7 +52,7 @@ def SUBX4 : SUBX<0x0E, "subx4", [(set AR:$r, (sub (shl AR:$s, (i32 2)), AR:$t))] def SUBX8 : SUBX<0x0F, "subx8", [(set AR:$r, (sub (shl AR:$s, (i32 3)), AR:$t))]>; def ABS : RRR_Inst<0x00, 0x00, 0x06, (outs AR:$r), (ins AR:$t), - "abs\t$r, $t", []> { + "abs\t$r, $t", [(set AR:$r, (abs AR:$t))]> { let s = 0x1; } diff --git a/llvm/test/CodeGen/Xtensa/arith-intrinsics.ll b/llvm/test/CodeGen/Xtensa/arith-intrinsics.ll new file mode 100644 index 0000000000000..c4a0749a0ed1e --- /dev/null +++ 
b/llvm/test/CodeGen/Xtensa/arith-intrinsics.ll @@ -0,0 +1,27 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=XTENSA %s + +declare i32 @llvm.abs.i32(i32, i1) + +define i32 @abs_i32(i32 %a) { +; XTENSA-LABEL: abs_i32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: .cfi_def_cfa_offset 32 +; XTENSA-NEXT: abs a2, a2 +; XTENSA-NEXT: retw.n + %1 = tail call i32 @llvm.abs.i32(i32 %a, i1 false) + ret i32 %1 +} + +define i32 @abs_poison_i32(i32 %a) { +; XTENSA-LABEL: abs_poison_i32: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: .cfi_def_cfa_offset 32 +; XTENSA-NEXT: abs a2, a2 +; XTENSA-NEXT: retw.n + %1 = tail call i32 @llvm.abs.i32(i32 %a, i1 true) + ret i32 %1 +} From aa0b0170f51caf692b1f933f6dbf95c6df739b0c Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Sat, 29 Jul 2023 19:59:00 +0300 Subject: [PATCH 156/289] [Xtensa] ADD support of the CLAMPS/MINMAX in target parser. Implement support of the CLAMPS/MINMAX in Xtensa target parser. Fix clang CPUs test. 
--- clang/test/Driver/xtensa-cpus.c | 15 +++--- .../llvm/TargetParser/XtensaTargetParser.def | 20 ++++---- .../llvm/TargetParser/XtensaTargetParser.h | 46 ++++++++++--------- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 2 +- 4 files changed, 45 insertions(+), 38 deletions(-) diff --git a/clang/test/Driver/xtensa-cpus.c b/clang/test/Driver/xtensa-cpus.c index 7a93f4cba4b0c..44e380adbf7ca 100644 --- a/clang/test/Driver/xtensa-cpus.c +++ b/clang/test/Driver/xtensa-cpus.c @@ -10,8 +10,9 @@ // RUN: %clang -target xtensa -### -c %s 2>&1 -mcpu=esp32 | FileCheck -check-prefix=MCPU-ESP32 %s // MCPU-ESP32: "-target-cpu" "esp32" // MCPU-ESP32: "-target-feature" "+density" "-target-feature" "+fp" "-target-feature" "+windowed" "-target-feature" "+bool" -// MCPU-ESP32: "-target-feature" "+loop" "-target-feature" "+sext" "-target-feature" "+nsa" "-target-feature" "+mul32" -// MCPU-ESP32: "-target-feature" "+mul32high" "-target-feature" "+div32" "-target-feature" "+mac16" "-target-feature" "+dfpaccel" +// MCPU-ESP32: "-target-feature" "+loop" "-target-feature" "+sext" "-target-feature" "+nsa" "-target-feature" "+clamps" +// MCPU-ESP32: "-target-feature" "+minmax" "-target-feature" "+mul32" "-target-feature" "+mul32high" +// MCPU-ESP32: "-target-feature" "+div32" "-target-feature" "+mac16" "-target-feature" "+dfpaccel" // MCPU-ESP32: "-target-feature" "+s32c1i" "-target-feature" "+threadptr" "-target-feature" "+atomctl" "-target-feature" "+memctl" // MCPU-ESP32: "-target-feature" "+debug" "-target-feature" "+exception" "-target-feature" "+highpriinterrupts" // MCPU-ESP32: "-target-feature" "+coprocessor" "-target-feature" "+interrupt" "-target-feature" "+rvector" "-target-feature" "+timerint" @@ -20,7 +21,8 @@ // RUN: %clang -target xtensa -### -c %s 2>&1 -mcpu=esp32s2 | FileCheck -check-prefix=MCPU-ESP32S2 %s // MCPU-ESP32S2: "-target-cpu" "esp32s2" // MCPU-ESP32S2: "-target-feature" "+density" "-target-feature" "+windowed" "-target-feature" "+sext" "-target-feature" "+nsa" -// 
MCPU-ESP32S2: "-target-feature" "+mul32" "-target-feature" "+mul32high" "-target-feature" "+div32" "-target-feature" "+threadptr" +// MCPU-ESP32S2: "-target-feature" "+clamps" "-target-feature" "+minmax" "-target-feature" "+mul32" +// MCPU-ESP32S2: "-target-feature" "+mul32high" "-target-feature" "+div32" "-target-feature" "+threadptr" // MCPU-ESP32S2: "-target-feature" "+memctl" "-target-feature" "+debug" "-target-feature" "+exception" "-target-feature" "+highpriinterrupts" // MCPU-ESP32S2: "-target-feature" "+coprocessor" "-target-feature" "+interrupt" "-target-feature" "+rvector" "-target-feature" "+timerint" // MCPU-ESP32S2: "-target-feature" "+prid" "-target-feature" "+regprotect" "-target-feature" "+miscsr" "-target-feature" "+esp32s2" @@ -28,9 +30,10 @@ // RUN: %clang -target xtensa -### -c %s 2>&1 -mcpu=esp32s3 | FileCheck -check-prefix=MCPU-ESP32S3 %s // MCPU-ESP32S3: "-target-cpu" "esp32s3" // MCPU-ESP32S3: "-target-feature" "+density" "-target-feature" "+fp" "-target-feature" "+windowed" "-target-feature" "+bool" -// MCPU-ESP32S3: "-target-feature" "+loop" "-target-feature" "+sext" "-target-feature" "+nsa" "-target-feature" "+mul32" -// MCPU-ESP32S3: "-target-feature" "+mul32high" "-target-feature" "+div32" "-target-feature" "+mac16" "-target-feature" "+dfpaccel" -// MCPU-ESP32S3: "-target-feature" "+s32c1i" "-target-feature" "+threadptr" "-target-feature" "+atomctl" "-target-feature" "+memctl" +// MCPU-ESP32S3: "-target-feature" "+loop" "-target-feature" "+sext" "-target-feature" "+nsa" "-target-feature" "+clamps" +// MCPU-ESP32S3: "-target-feature" "+minmax" "-target-feature" "+mul32" "-target-feature" "+mul32high" "-target-feature" "+div32" +// MCPU-ESP32S3: "-target-feature" "+mac16" "-target-feature" "+dfpaccel" "-target-feature" "+s32c1i" +// MCPU-ESP32S3: "-target-feature" "+threadptr" "-target-feature" "+atomctl" "-target-feature" "+memctl" // MCPU-ESP32S3: "-target-feature" "+debug" "-target-feature" "+exception" "-target-feature" 
"+highpriinterrupts" // MCPU-ESP32S3: "-target-feature" "+coprocessor" "-target-feature" "+interrupt" "-target-feature" "+rvector" "-target-feature" "+timerint" // MCPU-ESP32S3: "-target-feature" "+prid" "-target-feature" "+regprotect" "-target-feature" "+miscsr" "-target-feature" "+esp32s3" diff --git a/llvm/include/llvm/TargetParser/XtensaTargetParser.def b/llvm/include/llvm/TargetParser/XtensaTargetParser.def index e46020700f2e2..b765b015c1265 100644 --- a/llvm/include/llvm/TargetParser/XtensaTargetParser.def +++ b/llvm/include/llvm/TargetParser/XtensaTargetParser.def @@ -21,6 +21,8 @@ XTENSA_FEATURE(FK_BOOLEAN, "bool") XTENSA_FEATURE(FK_LOOP, "loop") XTENSA_FEATURE(FK_SEXT, "sext") XTENSA_FEATURE(FK_NSA, "nsa") +XTENSA_FEATURE(FK_CLAMPS, "clamps") +XTENSA_FEATURE(FK_MINMAX, "minmax") XTENSA_FEATURE(FK_MUL32, "mul32") XTENSA_FEATURE(FK_MUL32HIGH, "mul32high") XTENSA_FEATURE(FK_DIV32, "div32") @@ -57,18 +59,18 @@ XTENSA_CPU(ESP8266, {"esp8266"}, FK_INTERRUPT | FK_RVECTOR | FK_TIMERINT | FK_REGPROTECT | FK_PRID)) XTENSA_CPU(ESP32, {"esp32"}, (FK_DENSITY | FK_FP | FK_LOOP | FK_MAC16 | FK_WINDOWED | FK_BOOLEAN | - FK_SEXT | FK_NSA | FK_MUL32 | FK_MUL32HIGH | FK_DFPACCEL | FK_S32C1I | FK_THREADPTR | FK_DIV32 | - FK_ATOMCTL | FK_MEMCTL | FK_DEBUG | FK_EXCEPTION | FK_HIGHPRIINTERRUPTS | FK_COPROCESSOR | - FK_INTERRUPT | FK_RVECTOR | FK_TIMERINT | FK_PRID | FK_REGPROTECT | FK_MISCSR)) + FK_SEXT | FK_NSA | FK_CLAMPS | FK_MINMAX | FK_MUL32 | FK_MUL32HIGH | FK_DFPACCEL | FK_S32C1I | + FK_THREADPTR | FK_DIV32 | FK_ATOMCTL | FK_MEMCTL | FK_DEBUG | FK_EXCEPTION | FK_HIGHPRIINTERRUPTS | + FK_COPROCESSOR | FK_INTERRUPT | FK_RVECTOR | FK_TIMERINT | FK_PRID | FK_REGPROTECT | FK_MISCSR)) XTENSA_CPU(ESP32S2, {"esp32s2"}, - (FK_DENSITY | FK_WINDOWED | FK_SEXT | FK_NSA | FK_MUL32 | FK_MUL32HIGH | FK_THREADPTR | FK_DIV32 | - FK_MEMCTL | FK_DEBUG | FK_EXCEPTION | FK_HIGHPRIINTERRUPTS | FK_COPROCESSOR | FK_INTERRUPT | - FK_RVECTOR | FK_TIMERINT | FK_PRID | FK_REGPROTECT | FK_MISCSR | 
FK_ESP32S2OPS)) + (FK_DENSITY | FK_WINDOWED | FK_SEXT | FK_NSA | FK_CLAMPS | FK_MINMAX | FK_MUL32 | FK_MUL32HIGH | + FK_THREADPTR | FK_DIV32 | FK_MEMCTL | FK_DEBUG | FK_EXCEPTION | FK_HIGHPRIINTERRUPTS | FK_COPROCESSOR | + FK_INTERRUPT | FK_RVECTOR | FK_TIMERINT | FK_PRID | FK_REGPROTECT | FK_MISCSR | FK_ESP32S2OPS)) XTENSA_CPU(ESP32S3, {"esp32s3"}, (FK_DENSITY | FK_FP | FK_LOOP | FK_MAC16 | FK_WINDOWED | FK_BOOLEAN | - FK_SEXT | FK_NSA | FK_MUL32 | FK_MUL32HIGH | FK_DFPACCEL | FK_S32C1I | FK_THREADPTR | FK_DIV32 | - FK_ATOMCTL | FK_MEMCTL | FK_DEBUG | FK_EXCEPTION | FK_HIGHPRIINTERRUPTS | FK_COPROCESSOR | - FK_INTERRUPT | FK_RVECTOR | FK_TIMERINT | FK_PRID | FK_REGPROTECT | FK_MISCSR | + FK_SEXT | FK_NSA | FK_CLAMPS | FK_MINMAX | FK_MUL32 | FK_MUL32HIGH | FK_DFPACCEL | FK_S32C1I | + FK_THREADPTR | FK_DIV32 | FK_ATOMCTL | FK_MEMCTL | FK_DEBUG | FK_EXCEPTION | FK_HIGHPRIINTERRUPTS | + FK_COPROCESSOR | FK_INTERRUPT | FK_RVECTOR | FK_TIMERINT | FK_PRID | FK_REGPROTECT | FK_MISCSR | FK_ESP32S3OPS)) #undef XTENSA_CPU diff --git a/llvm/include/llvm/TargetParser/XtensaTargetParser.h b/llvm/include/llvm/TargetParser/XtensaTargetParser.h index b2d642b2d63ef..d4e639005a5a2 100644 --- a/llvm/include/llvm/TargetParser/XtensaTargetParser.h +++ b/llvm/include/llvm/TargetParser/XtensaTargetParser.h @@ -36,28 +36,30 @@ enum FeatureKind : uint64_t { FK_LOOP = 1 << 5, FK_SEXT = 1 << 6, FK_NSA = 1 << 7, - FK_MUL32 = 1 << 8, - FK_MUL32HIGH = 1 << 9, - FK_DIV32 = 1 << 10, - FK_MAC16 = 1 << 11, - FK_DFPACCEL = 1 << 12, - FK_S32C1I = 1 << 13, - FK_THREADPTR = 1 << 14, - FK_EXTENDEDL32R = 1 << 15, - FK_ATOMCTL = 1 << 16, - FK_MEMCTL = 1 << 17, - FK_DEBUG = 1 << 18, - FK_EXCEPTION = 1 << 19, - FK_HIGHPRIINTERRUPTS = 1 << 20, - FK_COPROCESSOR = 1 << 21, - FK_INTERRUPT = 1 << 22, - FK_RVECTOR = 1 << 23, - FK_TIMERINT = 1 << 24, - FK_PRID = 1 << 25, - FK_REGPROTECT = 1 << 26, - FK_MISCSR = 1 << 27, - FK_ESP32S2OPS = 1 << 28, - FK_ESP32S3OPS = 1 << 29 + FK_CLAMPS = 1 << 8, + FK_MINMAX = 1 << 
9, + FK_MUL32 = 1 << 10, + FK_MUL32HIGH = 1 << 11, + FK_DIV32 = 1 << 12, + FK_MAC16 = 1 << 13, + FK_DFPACCEL = 1 << 14, + FK_S32C1I = 1 << 15, + FK_THREADPTR = 1 << 16, + FK_EXTENDEDL32R = 1 << 17, + FK_ATOMCTL = 1 << 18, + FK_MEMCTL = 1 << 19, + FK_DEBUG = 1 << 20, + FK_EXCEPTION = 1 << 21, + FK_HIGHPRIINTERRUPTS = 1 << 22, + FK_COPROCESSOR = 1 << 23, + FK_INTERRUPT = 1 << 24, + FK_RVECTOR = 1 << 25, + FK_TIMERINT = 1 << 26, + FK_PRID = 1 << 27, + FK_REGPROTECT = 1 << 28, + FK_MISCSR = 1 << 29, + FK_ESP32S2OPS = 1 << 30, + FK_ESP32S3OPS = 1ULL << 31 }; CPUKind parseCPUKind(StringRef CPU); diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index da6e009e74f24..1b1a102e0ee39 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1293,7 +1293,7 @@ def SEXT : RRR_Inst<0x00, 0x03, 0x02, (outs AR:$r), (ins AR:$s, imm7_22:$imm), //===----------------------------------------------------------------------===// def CLAMPS : RRR_Inst<0x00, 0x03, 0x03, (outs AR:$r), (ins AR:$s, imm7_22:$imm), - "clamps\t$r, $s, $imm", []>, Requires<[HasSEXT]> { + "clamps\t$r, $s, $imm", []>, Requires<[HasCLAMPS]> { bits<4> imm; let t = imm; From 4cd5e090d0aadc49c2651f445f93ff2fe5be3d12 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 23:40:27 +0300 Subject: [PATCH 157/289] [Xtensa] Handle musttail --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 3b4dbfe8e00e4..492cfc6824568 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -22,6 +22,7 @@ #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" #include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/DiagnosticInfo.h" #include 
"llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" @@ -991,6 +992,12 @@ SDValue XtensaTargetLowering::LowerFormalArguments( return Chain; } +static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) { + MachineFunction &MF = DAG.getMachineFunction(); + DAG.getContext()->diagnose( + DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc())); +} + SDValue XtensaTargetLowering::LowerCall(CallLoweringInfo &CLI, SmallVectorImpl &InVals) const { @@ -1010,7 +1017,11 @@ XtensaTargetLowering::LowerCall(CallLoweringInfo &CLI, const TargetFrameLowering *TFL = Subtarget.getFrameLowering(); // TODO: Support tail call optimization. - IsTailCall = false; + if (IsTailCall) { + if (CLI.CB && CLI.CB->isMustTailCall()) + fail(DL, DAG, "tail call is not implemented"); + IsTailCall = false; + } // Analyze the operands of the call, assigning locations to each operand. SmallVector ArgLocs; From d3acb44cfe757f3e00e3c84b0404c1008afbd27b Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 23:43:55 +0300 Subject: [PATCH 158/289] [Xtensa] Implement CTLZ/CTTZ with NSAU Close https://github.com/espressif/llvm-project/pull/77 --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 22 ++++-- llvm/lib/Target/Xtensa/XtensaISelLowering.h | 5 ++ llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 3 +- llvm/test/CodeGen/Xtensa/ctlz-cttz.ll | 67 +++++++++++++++++++ llvm/test/MC/Xtensa/xtensa-valid-nsa.s | 12 ++++ 5 files changed, 104 insertions(+), 5 deletions(-) create mode 100644 llvm/test/CodeGen/Xtensa/ctlz-cttz.ll create mode 100644 llvm/test/MC/Xtensa/xtensa-valid-nsa.s diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 492cfc6824568..46d50ff0c91bd 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -192,10 +192,12 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, 
setOperationAction(ISD::ROTL, MVT::i32, Expand); setOperationAction(ISD::ROTR, MVT::i32, Expand); setOperationAction(ISD::CTPOP, MVT::i32, Custom); - setOperationAction(ISD::CTTZ, MVT::i32, Expand); - setOperationAction(ISD::CTLZ, MVT::i32, Expand); - setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); - setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand); + setOperationAction({ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF}, MVT::i32, Expand); + if (Subtarget.hasNSA()) + setOperationAction(ISD::CTLZ, MVT::i32, Legal); + else + setOperationAction({ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF}, MVT::i32, Expand); + setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, MVT::i32, Subtarget.hasMINMAX() ? Legal : Expand); @@ -407,6 +409,18 @@ bool XtensaTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, return false; } +bool XtensaTargetLowering::isCheapToSpeculateCtlz(Type *) const { + return Subtarget.hasNSA(); +} + +bool XtensaTargetLowering::isCheapToSpeculateCttz(Type *) const { + return Subtarget.hasNSA(); +} + +bool XtensaTargetLowering::isCtlzFast() const { + return Subtarget.hasNSA(); +} + /// If a physical register, this returns the register that receives the /// exception address on entry to an EH pad. Register XtensaTargetLowering::getExceptionPointerRegister( diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index 2340323ae9e23..f64a0e415dd0a 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -120,6 +120,11 @@ class XtensaTargetLowering : public TargetLowering { EVT VT) const override; bool isFNegFree(EVT VT) const override; + bool isCheapToSpeculateCtlz(Type *Ty) const override; + + bool isCheapToSpeculateCttz(Type *Ty) const override; + + bool isCtlzFast() const override; /// If a physical register, this returns the register that receives the /// exception address on entry to an EH pad. 
diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 1b1a102e0ee39..13cd99cd9a42c 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1309,7 +1309,8 @@ def NSA : RRR_Inst<0x00, 0x00, 0x04, (outs AR:$t), (ins AR:$s), } def NSAU : RRR_Inst<0x00, 0x00, 0x04, (outs AR:$t), (ins AR:$s), - "nsau\t$t, $s", []>, Requires<[HasNSA]> { + "nsau\t$t, $s", + [(set AR:$t, (ctlz AR:$s))]>, Requires<[HasNSA]> { let r = 0xF; } diff --git a/llvm/test/CodeGen/Xtensa/ctlz-cttz.ll b/llvm/test/CodeGen/Xtensa/ctlz-cttz.ll new file mode 100644 index 0000000000000..8008ba354e6ab --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/ctlz-cttz.ll @@ -0,0 +1,67 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=XTENSA %s + +declare i32 @llvm.ctlz.i32(i32, i1) + +define i32 @test1_ctlz(i32 %v) { +; XTENSA-LABEL: test1_ctlz: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: .cfi_def_cfa_offset 32 +; XTENSA-NEXT: nsau a2, a2 +; XTENSA-NEXT: retw.n + %1 = tail call i32 @llvm.ctlz.i32(i32 %v, i1 false) + ret i32 %1 +} + +define i32 @test2_ctlz(i32 %v) { +; XTENSA-LABEL: test2_ctlz: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: .cfi_def_cfa_offset 32 +; XTENSA-NEXT: nsau a2, a2 +; XTENSA-NEXT: retw.n + %cnt = tail call i32 @llvm.ctlz.i32(i32 %v, i1 true) + %tobool = icmp eq i32 %v, 0 + %cond = select i1 %tobool, i32 32, i32 %cnt + ret i32 %cond +} + +declare i32 @llvm.cttz.i32(i32, i1) + +define i32 @test1_cttz(i32 %v) { +; XTENSA-LABEL: test1_cttz: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: .cfi_def_cfa_offset 32 +; XTENSA-NEXT: movi.n a8, -1 +; XTENSA-NEXT: xor a8, a2, a8 +; XTENSA-NEXT: addi.n a9, a2, -1 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: nsau a8, a8 +; XTENSA-NEXT: movi.n a9, 32 +; XTENSA-NEXT: sub 
a2, a9, a8 +; XTENSA-NEXT: retw.n + %1 = tail call i32 @llvm.cttz.i32(i32 %v, i1 false) + ret i32 %1 +} + +define i32 @test2_cttz(i32 %v) { +; XTENSA-LABEL: test2_cttz: +; XTENSA: # %bb.0: +; XTENSA-NEXT: entry a1, 32 +; XTENSA-NEXT: .cfi_def_cfa_offset 32 +; XTENSA-NEXT: movi.n a8, -1 +; XTENSA-NEXT: xor a8, a2, a8 +; XTENSA-NEXT: addi.n a9, a2, -1 +; XTENSA-NEXT: and a8, a8, a9 +; XTENSA-NEXT: nsau a8, a8 +; XTENSA-NEXT: movi.n a9, 32 +; XTENSA-NEXT: sub a2, a9, a8 +; XTENSA-NEXT: retw.n + %cnt = tail call i32 @llvm.cttz.i32(i32 %v, i1 true) + %tobool = icmp eq i32 %v, 0 + %cond = select i1 %tobool, i32 32, i32 %cnt + ret i32 %cond +} diff --git a/llvm/test/MC/Xtensa/xtensa-valid-nsa.s b/llvm/test/MC/Xtensa/xtensa-valid-nsa.s new file mode 100644 index 0000000000000..150818b2dbaa9 --- /dev/null +++ b/llvm/test/MC/Xtensa/xtensa-valid-nsa.s @@ -0,0 +1,12 @@ +# RUN: llvm-mc %s -triple=xtensa -mattr=+nsa -show-encoding \ +# RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s + +# Instruction format RRR +# CHECK-INST: nsa a3, a2 +# CHECK: encoding: [0x30,0xe2,0x40] +nsa a3, a2 + +# Instruction format RRR +# CHECK-INST: nsau a3, a2 +# CHECK: encoding: [0x30,0xf2,0x40] +nsau a3, a2 From d6e51cd51746b41a13fdcf27e13eee7aefd864e2 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 20 Aug 2024 23:45:04 +0300 Subject: [PATCH 159/289] [Xtensa] Add spill slot for smaller estimated stack size. 
--- llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp index de0a50a4c1d35..b0e699cc02b7c 100644 --- a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp @@ -20,6 +20,8 @@ #include "llvm/CodeGen/RegisterScavenging.h" #include "llvm/IR/Function.h" +#define STACK_SIZE_THRESHOLD 100 + using namespace llvm; XtensaFrameLowering::XtensaFrameLowering(const XtensaSubtarget &STI) @@ -352,7 +354,8 @@ void XtensaFrameLowering::processFunctionBeforeFrameFinalized( // In WinABI mode add register scavenging slot // FIXME: It may be posssible to add spill slot by more optimal way - if (STI.isWinABI() && (MF.getFrameInfo().estimateStackSize(MF) > 256)) { + if (STI.isWinABI() && + (MF.getFrameInfo().estimateStackSize(MF) > STACK_SIZE_THRESHOLD)) { MachineFrameInfo &MFI = MF.getFrameInfo(); const TargetRegisterClass &RC = Xtensa::ARRegClass; const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); From eec803335fbc34c3f1810381e099084b59142813 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 27 Sep 2023 13:51:47 +0300 Subject: [PATCH 160/289] [Xtensa] Fix decoder namespace for ESP32S3. Set decoder namespace to "ESP32S3" for ESP32S3 instructions to avoid possible decoding conflicts in future. 
--- .../Disassembler/XtensaDisassembler.cpp | 57 ++++++++++++++++++- llvm/lib/Target/Xtensa/XtensaInstrFormats.td | 2 + 2 files changed, 58 insertions(+), 1 deletion(-) diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index e0c9186bbec8c..8ea0991b5040e 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -42,6 +42,10 @@ class XtensaDisassembler : public MCDisassembler { return STI.hasFeature(Xtensa::FeatureDensity); } + bool hasESP32S3Ops() const { + return STI.getFeatureBits()[Xtensa::FeatureESP32S3Ops]; + } + DecodeStatus getInstruction(MCInst &Instr, uint64_t &Size, ArrayRef Bytes, uint64_t Address, raw_ostream &CStream) const override; @@ -787,7 +791,25 @@ static DecodeStatus readInstruction24(ArrayRef Bytes, uint64_t Address, Insn = (Bytes[2] << 16) | (Bytes[1] << 8) | (Bytes[0] << 0); } - Size = 3; + return MCDisassembler::Success; +} + +/// Read three bytes from the ArrayRef and return 32 bit data +static DecodeStatus readInstruction32(ArrayRef Bytes, uint64_t Address, + uint64_t &Size, uint32_t &Insn, + bool IsLittleEndian) { + // We want to read exactly 4 Bytes of data. 
+ if (Bytes.size() < 4) { + Size = 0; + return MCDisassembler::Fail; + } + + if (!IsLittleEndian) { + report_fatal_error("Big-endian mode currently is not supported!"); + } else { + Insn = (Bytes[3] << 24) | (Bytes[2] << 16) | (Bytes[1] << 8) | (Bytes[0] << 0); + } + return MCDisassembler::Success; } @@ -800,6 +822,7 @@ DecodeStatus XtensaDisassembler::getInstruction(MCInst &MI, uint64_t &Size, uint32_t Insn; DecodeStatus Result; + // Parse 16-bit instructions if (hasDensity()) { Result = readInstruction16(Bytes, Address, Size, Insn, IsLittleEndian); if (Result == MCDisassembler::Fail) @@ -812,10 +835,42 @@ DecodeStatus XtensaDisassembler::getInstruction(MCInst &MI, uint64_t &Size, } } + // Parse Core 24-bit instructions Result = readInstruction24(Bytes, Address, Size, Insn, IsLittleEndian); if (Result == MCDisassembler::Fail) return MCDisassembler::Fail; LLVM_DEBUG(dbgs() << "Trying Xtensa 24-bit instruction table :\n"); Result = decodeInstruction(DecoderTable24, MI, Insn, Address, this, STI); + if (Result != MCDisassembler::Fail) { + Size = 3; + return Result; + } + + if (hasESP32S3Ops()) { + // Parse ESP32S3 24-bit instructions + Result = readInstruction24(Bytes, Address, Size, Insn, IsLittleEndian); + if (Result == MCDisassembler::Fail) + return MCDisassembler::Fail; + LLVM_DEBUG(dbgs() << "Trying ESP32S3 table (24-bit opcodes):\n"); + Result = decodeInstruction(DecoderTableESP32S324, MI, Insn, + Address, this, STI); + if (Result != MCDisassembler::Fail) { + Size = 3; + return Result; + } + + // Parse ESP32S3 32-bit instructions + Result = readInstruction32(Bytes, Address, Size, Insn, IsLittleEndian); + if (Result == MCDisassembler::Fail) + return MCDisassembler::Fail; + LLVM_DEBUG(dbgs() << "Trying ESP32S3 table (32-bit opcodes):\n"); + Result = decodeInstruction(DecoderTableESP32S332, MI, Insn, + Address, this, STI); + if (Result != MCDisassembler::Fail) { + Size = 4; + return Result; + } + } + return Result; } diff --git 
a/llvm/lib/Target/Xtensa/XtensaInstrFormats.td b/llvm/lib/Target/Xtensa/XtensaInstrFormats.td index beb15c3c5647b..2590c90688b8e 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrFormats.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrFormats.td @@ -213,6 +213,7 @@ class RI6_Inst op0, bits<1> i, bits<1> z, dag outs, dag ins, class EE_Inst24 pattern, InstrItinClass itin = NoItinerary> : XtensaInst24 { + let DecoderNamespace = "ESP32S3"; } class EE_Inst32 pattern, @@ -220,6 +221,7 @@ class EE_Inst32 pattern, : XtensaInst<4, outs, ins, asmstr, pattern, itin> { field bits<32> Inst; field bits<32> SoftFail = 0; + let DecoderNamespace = "ESP32S3"; } // Pseudo instructions From cf29a6aacbcb476850a91cea9265ccac12a29560 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 22 Aug 2023 23:34:08 +0300 Subject: [PATCH 161/289] [Xtensa] Fix hardware loop --- llvm/lib/Target/Xtensa/XtensaFixupHWLoops.cpp | 43 +++++++++++++++++-- .../lib/Target/Xtensa/XtensaHardwareLoops.cpp | 2 +- 2 files changed, 40 insertions(+), 5 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaFixupHWLoops.cpp b/llvm/lib/Target/Xtensa/XtensaFixupHWLoops.cpp index 7da0886cd072a..eadceace68041 100644 --- a/llvm/lib/Target/Xtensa/XtensaFixupHWLoops.cpp +++ b/llvm/lib/Target/Xtensa/XtensaFixupHWLoops.cpp @@ -302,10 +302,45 @@ bool XtensaFixupHwLoops::fixupLoopInstrs(MachineLoop *L) { .addMBB(LoopEnd); LoopEnd->addSuccessor(LoopEnd); } else { - BuildMI(*PMBB, PII, DL, TII->get(Xtensa::LOOPEND)).addMBB(PMBB); - PMBB->addSuccessor(PMBB); - BuildMI(*PMBB, PII, DL, TII->get(Xtensa::NOP)); - LoopEnd = PMBB; + bool NeedBlockForJump = false; + // Check for branches to the loop end basic block from + // predecessors + for (auto I = PMBB->pred_begin(), E = PMBB->pred_end(); I != E; + ++I) { + MachineBasicBlock *PLEMBB = *I; + MachineBasicBlock *TBB = nullptr, *FBB = nullptr; + SmallVector Cond; + if (!TII->analyzeBranch(*PLEMBB, TBB, FBB, Cond)) { + if (TBB == PMBB) { + NeedBlockForJump = true; + break; + } + } else { 
+ NeedBlockForJump = true; + break; + } + } + // Create block and insert it before loop end address as + // target for jump/branch instruction to avoid premature exit from + // loop + if (NeedBlockForJump) { + LoopEnd = MF->CreateMachineBasicBlock(); + MF->insert(++(PMBB->getIterator()), LoopEnd); + LoopEnd->transferSuccessors(PMBB); + LoopEnd->splice(LoopEnd->end(), PMBB, PII, PMBB->end()); + PMBB->addSuccessor(LoopEnd); + BuildMI(*PMBB, PMBB->end(), DL, TII->get(Xtensa::NOP)); + + BuildMI(*LoopEnd, LoopEnd->begin(), DL, + TII->get(Xtensa::LOOPEND)) + .addMBB(LoopEnd); + LoopEnd->addSuccessor(LoopEnd); + } else { + BuildMI(*PMBB, PII, DL, TII->get(Xtensa::LOOPEND)).addMBB(PMBB); + PMBB->addSuccessor(PMBB); + BuildMI(*PMBB, PII, DL, TII->get(Xtensa::NOP)); + LoopEnd = PMBB; + } } Changed = true; diff --git a/llvm/lib/Target/Xtensa/XtensaHardwareLoops.cpp b/llvm/lib/Target/Xtensa/XtensaHardwareLoops.cpp index baba53e70e9af..4d930692dc833 100644 --- a/llvm/lib/Target/Xtensa/XtensaHardwareLoops.cpp +++ b/llvm/lib/Target/Xtensa/XtensaHardwareLoops.cpp @@ -360,7 +360,7 @@ bool XtensaHardwareLoops::processLoop(MachineLoop *L) { } bool XtensaHardwareLoops::checkLoopSize(MachineLoop *L) { - uint64_t LoopSize = 0; + uint64_t LoopSize = 3; //Reserve space for possible NOP for (auto *MBB : L->getBlocks()) { uint64_t BlockSize = 0; From 7bf036e2f70668c305d6f235097ca752accedb66 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 21 Aug 2024 08:44:15 +0300 Subject: [PATCH 162/289] [Xtensa] Fix _FAST_ int types. 
--- clang/include/clang/Basic/LangOptions.def | 2 + clang/include/clang/Driver/Options.td | 5 + clang/lib/Driver/ToolChains/Clang.cpp | 3 + clang/lib/Frontend/InitPreprocessor.cpp | 26 +- clang/test/Preprocessor/init.c | 606 ++++++++++++++++++++++ 5 files changed, 632 insertions(+), 10 deletions(-) diff --git a/clang/include/clang/Basic/LangOptions.def b/clang/include/clang/Basic/LangOptions.def index 834a6f6cd43e3..7bb871fb777df 100644 --- a/clang/include/clang/Basic/LangOptions.def +++ b/clang/include/clang/Basic/LangOptions.def @@ -493,6 +493,8 @@ LANGOPT(RelativeCXXABIVTables, 1, 0, LANGOPT(OmitVTableRTTI, 1, 0, "Use an ABI-incompatible v-table layout that omits the RTTI component") +LANGOPT(FastIntMin32, 1, 0, "Minimum width of _FAST_ int type") + LANGOPT(VScaleMin, 32, 0, "Minimum vscale value") LANGOPT(VScaleMax, 32, 0, "Maximum vscale value") diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 8260f21144b2c..33ee267518819 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -6407,6 +6407,11 @@ def mfix_esp32_psram_cache_strategy_EQ : Joined<["-"], "mfix-esp32-psram-cache-s Values<"memw, nops">; def mlongcalls : Flag<["-"], "mlongcalls">, Group; def mtext_section_literals : Flag<["-"], "mtext-section-literals">, Group; +def mfast_int_min32 : Flag<["-"], "mfast-int-min32">, Group, + Flags<[NoXarchOption]>, + Visibility<[ClangOption, CC1Option]>, + HelpText<"Set bit width of the _FAST_ int type to 32">, + MarshallingInfoFlag>; // These are legacy user-facing driver-level option spellings. 
They are always // aliases for options that are spelled using the more common Unix / GNU flag diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 50f8dd593f7a3..b02a1db5d9520 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -2449,6 +2449,9 @@ void Clang::AddXtensaTargetArgs(const ArgList &Args, CmdArgs.push_back("-mllvm"); CmdArgs.push_back("-mtext-section-literals"); } + + if (Args.getLastArg(options::OPT_mfast_int_min32)) + CmdArgs.push_back("-mfast-int-min32"); } void Clang::DumpCompilationDatabase(Compilation &C, StringRef Filename, diff --git a/clang/lib/Frontend/InitPreprocessor.cpp b/clang/lib/Frontend/InitPreprocessor.cpp index 3ed7243deba8a..f6c4541167654 100644 --- a/clang/lib/Frontend/InitPreprocessor.cpp +++ b/clang/lib/Frontend/InitPreprocessor.cpp @@ -293,10 +293,13 @@ static void DefineLeastWidthIntType(const LangOptions &LangOpts, static void DefineFastIntType(const LangOptions &LangOpts, unsigned TypeWidth, bool IsSigned, const TargetInfo &TI, - MacroBuilder &Builder) { + MacroBuilder &Builder, unsigned MinFastTypeWidth) { + if (MinFastTypeWidth < TypeWidth) + MinFastTypeWidth = TypeWidth; // stdint.h currently defines the fast int types as equivalent to the least // types. 
- TargetInfo::IntType Ty = TI.getLeastIntTypeByWidth(TypeWidth, IsSigned); + TargetInfo::IntType Ty = + TI.getLeastIntTypeByWidth(MinFastTypeWidth, IsSigned); if (Ty == TargetInfo::NoInt) return; @@ -1305,14 +1308,17 @@ static void InitializePredefinedMacros(const TargetInfo &TI, DefineLeastWidthIntType(LangOpts, 64, true, TI, Builder); DefineLeastWidthIntType(LangOpts, 64, false, TI, Builder); - DefineFastIntType(LangOpts, 8, true, TI, Builder); - DefineFastIntType(LangOpts, 8, false, TI, Builder); - DefineFastIntType(LangOpts, 16, true, TI, Builder); - DefineFastIntType(LangOpts, 16, false, TI, Builder); - DefineFastIntType(LangOpts, 32, true, TI, Builder); - DefineFastIntType(LangOpts, 32, false, TI, Builder); - DefineFastIntType(LangOpts, 64, true, TI, Builder); - DefineFastIntType(LangOpts, 64, false, TI, Builder); + unsigned MinFastTypeWidth = 8; + if (LangOpts.FastIntMin32) + MinFastTypeWidth = 32; + DefineFastIntType(LangOpts, 8, true, TI, Builder, MinFastTypeWidth); + DefineFastIntType(LangOpts, 8, false, TI, Builder, MinFastTypeWidth); + DefineFastIntType(LangOpts, 16, true, TI, Builder, MinFastTypeWidth); + DefineFastIntType(LangOpts, 16, false, TI, Builder, MinFastTypeWidth); + DefineFastIntType(LangOpts, 32, true, TI, Builder, MinFastTypeWidth); + DefineFastIntType(LangOpts, 32, false, TI, Builder, MinFastTypeWidth); + DefineFastIntType(LangOpts, 64, true, TI, Builder, MinFastTypeWidth); + DefineFastIntType(LangOpts, 64, false, TI, Builder, MinFastTypeWidth); Builder.defineMacro("__USER_LABEL_PREFIX__", TI.getUserLabelPrefix()); diff --git a/clang/test/Preprocessor/init.c b/clang/test/Preprocessor/init.c index c177975114332..8605cef40e4c4 100644 --- a/clang/test/Preprocessor/init.c +++ b/clang/test/Preprocessor/init.c @@ -2742,3 +2742,609 @@ // RISCV64-LINUX: #define __unix__ 1 // RISCV64-LINUX: #define linux 1 // RISCV64-LINUX: #define unix 1 + +// RUN: %clang_cc1 -E -dM -ffreestanding -triple=xtensa-esp-unknown-elf < /dev/null \ +// RUN: | FileCheck 
-match-full-lines -check-prefix=XTENSA %s +// XTENSA: #define _ILP32 1 +// XTENSA: #define __ATOMIC_ACQUIRE 2 +// XTENSA: #define __ATOMIC_ACQ_REL 4 +// XTENSA: #define __ATOMIC_CONSUME 1 +// XTENSA: #define __ATOMIC_RELAXED 0 +// XTENSA: #define __ATOMIC_RELEASE 3 +// XTENSA: #define __ATOMIC_SEQ_CST 5 +// XTENSA: #define __BIGGEST_ALIGNMENT__ 4 +// XTENSA: #define __BITINT_MAXWIDTH__ 128 +// XTENSA: #define __BOOL_WIDTH__ 8 +// XTENSA: #define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__ +// XTENSA: #define __CHAR16_TYPE__ unsigned short +// XTENSA: #define __CHAR32_TYPE__ unsigned int +// XTENSA: #define __CHAR_BIT__ 8 +// XTENSA: #define __CLANG_ATOMIC_BOOL_LOCK_FREE 2 +// XTENSA: #define __CLANG_ATOMIC_CHAR16_T_LOCK_FREE 2 +// XTENSA: #define __CLANG_ATOMIC_CHAR32_T_LOCK_FREE 2 +// XTENSA: #define __CLANG_ATOMIC_CHAR_LOCK_FREE 2 +// XTENSA: #define __CLANG_ATOMIC_INT_LOCK_FREE 2 +// XTENSA: #define __CLANG_ATOMIC_LLONG_LOCK_FREE 1 +// XTENSA: #define __CLANG_ATOMIC_LONG_LOCK_FREE 2 +// XTENSA: #define __CLANG_ATOMIC_POINTER_LOCK_FREE 2 +// XTENSA: #define __CLANG_ATOMIC_SHORT_LOCK_FREE 2 +// XTENSA: #define __CLANG_ATOMIC_WCHAR_T_LOCK_FREE 2 +// XTENSA: #define __CONSTANT_CFSTRINGS__ 1 +// XTENSA: #define __DBL_DECIMAL_DIG__ 17 +// XTENSA: #define __DBL_DENORM_MIN__ 4.9406564584124654e-324 +// XTENSA: #define __DBL_DIG__ 15 +// XTENSA: #define __DBL_EPSILON__ 2.2204460492503131e-16 +// XTENSA: #define __DBL_HAS_DENORM__ 1 +// XTENSA: #define __DBL_HAS_INFINITY__ 1 +// XTENSA: #define __DBL_HAS_QUIET_NAN__ 1 +// XTENSA: #define __DBL_MANT_DIG__ 53 +// XTENSA: #define __DBL_MAX_10_EXP__ 308 +// XTENSA: #define __DBL_MAX_EXP__ 1024 +// XTENSA: #define __DBL_MAX__ 1.7976931348623157e+308 +// XTENSA: #define __DBL_MIN_10_EXP__ (-307) +// XTENSA: #define __DBL_MIN_EXP__ (-1021) +// XTENSA: #define __DBL_MIN__ 2.2250738585072014e-308 +// XTENSA: #define __DECIMAL_DIG__ __LDBL_DECIMAL_DIG__ +// XTENSA: #define __ELF__ 1 +// XTENSA: #define __FINITE_MATH_ONLY__ 0 +// XTENSA: 
#define __FLT_DECIMAL_DIG__ 9 +// XTENSA: #define __FLT_DENORM_MIN__ 1.40129846e-45F +// XTENSA: #define __FLT_DIG__ 6 +// XTENSA: #define __FLT_EPSILON__ 1.19209290e-7F +// XTENSA: #define __FLT_HAS_DENORM__ 1 +// XTENSA: #define __FLT_HAS_INFINITY__ 1 +// XTENSA: #define __FLT_HAS_QUIET_NAN__ 1 +// XTENSA: #define __FLT_MANT_DIG__ 24 +// XTENSA: #define __FLT_MAX_10_EXP__ 38 +// XTENSA: #define __FLT_MAX_EXP__ 128 +// XTENSA: #define __FLT_MAX__ 3.40282347e+38F +// XTENSA: #define __FLT_MIN_10_EXP__ (-37) +// XTENSA: #define __FLT_MIN_EXP__ (-125) +// XTENSA: #define __FLT_MIN__ 1.17549435e-38F +// XTENSA: #define __FLT_RADIX__ 2 +// XTENSA: #define __ILP32__ 1 +// XTENSA: #define __INT16_C_SUFFIX__ +// XTENSA: #define __INT16_FMTd__ "hd" +// XTENSA: #define __INT16_FMTi__ "hi" +// XTENSA: #define __INT16_MAX__ 32767 +// XTENSA: #define __INT16_TYPE__ short +// XTENSA: #define __INT32_C_SUFFIX__ +// XTENSA: #define __INT32_FMTd__ "d" +// XTENSA: #define __INT32_FMTi__ "i" +// XTENSA: #define __INT32_MAX__ 2147483647 +// XTENSA: #define __INT32_TYPE__ int +// XTENSA: #define __INT64_C_SUFFIX__ LL +// XTENSA: #define __INT64_FMTd__ "lld" +// XTENSA: #define __INT64_FMTi__ "lli" +// XTENSA: #define __INT64_MAX__ 9223372036854775807LL +// XTENSA: #define __INT64_TYPE__ long long int +// XTENSA: #define __INT8_C_SUFFIX__ +// XTENSA: #define __INT8_FMTd__ "hhd" +// XTENSA: #define __INT8_FMTi__ "hhi" +// XTENSA: #define __INT8_MAX__ 127 +// XTENSA: #define __INT8_TYPE__ signed char +// XTENSA: #define __INTMAX_C_SUFFIX__ LL +// XTENSA: #define __INTMAX_FMTd__ "lld" +// XTENSA: #define __INTMAX_FMTi__ "lli" +// XTENSA: #define __INTMAX_MAX__ 9223372036854775807LL +// XTENSA: #define __INTMAX_TYPE__ long long int +// XTENSA: #define __INTMAX_WIDTH__ 64 +// XTENSA: #define __INTPTR_FMTd__ "d" +// XTENSA: #define __INTPTR_FMTi__ "i" +// XTENSA: #define __INTPTR_MAX__ 2147483647 +// XTENSA: #define __INTPTR_TYPE__ int +// XTENSA: #define __INTPTR_WIDTH__ 32 +// XTENSA: 
#define __INT_FAST16_FMTd__ "hd" +// XTENSA: #define __INT_FAST16_FMTi__ "hi" +// XTENSA: #define __INT_FAST16_MAX__ 32767 +// XTENSA: #define __INT_FAST16_TYPE__ short +// XTENSA: #define __INT_FAST16_WIDTH__ 16 +// XTENSA: #define __INT_FAST32_FMTd__ "d" +// XTENSA: #define __INT_FAST32_FMTi__ "i" +// XTENSA: #define __INT_FAST32_MAX__ 2147483647 +// XTENSA: #define __INT_FAST32_TYPE__ int +// XTENSA: #define __INT_FAST32_WIDTH__ 32 +// XTENSA: #define __INT_FAST64_FMTd__ "lld" +// XTENSA: #define __INT_FAST64_FMTi__ "lli" +// XTENSA: #define __INT_FAST64_MAX__ 9223372036854775807LL +// XTENSA: #define __INT_FAST64_TYPE__ long long int +// XTENSA: #define __INT_FAST64_WIDTH__ 64 +// XTENSA: #define __INT_FAST8_FMTd__ "hhd" +// XTENSA: #define __INT_FAST8_FMTi__ "hhi" +// XTENSA: #define __INT_FAST8_MAX__ 127 +// XTENSA: #define __INT_FAST8_TYPE__ signed char +// XTENSA: #define __INT_FAST8_WIDTH__ 8 +// XTENSA: #define __INT_LEAST16_FMTd__ "hd" +// XTENSA: #define __INT_LEAST16_FMTi__ "hi" +// XTENSA: #define __INT_LEAST16_MAX__ 32767 +// XTENSA: #define __INT_LEAST16_TYPE__ short +// XTENSA: #define __INT_LEAST16_WIDTH__ 16 +// XTENSA: #define __INT_LEAST32_FMTd__ "d" +// XTENSA: #define __INT_LEAST32_FMTi__ "i" +// XTENSA: #define __INT_LEAST32_MAX__ 2147483647 +// XTENSA: #define __INT_LEAST32_TYPE__ int +// XTENSA: #define __INT_LEAST32_WIDTH__ 32 +// XTENSA: #define __INT_LEAST64_FMTd__ "lld" +// XTENSA: #define __INT_LEAST64_FMTi__ "lli" +// XTENSA: #define __INT_LEAST64_MAX__ 9223372036854775807LL +// XTENSA: #define __INT_LEAST64_TYPE__ long long int +// XTENSA: #define __INT_LEAST64_WIDTH__ 64 +// XTENSA: #define __INT_LEAST8_FMTd__ "hhd" +// XTENSA: #define __INT_LEAST8_FMTi__ "hhi" +// XTENSA: #define __INT_LEAST8_MAX__ 127 +// XTENSA: #define __INT_LEAST8_TYPE__ signed char +// XTENSA: #define __INT_LEAST8_WIDTH__ 8 +// XTENSA: #define __INT_MAX__ 2147483647 +// XTENSA: #define __INT_WIDTH__ 32 +// XTENSA: #define __LDBL_DECIMAL_DIG__ 17 +// XTENSA: 
#define __LDBL_DENORM_MIN__ 4.9406564584124654e-324L +// XTENSA: #define __LDBL_DIG__ 15 +// XTENSA: #define __LDBL_EPSILON__ 2.2204460492503131e-16L +// XTENSA: #define __LDBL_HAS_DENORM__ 1 +// XTENSA: #define __LDBL_HAS_INFINITY__ 1 +// XTENSA: #define __LDBL_HAS_QUIET_NAN__ 1 +// XTENSA: #define __LDBL_MANT_DIG__ 53 +// XTENSA: #define __LDBL_MAX_10_EXP__ 308 +// XTENSA: #define __LDBL_MAX_EXP__ 1024 +// XTENSA: #define __LDBL_MAX__ 1.7976931348623157e+308L +// XTENSA: #define __LDBL_MIN_10_EXP__ (-307) +// XTENSA: #define __LDBL_MIN_EXP__ (-1021) +// XTENSA: #define __LDBL_MIN__ 2.2250738585072014e-308L +// XTENSA: #define __LITTLE_ENDIAN__ 1 +// XTENSA: #define __LLONG_WIDTH__ 64 +// XTENSA: #define __LONG_LONG_MAX__ 9223372036854775807LL +// XTENSA: #define __LONG_MAX__ 2147483647L +// XTENSA: #define __LONG_WIDTH__ 32 +// XTENSA: #define __NO_INLINE__ 1 +// XTENSA: #define __NO_MATH_ERRNO__ 1 +// XTENSA: #define __OBJC_BOOL_IS_BOOL 0 +// XTENSA: #define __OPENCL_MEMORY_SCOPE_ALL_SVM_DEVICES 3 +// XTENSA: #define __OPENCL_MEMORY_SCOPE_DEVICE 2 +// XTENSA: #define __OPENCL_MEMORY_SCOPE_SUB_GROUP 4 +// XTENSA: #define __OPENCL_MEMORY_SCOPE_WORK_GROUP 1 +// XTENSA: #define __OPENCL_MEMORY_SCOPE_WORK_ITEM 0 +// XTENSA: #define __ORDER_BIG_ENDIAN__ 4321 +// XTENSA: #define __ORDER_LITTLE_ENDIAN__ 1234 +// XTENSA: #define __ORDER_PDP_ENDIAN__ 3412 +// XTENSA: #define __POINTER_WIDTH__ 32 +// XTENSA: #define __PRAGMA_REDEFINE_EXTNAME 1 +// XTENSA: #define __PTRDIFF_FMTd__ "d" +// XTENSA: #define __PTRDIFF_FMTi__ "i" +// XTENSA: #define __PTRDIFF_MAX__ 2147483647 +// XTENSA: #define __PTRDIFF_TYPE__ int +// XTENSA: #define __PTRDIFF_WIDTH__ 32 +// XTENSA: #define __SCHAR_MAX__ 127 +// XTENSA: #define __SHRT_MAX__ 32767 +// XTENSA: #define __SHRT_WIDTH__ 16 +// XTENSA: #define __SIG_ATOMIC_MAX__ 2147483647 +// XTENSA: #define __SIG_ATOMIC_WIDTH__ 32 +// XTENSA: #define __SIZEOF_DOUBLE__ 8 +// XTENSA: #define __SIZEOF_FLOAT__ 4 +// XTENSA: #define __SIZEOF_INT__ 4 +// 
XTENSA: #define __SIZEOF_LONG_DOUBLE__ 8 +// XTENSA: #define __SIZEOF_LONG_LONG__ 8 +// XTENSA: #define __SIZEOF_LONG__ 4 +// XTENSA: #define __SIZEOF_POINTER__ 4 +// XTENSA: #define __SIZEOF_PTRDIFF_T__ 4 +// XTENSA: #define __SIZEOF_SHORT__ 2 +// XTENSA: #define __SIZEOF_SIZE_T__ 4 +// XTENSA: #define __SIZEOF_WCHAR_T__ 1 +// XTENSA: #define __SIZEOF_WINT_T__ 4 +// XTENSA: #define __SIZE_FMTX__ "X" +// XTENSA: #define __SIZE_FMTo__ "o" +// XTENSA: #define __SIZE_FMTu__ "u" +// XTENSA: #define __SIZE_FMTx__ "x" +// XTENSA: #define __SIZE_MAX__ 4294967295U +// XTENSA: #define __SIZE_TYPE__ unsigned int +// XTENSA: #define __SIZE_WIDTH__ 32 +// XTENSA: #define __STDC_HOSTED__ 0 +// XTENSA: #define __STDC_UTF_16__ 1 +// XTENSA: #define __STDC_UTF_32__ 1 +// XTENSA: #define __STDC_VERSION__ 201710L +// XTENSA: #define __STDC__ 1 +// XTENSA: #define __UINT16_C_SUFFIX__ +// XTENSA: #define __UINT16_FMTX__ "hX" +// XTENSA: #define __UINT16_FMTo__ "ho" +// XTENSA: #define __UINT16_FMTu__ "hu" +// XTENSA: #define __UINT16_FMTx__ "hx" +// XTENSA: #define __UINT16_MAX__ 65535 +// XTENSA: #define __UINT16_TYPE__ unsigned short +// XTENSA: #define __UINT32_C_SUFFIX__ U +// XTENSA: #define __UINT32_FMTX__ "X" +// XTENSA: #define __UINT32_FMTo__ "o" +// XTENSA: #define __UINT32_FMTu__ "u" +// XTENSA: #define __UINT32_FMTx__ "x" +// XTENSA: #define __UINT32_MAX__ 4294967295U +// XTENSA: #define __UINT32_TYPE__ unsigned int +// XTENSA: #define __UINT64_C_SUFFIX__ ULL +// XTENSA: #define __UINT64_FMTX__ "llX" +// XTENSA: #define __UINT64_FMTo__ "llo" +// XTENSA: #define __UINT64_FMTu__ "llu" +// XTENSA: #define __UINT64_FMTx__ "llx" +// XTENSA: #define __UINT64_MAX__ 18446744073709551615ULL +// XTENSA: #define __UINT64_TYPE__ long long unsigned int +// XTENSA: #define __UINT8_C_SUFFIX__ +// XTENSA: #define __UINT8_FMTX__ "hhX" +// XTENSA: #define __UINT8_FMTo__ "hho" +// XTENSA: #define __UINT8_FMTu__ "hhu" +// XTENSA: #define __UINT8_FMTx__ "hhx" +// XTENSA: #define __UINT8_MAX__ 
255 +// XTENSA: #define __UINT8_TYPE__ unsigned char +// XTENSA: #define __UINTMAX_C_SUFFIX__ ULL +// XTENSA: #define __UINTMAX_FMTX__ "llX" +// XTENSA: #define __UINTMAX_FMTo__ "llo" +// XTENSA: #define __UINTMAX_FMTu__ "llu" +// XTENSA: #define __UINTMAX_FMTx__ "llx" +// XTENSA: #define __UINTMAX_MAX__ 18446744073709551615ULL +// XTENSA: #define __UINTMAX_TYPE__ long long unsigned int +// XTENSA: #define __UINTMAX_WIDTH__ 64 +// XTENSA: #define __UINTPTR_FMTX__ "X" +// XTENSA: #define __UINTPTR_FMTo__ "o" +// XTENSA: #define __UINTPTR_FMTu__ "u" +// XTENSA: #define __UINTPTR_FMTx__ "x" +// XTENSA: #define __UINTPTR_MAX__ 4294967295U +// XTENSA: #define __UINTPTR_TYPE__ unsigned int +// XTENSA: #define __UINTPTR_WIDTH__ 32 +// XTENSA: #define __UINT_FAST16_FMTX__ "hX" +// XTENSA: #define __UINT_FAST16_FMTo__ "ho" +// XTENSA: #define __UINT_FAST16_FMTu__ "hu" +// XTENSA: #define __UINT_FAST16_FMTx__ "hx" +// XTENSA: #define __UINT_FAST16_MAX__ 65535 +// XTENSA: #define __UINT_FAST16_TYPE__ unsigned short +// XTENSA: #define __UINT_FAST32_FMTX__ "X" +// XTENSA: #define __UINT_FAST32_FMTo__ "o" +// XTENSA: #define __UINT_FAST32_FMTu__ "u" +// XTENSA: #define __UINT_FAST32_FMTx__ "x" +// XTENSA: #define __UINT_FAST32_MAX__ 4294967295U +// XTENSA: #define __UINT_FAST32_TYPE__ unsigned int +// XTENSA: #define __UINT_FAST64_FMTX__ "llX" +// XTENSA: #define __UINT_FAST64_FMTo__ "llo" +// XTENSA: #define __UINT_FAST64_FMTu__ "llu" +// XTENSA: #define __UINT_FAST64_FMTx__ "llx" +// XTENSA: #define __UINT_FAST64_MAX__ 18446744073709551615ULL +// XTENSA: #define __UINT_FAST64_TYPE__ long long unsigned int +// XTENSA: #define __UINT_FAST8_FMTX__ "hhX" +// XTENSA: #define __UINT_FAST8_FMTo__ "hho" +// XTENSA: #define __UINT_FAST8_FMTu__ "hhu" +// XTENSA: #define __UINT_FAST8_FMTx__ "hhx" +// XTENSA: #define __UINT_FAST8_MAX__ 255 +// XTENSA: #define __UINT_FAST8_TYPE__ unsigned char +// XTENSA: #define __UINT_LEAST16_FMTX__ "hX" +// XTENSA: #define __UINT_LEAST16_FMTo__ "ho" 
+// XTENSA: #define __UINT_LEAST16_FMTu__ "hu" +// XTENSA: #define __UINT_LEAST16_FMTx__ "hx" +// XTENSA: #define __UINT_LEAST16_MAX__ 65535 +// XTENSA: #define __UINT_LEAST16_TYPE__ unsigned short +// XTENSA: #define __UINT_LEAST32_FMTX__ "X" +// XTENSA: #define __UINT_LEAST32_FMTo__ "o" +// XTENSA: #define __UINT_LEAST32_FMTu__ "u" +// XTENSA: #define __UINT_LEAST32_FMTx__ "x" +// XTENSA: #define __UINT_LEAST32_MAX__ 4294967295U +// XTENSA: #define __UINT_LEAST32_TYPE__ unsigned int +// XTENSA: #define __UINT_LEAST64_FMTX__ "llX" +// XTENSA: #define __UINT_LEAST64_FMTo__ "llo" +// XTENSA: #define __UINT_LEAST64_FMTu__ "llu" +// XTENSA: #define __UINT_LEAST64_FMTx__ "llx" +// XTENSA: #define __UINT_LEAST64_MAX__ 18446744073709551615ULL +// XTENSA: #define __UINT_LEAST64_TYPE__ long long unsigned int +// XTENSA: #define __UINT_LEAST8_FMTX__ "hhX" +// XTENSA: #define __UINT_LEAST8_FMTo__ "hho" +// XTENSA: #define __UINT_LEAST8_FMTu__ "hhu" +// XTENSA: #define __UINT_LEAST8_FMTx__ "hhx" +// XTENSA: #define __UINT_LEAST8_MAX__ 255 +// XTENSA: #define __UINT_LEAST8_TYPE__ unsigned char +// XTENSA: #define __WCHAR_MAX__ 255 +// XTENSA: #define __WCHAR_TYPE__ unsigned char +// XTENSA: #define __WCHAR_UNSIGNED__ 1 +// XTENSA: #define __WCHAR_WIDTH__ 8 +// XTENSA: #define __WINT_MAX__ 4294967295U +// XTENSA: #define __WINT_TYPE__ unsigned int +// XTENSA: #define __WINT_UNSIGNED__ 1 +// XTENSA: #define __WINT_WIDTH__ 32 +// XTENSA: #define __XTENSA_EL__ 1 +// XTENSA: #define __XTENSA_WINDOWED_ABI__ 1 +// XTENSA: #define __XTENSA__ 1 +// XTENSA: #define __xtensa__ 1 + +// RUN: %clang_cc1 -E -dM -ffreestanding -triple=xtensa-esp-unknown-elf -mfast-int-min32 < /dev/null \ +// RUN: | FileCheck -match-full-lines -check-prefix=XTENSA_FAST32 %s +// XTENSA_FAST32: #define _ILP32 1 +// XTENSA_FAST32: #define __ATOMIC_ACQUIRE 2 +// XTENSA_FAST32: #define __ATOMIC_ACQ_REL 4 +// XTENSA_FAST32: #define __ATOMIC_CONSUME 1 +// XTENSA_FAST32: #define __ATOMIC_RELAXED 0 +// XTENSA_FAST32: 
#define __ATOMIC_RELEASE 3 +// XTENSA_FAST32: #define __ATOMIC_SEQ_CST 5 +// XTENSA_FAST32: #define __BIGGEST_ALIGNMENT__ 4 +// XTENSA_FAST32: #define __BITINT_MAXWIDTH__ 128 +// XTENSA_FAST32: #define __BOOL_WIDTH__ 8 +// XTENSA_FAST32: #define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__ +// XTENSA_FAST32: #define __CHAR16_TYPE__ unsigned short +// XTENSA_FAST32: #define __CHAR32_TYPE__ unsigned int +// XTENSA_FAST32: #define __CHAR_BIT__ 8 +// XTENSA_FAST32: #define __CLANG_ATOMIC_BOOL_LOCK_FREE 2 +// XTENSA_FAST32: #define __CLANG_ATOMIC_CHAR16_T_LOCK_FREE 2 +// XTENSA_FAST32: #define __CLANG_ATOMIC_CHAR32_T_LOCK_FREE 2 +// XTENSA_FAST32: #define __CLANG_ATOMIC_CHAR_LOCK_FREE 2 +// XTENSA_FAST32: #define __CLANG_ATOMIC_INT_LOCK_FREE 2 +// XTENSA_FAST32: #define __CLANG_ATOMIC_LLONG_LOCK_FREE 1 +// XTENSA_FAST32: #define __CLANG_ATOMIC_LONG_LOCK_FREE 2 +// XTENSA_FAST32: #define __CLANG_ATOMIC_POINTER_LOCK_FREE 2 +// XTENSA_FAST32: #define __CLANG_ATOMIC_SHORT_LOCK_FREE 2 +// XTENSA_FAST32: #define __CLANG_ATOMIC_WCHAR_T_LOCK_FREE 2 +// XTENSA_FAST32: #define __CONSTANT_CFSTRINGS__ 1 +// XTENSA_FAST32: #define __DBL_DECIMAL_DIG__ 17 +// XTENSA_FAST32: #define __DBL_DENORM_MIN__ 4.9406564584124654e-324 +// XTENSA_FAST32: #define __DBL_DIG__ 15 +// XTENSA_FAST32: #define __DBL_EPSILON__ 2.2204460492503131e-16 +// XTENSA_FAST32: #define __DBL_HAS_DENORM__ 1 +// XTENSA_FAST32: #define __DBL_HAS_INFINITY__ 1 +// XTENSA_FAST32: #define __DBL_HAS_QUIET_NAN__ 1 +// XTENSA_FAST32: #define __DBL_MANT_DIG__ 53 +// XTENSA_FAST32: #define __DBL_MAX_10_EXP__ 308 +// XTENSA_FAST32: #define __DBL_MAX_EXP__ 1024 +// XTENSA_FAST32: #define __DBL_MAX__ 1.7976931348623157e+308 +// XTENSA_FAST32: #define __DBL_MIN_10_EXP__ (-307) +// XTENSA_FAST32: #define __DBL_MIN_EXP__ (-1021) +// XTENSA_FAST32: #define __DBL_MIN__ 2.2250738585072014e-308 +// XTENSA_FAST32: #define __DECIMAL_DIG__ __LDBL_DECIMAL_DIG__ +// XTENSA_FAST32: #define __ELF__ 1 +// XTENSA_FAST32: #define __FINITE_MATH_ONLY__ 
0 +// XTENSA_FAST32: #define __FLT_DECIMAL_DIG__ 9 +// XTENSA_FAST32: #define __FLT_DENORM_MIN__ 1.40129846e-45F +// XTENSA_FAST32: #define __FLT_DIG__ 6 +// XTENSA_FAST32: #define __FLT_EPSILON__ 1.19209290e-7F +// XTENSA_FAST32: #define __FLT_HAS_DENORM__ 1 +// XTENSA_FAST32: #define __FLT_HAS_INFINITY__ 1 +// XTENSA_FAST32: #define __FLT_HAS_QUIET_NAN__ 1 +// XTENSA_FAST32: #define __FLT_MANT_DIG__ 24 +// XTENSA_FAST32: #define __FLT_MAX_10_EXP__ 38 +// XTENSA_FAST32: #define __FLT_MAX_EXP__ 128 +// XTENSA_FAST32: #define __FLT_MAX__ 3.40282347e+38F +// XTENSA_FAST32: #define __FLT_MIN_10_EXP__ (-37) +// XTENSA_FAST32: #define __FLT_MIN_EXP__ (-125) +// XTENSA_FAST32: #define __FLT_MIN__ 1.17549435e-38F +// XTENSA_FAST32: #define __FLT_RADIX__ 2 +// XTENSA_FAST32: #define __ILP32__ 1 +// XTENSA_FAST32: #define __INT16_C_SUFFIX__ +// XTENSA_FAST32: #define __INT16_FMTd__ "hd" +// XTENSA_FAST32: #define __INT16_FMTi__ "hi" +// XTENSA_FAST32: #define __INT16_MAX__ 32767 +// XTENSA_FAST32: #define __INT16_TYPE__ short +// XTENSA_FAST32: #define __INT32_C_SUFFIX__ +// XTENSA_FAST32: #define __INT32_FMTd__ "d" +// XTENSA_FAST32: #define __INT32_FMTi__ "i" +// XTENSA_FAST32: #define __INT32_MAX__ 2147483647 +// XTENSA_FAST32: #define __INT32_TYPE__ int +// XTENSA_FAST32: #define __INT64_C_SUFFIX__ LL +// XTENSA_FAST32: #define __INT64_FMTd__ "lld" +// XTENSA_FAST32: #define __INT64_FMTi__ "lli" +// XTENSA_FAST32: #define __INT64_MAX__ 9223372036854775807LL +// XTENSA_FAST32: #define __INT64_TYPE__ long long int +// XTENSA_FAST32: #define __INT8_C_SUFFIX__ +// XTENSA_FAST32: #define __INT8_FMTd__ "hhd" +// XTENSA_FAST32: #define __INT8_FMTi__ "hhi" +// XTENSA_FAST32: #define __INT8_MAX__ 127 +// XTENSA_FAST32: #define __INT8_TYPE__ signed char +// XTENSA_FAST32: #define __INTMAX_C_SUFFIX__ LL +// XTENSA_FAST32: #define __INTMAX_FMTd__ "lld" +// XTENSA_FAST32: #define __INTMAX_FMTi__ "lli" +// XTENSA_FAST32: #define __INTMAX_MAX__ 9223372036854775807LL +// XTENSA_FAST32: 
#define __INTMAX_TYPE__ long long int +// XTENSA_FAST32: #define __INTMAX_WIDTH__ 64 +// XTENSA_FAST32: #define __INTPTR_FMTd__ "d" +// XTENSA_FAST32: #define __INTPTR_FMTi__ "i" +// XTENSA_FAST32: #define __INTPTR_MAX__ 2147483647 +// XTENSA_FAST32: #define __INTPTR_TYPE__ int +// XTENSA_FAST32: #define __INTPTR_WIDTH__ 32 +// XTENSA_FAST32: #define __INT_FAST16_FMTd__ "d" +// XTENSA_FAST32: #define __INT_FAST16_FMTi__ "i" +// XTENSA_FAST32: #define __INT_FAST16_MAX__ 2147483647 +// XTENSA_FAST32: #define __INT_FAST16_TYPE__ int +// XTENSA_FAST32: #define __INT_FAST16_WIDTH__ 32 +// XTENSA_FAST32: #define __INT_FAST32_FMTd__ "d" +// XTENSA_FAST32: #define __INT_FAST32_FMTi__ "i" +// XTENSA_FAST32: #define __INT_FAST32_MAX__ 2147483647 +// XTENSA_FAST32: #define __INT_FAST32_TYPE__ int +// XTENSA_FAST32: #define __INT_FAST32_WIDTH__ 32 +// XTENSA_FAST32: #define __INT_FAST64_FMTd__ "lld" +// XTENSA_FAST32: #define __INT_FAST64_FMTi__ "lli" +// XTENSA_FAST32: #define __INT_FAST64_MAX__ 9223372036854775807LL +// XTENSA_FAST32: #define __INT_FAST64_TYPE__ long long int +// XTENSA_FAST32: #define __INT_FAST64_WIDTH__ 64 +// XTENSA_FAST32: #define __INT_FAST8_FMTd__ "d" +// XTENSA_FAST32: #define __INT_FAST8_FMTi__ "i" +// XTENSA_FAST32: #define __INT_FAST8_MAX__ 2147483647 +// XTENSA_FAST32: #define __INT_FAST8_TYPE__ int +// XTENSA_FAST32: #define __INT_FAST8_WIDTH__ 32 +// XTENSA_FAST32: #define __INT_LEAST16_FMTd__ "hd" +// XTENSA_FAST32: #define __INT_LEAST16_FMTi__ "hi" +// XTENSA_FAST32: #define __INT_LEAST16_MAX__ 32767 +// XTENSA_FAST32: #define __INT_LEAST16_TYPE__ short +// XTENSA_FAST32: #define __INT_LEAST16_WIDTH__ 16 +// XTENSA_FAST32: #define __INT_LEAST32_FMTd__ "d" +// XTENSA_FAST32: #define __INT_LEAST32_FMTi__ "i" +// XTENSA_FAST32: #define __INT_LEAST32_MAX__ 2147483647 +// XTENSA_FAST32: #define __INT_LEAST32_TYPE__ int +// XTENSA_FAST32: #define __INT_LEAST32_WIDTH__ 32 +// XTENSA_FAST32: #define __INT_LEAST64_FMTd__ "lld" +// XTENSA_FAST32: 
#define __INT_LEAST64_FMTi__ "lli" +// XTENSA_FAST32: #define __INT_LEAST64_MAX__ 9223372036854775807LL +// XTENSA_FAST32: #define __INT_LEAST64_TYPE__ long long int +// XTENSA_FAST32: #define __INT_LEAST64_WIDTH__ 64 +// XTENSA_FAST32: #define __INT_LEAST8_FMTd__ "hhd" +// XTENSA_FAST32: #define __INT_LEAST8_FMTi__ "hhi" +// XTENSA_FAST32: #define __INT_LEAST8_MAX__ 127 +// XTENSA_FAST32: #define __INT_LEAST8_TYPE__ signed char +// XTENSA_FAST32: #define __INT_LEAST8_WIDTH__ 8 +// XTENSA_FAST32: #define __INT_MAX__ 2147483647 +// XTENSA_FAST32: #define __INT_WIDTH__ 32 +// XTENSA_FAST32: #define __LDBL_DECIMAL_DIG__ 17 +// XTENSA_FAST32: #define __LDBL_DENORM_MIN__ 4.9406564584124654e-324L +// XTENSA_FAST32: #define __LDBL_DIG__ 15 +// XTENSA_FAST32: #define __LDBL_EPSILON__ 2.2204460492503131e-16L +// XTENSA_FAST32: #define __LDBL_HAS_DENORM__ 1 +// XTENSA_FAST32: #define __LDBL_HAS_INFINITY__ 1 +// XTENSA_FAST32: #define __LDBL_HAS_QUIET_NAN__ 1 +// XTENSA_FAST32: #define __LDBL_MANT_DIG__ 53 +// XTENSA_FAST32: #define __LDBL_MAX_10_EXP__ 308 +// XTENSA_FAST32: #define __LDBL_MAX_EXP__ 1024 +// XTENSA_FAST32: #define __LDBL_MAX__ 1.7976931348623157e+308L +// XTENSA_FAST32: #define __LDBL_MIN_10_EXP__ (-307) +// XTENSA_FAST32: #define __LDBL_MIN_EXP__ (-1021) +// XTENSA_FAST32: #define __LDBL_MIN__ 2.2250738585072014e-308L +// XTENSA_FAST32: #define __LITTLE_ENDIAN__ 1 +// XTENSA_FAST32: #define __LLONG_WIDTH__ 64 +// XTENSA_FAST32: #define __LONG_LONG_MAX__ 9223372036854775807LL +// XTENSA_FAST32: #define __LONG_MAX__ 2147483647L +// XTENSA_FAST32: #define __LONG_WIDTH__ 32 +// XTENSA_FAST32: #define __NO_INLINE__ 1 +// XTENSA_FAST32: #define __NO_MATH_ERRNO__ 1 +// XTENSA_FAST32: #define __OBJC_BOOL_IS_BOOL 0 +// XTENSA_FAST32: #define __OPENCL_MEMORY_SCOPE_ALL_SVM_DEVICES 3 +// XTENSA_FAST32: #define __OPENCL_MEMORY_SCOPE_DEVICE 2 +// XTENSA_FAST32: #define __OPENCL_MEMORY_SCOPE_SUB_GROUP 4 +// XTENSA_FAST32: #define __OPENCL_MEMORY_SCOPE_WORK_GROUP 1 +// 
XTENSA_FAST32: #define __OPENCL_MEMORY_SCOPE_WORK_ITEM 0 +// XTENSA_FAST32: #define __ORDER_BIG_ENDIAN__ 4321 +// XTENSA_FAST32: #define __ORDER_LITTLE_ENDIAN__ 1234 +// XTENSA_FAST32: #define __ORDER_PDP_ENDIAN__ 3412 +// XTENSA_FAST32: #define __POINTER_WIDTH__ 32 +// XTENSA_FAST32: #define __PRAGMA_REDEFINE_EXTNAME 1 +// XTENSA_FAST32: #define __PTRDIFF_FMTd__ "d" +// XTENSA_FAST32: #define __PTRDIFF_FMTi__ "i" +// XTENSA_FAST32: #define __PTRDIFF_MAX__ 2147483647 +// XTENSA_FAST32: #define __PTRDIFF_TYPE__ int +// XTENSA_FAST32: #define __PTRDIFF_WIDTH__ 32 +// XTENSA_FAST32: #define __SCHAR_MAX__ 127 +// XTENSA_FAST32: #define __SHRT_MAX__ 32767 +// XTENSA_FAST32: #define __SHRT_WIDTH__ 16 +// XTENSA_FAST32: #define __SIG_ATOMIC_MAX__ 2147483647 +// XTENSA_FAST32: #define __SIG_ATOMIC_WIDTH__ 32 +// XTENSA_FAST32: #define __SIZEOF_DOUBLE__ 8 +// XTENSA_FAST32: #define __SIZEOF_FLOAT__ 4 +// XTENSA_FAST32: #define __SIZEOF_INT__ 4 +// XTENSA_FAST32: #define __SIZEOF_LONG_DOUBLE__ 8 +// XTENSA_FAST32: #define __SIZEOF_LONG_LONG__ 8 +// XTENSA_FAST32: #define __SIZEOF_LONG__ 4 +// XTENSA_FAST32: #define __SIZEOF_POINTER__ 4 +// XTENSA_FAST32: #define __SIZEOF_PTRDIFF_T__ 4 +// XTENSA_FAST32: #define __SIZEOF_SHORT__ 2 +// XTENSA_FAST32: #define __SIZEOF_SIZE_T__ 4 +// XTENSA_FAST32: #define __SIZEOF_WCHAR_T__ 1 +// XTENSA_FAST32: #define __SIZEOF_WINT_T__ 4 +// XTENSA_FAST32: #define __SIZE_FMTX__ "X" +// XTENSA_FAST32: #define __SIZE_FMTo__ "o" +// XTENSA_FAST32: #define __SIZE_FMTu__ "u" +// XTENSA_FAST32: #define __SIZE_FMTx__ "x" +// XTENSA_FAST32: #define __SIZE_MAX__ 4294967295U +// XTENSA_FAST32: #define __SIZE_TYPE__ unsigned int +// XTENSA_FAST32: #define __SIZE_WIDTH__ 32 +// XTENSA_FAST32: #define __STDC_HOSTED__ 0 +// XTENSA_FAST32: #define __STDC_UTF_16__ 1 +// XTENSA_FAST32: #define __STDC_UTF_32__ 1 +// XTENSA_FAST32: #define __STDC_VERSION__ 201710L +// XTENSA_FAST32: #define __STDC__ 1 +// XTENSA_FAST32: #define __UINT16_C_SUFFIX__ +// 
XTENSA_FAST32: #define __UINT16_FMTX__ "hX" +// XTENSA_FAST32: #define __UINT16_FMTo__ "ho" +// XTENSA_FAST32: #define __UINT16_FMTu__ "hu" +// XTENSA_FAST32: #define __UINT16_FMTx__ "hx" +// XTENSA_FAST32: #define __UINT16_MAX__ 65535 +// XTENSA_FAST32: #define __UINT16_TYPE__ unsigned short +// XTENSA_FAST32: #define __UINT32_C_SUFFIX__ U +// XTENSA_FAST32: #define __UINT32_FMTX__ "X" +// XTENSA_FAST32: #define __UINT32_FMTo__ "o" +// XTENSA_FAST32: #define __UINT32_FMTu__ "u" +// XTENSA_FAST32: #define __UINT32_FMTx__ "x" +// XTENSA_FAST32: #define __UINT32_MAX__ 4294967295U +// XTENSA_FAST32: #define __UINT32_TYPE__ unsigned int +// XTENSA_FAST32: #define __UINT64_C_SUFFIX__ ULL +// XTENSA_FAST32: #define __UINT64_FMTX__ "llX" +// XTENSA_FAST32: #define __UINT64_FMTo__ "llo" +// XTENSA_FAST32: #define __UINT64_FMTu__ "llu" +// XTENSA_FAST32: #define __UINT64_FMTx__ "llx" +// XTENSA_FAST32: #define __UINT64_MAX__ 18446744073709551615ULL +// XTENSA_FAST32: #define __UINT64_TYPE__ long long unsigned int +// XTENSA_FAST32: #define __UINT8_C_SUFFIX__ +// XTENSA_FAST32: #define __UINT8_FMTX__ "hhX" +// XTENSA_FAST32: #define __UINT8_FMTo__ "hho" +// XTENSA_FAST32: #define __UINT8_FMTu__ "hhu" +// XTENSA_FAST32: #define __UINT8_FMTx__ "hhx" +// XTENSA_FAST32: #define __UINT8_MAX__ 255 +// XTENSA_FAST32: #define __UINT8_TYPE__ unsigned char +// XTENSA_FAST32: #define __UINTMAX_C_SUFFIX__ ULL +// XTENSA_FAST32: #define __UINTMAX_FMTX__ "llX" +// XTENSA_FAST32: #define __UINTMAX_FMTo__ "llo" +// XTENSA_FAST32: #define __UINTMAX_FMTu__ "llu" +// XTENSA_FAST32: #define __UINTMAX_FMTx__ "llx" +// XTENSA_FAST32: #define __UINTMAX_MAX__ 18446744073709551615ULL +// XTENSA_FAST32: #define __UINTMAX_TYPE__ long long unsigned int +// XTENSA_FAST32: #define __UINTMAX_WIDTH__ 64 +// XTENSA_FAST32: #define __UINTPTR_FMTX__ "X" +// XTENSA_FAST32: #define __UINTPTR_FMTo__ "o" +// XTENSA_FAST32: #define __UINTPTR_FMTu__ "u" +// XTENSA_FAST32: #define __UINTPTR_FMTx__ "x" +// 
XTENSA_FAST32: #define __UINTPTR_MAX__ 4294967295U +// XTENSA_FAST32: #define __UINTPTR_TYPE__ unsigned int +// XTENSA_FAST32: #define __UINTPTR_WIDTH__ 32 +// XTENSA_FAST32: #define __UINT_FAST16_FMTX__ "X" +// XTENSA_FAST32: #define __UINT_FAST16_FMTo__ "o" +// XTENSA_FAST32: #define __UINT_FAST16_FMTu__ "u" +// XTENSA_FAST32: #define __UINT_FAST16_FMTx__ "x" +// XTENSA_FAST32: #define __UINT_FAST16_MAX__ 4294967295U +// XTENSA_FAST32: #define __UINT_FAST16_TYPE__ unsigned int +// XTENSA_FAST32: #define __UINT_FAST32_FMTX__ "X" +// XTENSA_FAST32: #define __UINT_FAST32_FMTo__ "o" +// XTENSA_FAST32: #define __UINT_FAST32_FMTu__ "u" +// XTENSA_FAST32: #define __UINT_FAST32_FMTx__ "x" +// XTENSA_FAST32: #define __UINT_FAST32_MAX__ 4294967295U +// XTENSA_FAST32: #define __UINT_FAST32_TYPE__ unsigned int +// XTENSA_FAST32: #define __UINT_FAST64_FMTX__ "llX" +// XTENSA_FAST32: #define __UINT_FAST64_FMTo__ "llo" +// XTENSA_FAST32: #define __UINT_FAST64_FMTu__ "llu" +// XTENSA_FAST32: #define __UINT_FAST64_FMTx__ "llx" +// XTENSA_FAST32: #define __UINT_FAST64_MAX__ 18446744073709551615ULL +// XTENSA_FAST32: #define __UINT_FAST64_TYPE__ long long unsigned int +// XTENSA_FAST32: #define __UINT_FAST8_FMTX__ "X" +// XTENSA_FAST32: #define __UINT_FAST8_FMTo__ "o" +// XTENSA_FAST32: #define __UINT_FAST8_FMTu__ "u" +// XTENSA_FAST32: #define __UINT_FAST8_FMTx__ "x" +// XTENSA_FAST32: #define __UINT_FAST8_MAX__ 4294967295U +// XTENSA_FAST32: #define __UINT_FAST8_TYPE__ unsigned int +// XTENSA_FAST32: #define __UINT_LEAST16_FMTX__ "hX" +// XTENSA_FAST32: #define __UINT_LEAST16_FMTo__ "ho" +// XTENSA_FAST32: #define __UINT_LEAST16_FMTu__ "hu" +// XTENSA_FAST32: #define __UINT_LEAST16_FMTx__ "hx" +// XTENSA_FAST32: #define __UINT_LEAST16_MAX__ 65535 +// XTENSA_FAST32: #define __UINT_LEAST16_TYPE__ unsigned short +// XTENSA_FAST32: #define __UINT_LEAST32_FMTX__ "X" +// XTENSA_FAST32: #define __UINT_LEAST32_FMTo__ "o" +// XTENSA_FAST32: #define __UINT_LEAST32_FMTu__ "u" +// 
XTENSA_FAST32: #define __UINT_LEAST32_FMTx__ "x" +// XTENSA_FAST32: #define __UINT_LEAST32_MAX__ 4294967295U +// XTENSA_FAST32: #define __UINT_LEAST32_TYPE__ unsigned int +// XTENSA_FAST32: #define __UINT_LEAST64_FMTX__ "llX" +// XTENSA_FAST32: #define __UINT_LEAST64_FMTo__ "llo" +// XTENSA_FAST32: #define __UINT_LEAST64_FMTu__ "llu" +// XTENSA_FAST32: #define __UINT_LEAST64_FMTx__ "llx" +// XTENSA_FAST32: #define __UINT_LEAST64_MAX__ 18446744073709551615ULL +// XTENSA_FAST32: #define __UINT_LEAST64_TYPE__ long long unsigned int +// XTENSA_FAST32: #define __UINT_LEAST8_FMTX__ "hhX" +// XTENSA_FAST32: #define __UINT_LEAST8_FMTo__ "hho" +// XTENSA_FAST32: #define __UINT_LEAST8_FMTu__ "hhu" +// XTENSA_FAST32: #define __UINT_LEAST8_FMTx__ "hhx" +// XTENSA_FAST32: #define __UINT_LEAST8_MAX__ 255 +// XTENSA_FAST32: #define __UINT_LEAST8_TYPE__ unsigned char +// XTENSA_FAST32: #define __WCHAR_MAX__ 255 +// XTENSA_FAST32: #define __WCHAR_TYPE__ unsigned char +// XTENSA_FAST32: #define __WCHAR_UNSIGNED__ 1 +// XTENSA_FAST32: #define __WCHAR_WIDTH__ 8 +// XTENSA_FAST32: #define __WINT_MAX__ 4294967295U +// XTENSA_FAST32: #define __WINT_TYPE__ unsigned int +// XTENSA_FAST32: #define __WINT_UNSIGNED__ 1 +// XTENSA_FAST32: #define __WINT_WIDTH__ 32 +// XTENSA_FAST32: #define __XTENSA_EL__ 1 +// XTENSA_FAST32: #define __XTENSA_WINDOWED_ABI__ 1 +// XTENSA_FAST32: #define __XTENSA__ 1 +// XTENSA_FAST32: #define __xtensa__ 1 From f5d9e681de7ca2b20b6b02624becc6c7745ffc69 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Thu, 31 Aug 2023 23:33:08 +0300 Subject: [PATCH 163/289] esp/ci: Makes use of MacOS codesign scripts from external repo --- .gitlab-ci.yml | 2 +- .universal-toolchain-release.yml | 80 +++++++++++--------------------- 2 files changed, 27 insertions(+), 55 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8601675f3b7fd..644da08a100ff 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,7 +1,7 @@ stages: - build - pack - - sign + - macos_codesign - 
private_deploy - test - public_deploy diff --git a/.universal-toolchain-release.yml b/.universal-toolchain-release.yml index 4628e486d6e4c..baf1891c4b73d 100644 --- a/.universal-toolchain-release.yml +++ b/.universal-toolchain-release.yml @@ -31,14 +31,14 @@ ${ARCHIVE_TOOL} ${ARCHIVE_NAME} esp-clang/ mkdir -p ${DISTRO_DIR} mv ${ARCHIVE_NAME} ${DISTRO_DIR}/ - echo "${ARCHIVE_NAME}" > ${DISTRO_DIR}/file_${PLATFORM_NAME} + echo "${ARCHIVE_NAME}" > ${DISTRO_DIR}/dist_name_${PLATFORM_NAME} # Pack libs to be used for Rust, Go etc. .package_libs: &package_libs | eval ${ARCHIVE_TOOL} ${LIBS_ARCHIVE_NAME} esp-clang/lib/clang/${CLANG_VER}/include esp-clang/lib/lib{clang,LLVM}* ${LIBS_PACK_EXTRA_PATHS:-} mkdir -p ${DISTRO_DIR} mv ${LIBS_ARCHIVE_NAME} ${DISTRO_DIR}/ - echo "${LIBS_ARCHIVE_NAME}" > ${DISTRO_DIR}/file_libs-${PLATFORM_NAME} + echo "${LIBS_ARCHIVE_NAME}" > ${DISTRO_DIR}/dist_name_libs-${PLATFORM_NAME} .get_binutils: &get_binutils | git clone -b ${BINUTILS_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${BINUTILS_REPO}.git @@ -150,7 +150,7 @@ build_x86_64-w64-mingw32: - !reference [.use_ci_tools, script] - !reference [.add_gitlab_key, script] # get ARCHIVE_NAME for Linux release. Modify vars to make get_release_name working properly - - CLANG_LINUX_ARCHIVE=$(cat ${DIST_DIR}/file_${PLATFORM_NAME_LINUX}) + - CLANG_LINUX_ARCHIVE=$(cat ${DIST_DIR}/dist_name_${PLATFORM_NAME_LINUX}) # unpack x86_64-linux-gnu toolchain to re-use it as native Clang for Windows build - mkdir -p esp-clang-${PLATFORM_NAME_LINUX} - ${UNARCHIVE_TOOL_LINUX} ${DIST_DIR}/${CLANG_LINUX_ARCHIVE} -C esp-clang-${PLATFORM_NAME_LINUX} @@ -208,7 +208,7 @@ build_newlib: ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" script: # get ARCHIVE_NAME for Linux release. 
- - CLANG_ARCHIVE=$PWD/${DIST_DIR}/$(cat ${DIST_DIR}/file_${PLATFORM_NAME_LINUX}) + - CLANG_ARCHIVE=$PWD/${DIST_DIR}/$(cat ${DIST_DIR}/dist_name_${PLATFORM_NAME_LINUX}) - mkdir -p ${DOWNLOADS_DIR} - pushd ${DOWNLOADS_DIR} - *get_xtensa_overlays @@ -252,7 +252,7 @@ build_compiler-rt: script: - LLVM_PROJECT_PATH=$PWD # get ARCHIVE_NAME for Linux release. - - CLANG_ARCHIVE=$PWD/${DIST_DIR}/$(cat ${DIST_DIR}/file_${PLATFORM_NAME_LINUX}) + - CLANG_ARCHIVE=$PWD/${DIST_DIR}/$(cat ${DIST_DIR}/dist_name_${PLATFORM_NAME_LINUX}) - NEWLIB_ARCHIVE=$PWD/${DIST_DIR}/esp-clang-newlib-overlay.${ARCHIVE_EXT_NEWLIB} - mkdir -p ${DOWNLOADS_DIR} - pushd ${DOWNLOADS_DIR} @@ -414,56 +414,29 @@ test_x86_64-linux-gnu: # run testsuite for esp32 - ./run_esp32_tests.sh 2>&1 > ${BUILD_PATH}/tests.log -.macos_codesign: &macos_codesign - stage: sign - tags: [ "darwin", "amd64" ] +macos_codesign: + stage: macos_codesign + when: on_success resource_group: macos_codesign + tags: [ "darwin", "codesign" ] + # list all jobs that produces macos distros + needs: [ pack_x86_64-apple-darwin, pack_aarch64-apple-darwin ] artifacts: paths: - - ${DIST_DIR}/ - when: always - expire_in: 3 day + - ${DIST_DIR} variables: - KEYCHAIN_NAME: "llvm.keychain" - ARCHIVE_TOOL: "${ARCHIVE_TOOL_MACOS}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_MACOS}" - ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" + # directory with distro archives + DIST_ART_DIR: ${DIST_DIR} + # command to unarchive distro + ARCHIVE_TOOL: ${ARCHIVE_TOOL_MACOS} + # command to unarchive distro + UNARCHIVE_TOOL: ${UNARCHIVE_TOOL_MACOS} + # URL to macos codesign repo + NOTARIZATION_SCRIPTS_GIT: "${CI_SERVER_PROTOCOL}://gitlab-ci-token:${CI_JOB_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/espressif/macos_codesign_notarization.git" script: - - *get_release_name - - ${UNARCHIVE_TOOL} ${DIST_DIR}/${ARCHIVE_NAME} - - rm -rf ${DIST_DIR} - - TOOLCHAIN_PATH=$PWD/esp-clang - - echo $MACOS_CERTIFICATE | base64 --decode > $PWD/certificate.p12 - - security create-keychain -p 
$KEYCHAIN_PWD $KEYCHAIN_NAME || true - - security import $PWD/certificate.p12 -k $KEYCHAIN_NAME -P $MACOS_CERTIFICATE_PWD -T /usr/bin/codesign - - security set-key-partition-list -S apple-tool:,apple:,codesign -s -k $KEYCHAIN_PWD $KEYCHAIN_NAME - - security list-keychains -d user -s ~/Library/Keychains/$KEYCHAIN_NAME - - security find-identity -v -p codesigning - - security unlock-keychain -p $KEYCHAIN_PWD $KEYCHAIN_NAME - - /usr/bin/codesign -v --force --options runtime -s $IDENTITY_ID $TOOLCHAIN_PATH/bin/* $TOOLCHAIN_PATH/lib/*.dylib - - security delete-keychain $KEYCHAIN_NAME - - codesign -dvv $TOOLCHAIN_PATH/bin/* - - DISTRO_DIR=$PWD/${DIST_DIR} - - *package_toolchain - - *package_libs - after_script: - - security find-identity -v - - security delete-keychain $KEYCHAIN_NAME - - security find-identity -v - -sign_x86_64-apple-darwin: - extends: .macos_codesign - needs: - - pack_x86_64-apple-darwin - variables: - PLATFORM_NAME: "${PLATFORM_NAME_MACOS}" - -sign_aarch64-apple-darwin: - extends: .macos_codesign - needs: - - pack_aarch64-apple-darwin - variables: - PLATFORM_NAME: "${PLATFORM_NAME_MACOS_ARM64}" + - git clone -q --depth=1 ${NOTARIZATION_SCRIPTS_GIT} -b ${CI_COMMIT_REF_NAME} || + git clone -q --depth=1 ${NOTARIZATION_SCRIPTS_GIT} + - ./macos_codesign_notarization/run.sh upload_to_http: stage: private_deploy @@ -480,7 +453,7 @@ upload_to_http: script: - cit_add_ssh_key "${HTTP_UPLOAD_KEY}" # List of archives - - FILES=$(find ${DIST_DIR} -name file_\* -exec cat {} \+) + - FILES=$(find ${DIST_DIR} -name dist_name_\* -exec cat {} \+) - cd ${DIST_DIR} - ls -l $FILES - scp ${FILES} ${HTTP_UPLOAD_DIR}/ct-ng/llvm-builds @@ -505,15 +478,14 @@ upload_to_github: - job: pack_arm-linux-gnueabihf - job: pack_aarch64-linux-gnu - job: pack_x86_64-w64-mingw32 - - job: sign_x86_64-apple-darwin - - job: sign_aarch64-apple-darwin + - job: macos_codesign before_script: [] script: - ls -l dist*/ - git remote add github ${GH_REPO_HTTPS} - hub release show ${TAG} || { echo 
"Please create a release on GitHub with ${TAG} tag at first"; exit 1; } # List of archives - - FILES=$(find ${DIST_DIR} -name file_\* -exec cat {} \+) + - FILES=$(find ${DIST_DIR} -name dist_name_\* -exec cat {} \+) - cd ${DIST_DIR} - ls -l $FILES # Upload archives From 8a794c44cb7514a5d661ee1d2890979ba6b1b9fb Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Fri, 1 Sep 2023 14:33:04 +0300 Subject: [PATCH 164/289] esp/ci: Use gold linker for Linux builds --- .universal-toolchain-release.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.universal-toolchain-release.yml b/.universal-toolchain-release.yml index baf1891c4b73d..4d14ff174bb55 100644 --- a/.universal-toolchain-release.yml +++ b/.universal-toolchain-release.yml @@ -88,6 +88,7 @@ - BUILD_PATH=$PWD/${BUILD_DIR} - mkdir -p ${BUILD_PATH} - export USE_PARALLEL_LINK_JOBS=2 + - export USE_PARALLEL_COMPILE_JOBS=2 # build Clang toolchain w/o newlib - ${BUILD_TOOLCHAIN_CMD} --llvm-path=${LLVM_PROJECT_PATH} --gcc-toolchains-path=${ESP_GCC_TOOLCHAIN_DIST_BASE} --binutils-path=${BINUTILS_PATH} @@ -120,6 +121,7 @@ GCC_UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" GCC_ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" BUILD_TOOLCHAIN_CMD: "./build-toolchain.sh" + USE_LINKER: "gold" build_x86_64-linux-gnu: extends: .build_linux-gnu_template From d533d489b05993b8af7fdc4098ce3876fe7aaf06 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Fri, 1 Sep 2023 16:29:07 +0300 Subject: [PATCH 165/289] esp/ci: Adds cmake err/log artifacts --- .universal-toolchain-release.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.universal-toolchain-release.yml b/.universal-toolchain-release.yml index 4d14ff174bb55..2d2260df9b10c 100644 --- a/.universal-toolchain-release.yml +++ b/.universal-toolchain-release.yml @@ -61,6 +61,8 @@ - ${BUILD_DIR}/lld-tests.log - ${BUILD_DIR}/tests.log - ${BUILD_DIR}/build.log + - "${BUILD_DIR}/**/CMakeError.log" + - "${BUILD_DIR}/**/CMakeOutput.log" when: always expire_in: 1 day variables: From 
9b4599358acff4c10a95b173bd9bca6af8ff843c Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 12 Oct 2023 01:53:02 +0300 Subject: [PATCH 166/289] [lld][Xtensa] Fix sections placements. --- lld/ELF/Writer.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lld/ELF/Writer.cpp b/lld/ELF/Writer.cpp index 8e3a746a08eb2..4298078f8b4d1 100644 --- a/lld/ELF/Writer.cpp +++ b/lld/ELF/Writer.cpp @@ -1201,6 +1201,10 @@ static void sortSection(OutputSection &osec, if (auto *isd = dyn_cast(b)) sortISDBySectionOrder(isd, order, osec.flags & SHF_EXECINSTR); + if (config->emachine == EM_XTENSA) { + osec.sort([](InputSectionBase *s) { return s->name.contains(".literal") ? 0 : 1; }); + } + if (script->hasSectionsCommand) return; From 1d6af9f128a141a406b574c6d864492b21b3bfb6 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 18 Oct 2023 17:29:11 +0300 Subject: [PATCH 167/289] [lld][Xtensa] Improve literal sections placement. Improve sections sort to place literal section closer to code section which uses literals. Fix l32r relocation check. --- lld/ELF/Arch/Xtensa.cpp | 10 +++++--- lld/ELF/Writer.cpp | 55 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 60 insertions(+), 5 deletions(-) diff --git a/lld/ELF/Arch/Xtensa.cpp b/lld/ELF/Arch/Xtensa.cpp index fef4d2c06b899..301901ab89990 100644 --- a/lld/ELF/Arch/Xtensa.cpp +++ b/lld/ELF/Arch/Xtensa.cpp @@ -127,10 +127,12 @@ void Xtensa::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const { // Look at the instruction to determine how to do the relocation. 
uint8_t opcode = loc[0] & 0x0f; if (opcode == 0b0001) { // RI16 format: l32r - uint64_t val = dest - ((p + 3) & (uint64_t)0xfffffffc); - checkInt(loc, static_cast(val) >> 2, 16, rel); - checkAlignment(loc, val, 4, rel); - write16le(loc + 1, static_cast(val) >> 2); + int64_t val = dest - ((p + 3) & (uint64_t)0xfffffffc); + if ((val < -262144 || val > -4)) + reportRangeError(loc, rel, Twine(static_cast(val)), -262141, + -4); + checkAlignment(loc, static_cast(val), 4, rel); + write16le(loc + 1, val >> 2); } else if (opcode == 0b0101) { // call0, call4, call8, call12 (CALL format) uint64_t val = dest - ((p + 4) & (uint64_t)0xfffffffc); checkInt(loc, static_cast(val) >> 2, 18, rel); diff --git a/lld/ELF/Writer.cpp b/lld/ELF/Writer.cpp index 4298078f8b4d1..23d3c7d65ec03 100644 --- a/lld/ELF/Writer.cpp +++ b/lld/ELF/Writer.cpp @@ -32,6 +32,7 @@ #include "llvm/Support/TimeProfiler.h" #include "llvm/Support/xxhash.h" #include +#include #define DEBUG_TYPE "lld" @@ -1184,6 +1185,58 @@ sortISDBySectionOrder(InputSectionDescription *isd, isd->sections.push_back(isec); } +// Sort Xtensa literal sections in OutputSection. For each literal section we try +// to find by name text(code) section, which uses these literals. The literal +// section should always be placed before code section. +// Also we try to place literal section just before code section. 
+static void sortSectionXtensa(OutputSection &osec) { + for (SectionCommand *b : osec.commands) { + if (auto *isd = dyn_cast(b)) { + std::map orderedNames; + SmallVector, 0> orderedSections; + int sidx = 0; + + for (InputSection *isec : isd->sections) { + if (orderedNames.count(isec->name.str())) { + orderedSections.push_back({isec, orderedNames[isec->name.str()]}); + continue; + } + //Check if current section contains literals + if (isec->name.contains(".literal")) { + std::string literalName = isec->name.str(); + std::size_t pos = literalName.find(".literal"); + std::string textName; + // Reconstructing text(code) section name by literal section name + if (pos == 0) { + textName = ".text"; + } else { + textName = literalName.substr(0, pos); + } + textName += literalName.substr(pos + 8, std::string::npos); + if (orderedNames.count(textName)) { + int textIdx = orderedNames[textName]; + int literalIdx = textIdx - 1; + orderedSections.push_back({isec, literalIdx}); + orderedNames[isec->name.str()] = literalIdx; + } else { + orderedSections.push_back({isec, sidx}); + orderedNames[isec->name.str()] = sidx; + } + } else { + orderedSections.push_back({isec, sidx}); + orderedNames[isec->name.str()] = sidx; + } + sidx += 2; + } + + llvm::sort(orderedSections, llvm::less_second()); + isd->sections.clear(); + for (std::pair p : orderedSections) + isd->sections.push_back(p.first); + } + } +} + static void sortSection(OutputSection &osec, const DenseMap &order) { StringRef name = osec.name; @@ -1202,7 +1255,7 @@ static void sortSection(OutputSection &osec, sortISDBySectionOrder(isd, order, osec.flags & SHF_EXECINSTR); if (config->emachine == EM_XTENSA) { - osec.sort([](InputSectionBase *s) { return s->name.contains(".literal") ? 
0 : 1; }); + sortSectionXtensa(osec); } if (script->hasSectionsCommand) From e6f2577ac9ec59eb3d2914bf4cddc15a8ed068e5 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 31 Oct 2023 00:51:37 +0300 Subject: [PATCH 168/289] [lld][Xtensa] Fix loop instruction relocation --- lld/ELF/Arch/Xtensa.cpp | 32 +++++++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/lld/ELF/Arch/Xtensa.cpp b/lld/ELF/Arch/Xtensa.cpp index 301901ab89990..9734eedc67274 100644 --- a/lld/ELF/Arch/Xtensa.cpp +++ b/lld/ELF/Arch/Xtensa.cpp @@ -105,9 +105,31 @@ static inline bool isRRI8Branch(uint8_t *loc) { // instructions: bgeui, bltui if ((loc[0] & 0b1011'1111) == 0b1011'0110) return true; - // instruction: bt - if ((loc[0] & 0b0111'1111) == 0b0111'0110) - return true; + if ((loc[0] & 0b0111'1111) == 0b0111'0110) { + // instruction: bf + if ((loc[1] & 0b1111'0000) == 0b0000'0000) + return true; + // instruction: bt + if ((loc[1] & 0b1111'0000) == 0b0001'0000) + return true; + } + // some other instruction + return false; +} + +static inline bool isLoop(uint8_t *loc) { + // instructions: loop, loopgtz, loopnez + if ((loc[0] & 0b1111'1111) == 0b0111'0110) { + // instruction: loop + if ((loc[1] & 0b1111'0000) == 0b1000'0000) + return true; + // instruction: loopgtz + if ((loc[1] & 0b1111'0000) == 0b1010'0000) + return true; + // instruction: loopnez + if ((loc[1] & 0b1111'0000) == 0b1001'0000) + return true; + } // some other instruction return false; } @@ -151,6 +173,10 @@ void Xtensa::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const { uint64_t v = val - 4; checkInt(loc, static_cast(v), 8, rel); loc[2] = v & 0xff; + } else if (isLoop(loc)) { // loop instructions + uint64_t v = val - 4; + checkUInt(loc, v, 8, rel); + loc[2] = v & 0xff; } else if ((loc[0] & 0b1000'1111) == 0b1000'1100) { // RI16 format: beqz.n, bnez.n uint64_t v = val - 4; checkUInt(loc, v, 6, rel); From 3a356afac79c7a3481bb82e3cfd7be433b74d403 Mon Sep 17 00:00:00 2001 From: 
Ayke van Laethem Date: Tue, 24 Oct 2023 15:15:10 +0200 Subject: [PATCH 169/289] [Xtensa] Fix Clang builtins include directory This fixes https://github.com/espressif/llvm-project/issues/83 In short, it adjusts the include path logic to include the builtins directory as long as `-nostdinc` or `-nobuiltininc` isn't specified. Previously, the builtins directory would not be included if either the GCC installation wasn't found, or `-nostdlibinc` was specified (both of which aren't related to the builtins directory). --- clang/lib/Driver/ToolChains/Xtensa.cpp | 41 +++++++++++++++----------- 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/clang/lib/Driver/ToolChains/Xtensa.cpp b/clang/lib/Driver/ToolChains/Xtensa.cpp index 0b40155eb2e77..76fab61d3ab6e 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.cpp +++ b/clang/lib/Driver/ToolChains/Xtensa.cpp @@ -132,29 +132,34 @@ Tool *XtensaToolChain::buildAssembler() const { void XtensaToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs, ArgStringList &CC1Args) const { - if (DriverArgs.hasArg(clang::driver::options::OPT_nostdinc) || - DriverArgs.hasArg(options::OPT_nostdlibinc)) + if (DriverArgs.hasArg(clang::driver::options::OPT_nostdinc)) return; - if (!getDriver().SysRoot.empty()) { - SmallString<128> Dir(getDriver().SysRoot); - llvm::sys::path::append(Dir, "include"); - addSystemInclude(DriverArgs, CC1Args, Dir.str()); - } else if (GCCInstallation.isValid()) { - SmallString<128> Path1(getDriver().ResourceDir); - llvm::sys::path::append(Path1, "include"); - SmallString<128> Path2(GCCToolchainDir); - llvm::sys::path::append(Path2, GCCToolchainName, "sys-include"); - SmallString<128> Path3(GCCToolchainDir); - llvm::sys::path::append(Path3, GCCToolchainName, "include"); - - const StringRef Paths[] = {Path1, Path2, Path3}; - addSystemIncludes(DriverArgs, CC1Args, Paths); - } else { - SmallString<128> Dir(computeSysRoot()); + if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) { + SmallString<128> 
Dir(getDriver().ResourceDir); llvm::sys::path::append(Dir, "include"); addSystemInclude(DriverArgs, CC1Args, Dir.str()); } + + if (!DriverArgs.hasArg(options::OPT_nostdlibinc)) { + if (!getDriver().SysRoot.empty()) { + SmallString<128> Dir(getDriver().SysRoot); + llvm::sys::path::append(Dir, "include"); + addSystemInclude(DriverArgs, CC1Args, Dir.str()); + } else if (GCCInstallation.isValid()) { + SmallString<128> Path1(GCCToolchainDir); + llvm::sys::path::append(Path1, GCCToolchainName, "sys-include"); + SmallString<128> Path2(GCCToolchainDir); + llvm::sys::path::append(Path2, GCCToolchainName, "include"); + + const StringRef Paths[] = {Path1, Path2}; + addSystemIncludes(DriverArgs, CC1Args, Paths); + } else { + SmallString<128> Dir(computeSysRoot()); + llvm::sys::path::append(Dir, "include"); + addSystemInclude(DriverArgs, CC1Args, Dir.str()); + } + } } void XtensaToolChain::addLibStdCxxIncludePaths( From d8e2148554520bef188b5ce6cdf23045f9050f13 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 21 Aug 2024 08:47:55 +0300 Subject: [PATCH 170/289] [Xtensa] Fix wchar type. Add absent IR passes. 
--- clang/lib/Basic/Targets/Xtensa.h | 2 +- clang/test/Preprocessor/init.c | 18 ++++++++---------- llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp | 5 ++++- llvm/test/CodeGen/Xtensa/ee-intrinsics-loop.ll | 6 +++--- 4 files changed, 16 insertions(+), 15 deletions(-) diff --git a/clang/lib/Basic/Targets/Xtensa.h b/clang/lib/Basic/Targets/Xtensa.h index 0e7d054ff47a8..1d777f9014d12 100644 --- a/clang/lib/Basic/Targets/Xtensa.h +++ b/clang/lib/Basic/Targets/Xtensa.h @@ -45,7 +45,7 @@ class LLVM_LIBRARY_VISIBILITY XtensaTargetInfo : public TargetInfo { SizeType = UnsignedInt; PtrDiffType = SignedInt; IntPtrType = SignedInt; - WCharType = UnsignedChar; + WCharType = SignedInt; WIntType = UnsignedInt; UseZeroLengthBitfieldAlignment = true; MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32; diff --git a/clang/test/Preprocessor/init.c b/clang/test/Preprocessor/init.c index 8605cef40e4c4..63a3afd3c90e4 100644 --- a/clang/test/Preprocessor/init.c +++ b/clang/test/Preprocessor/init.c @@ -2928,7 +2928,7 @@ // XTENSA: #define __SIZEOF_PTRDIFF_T__ 4 // XTENSA: #define __SIZEOF_SHORT__ 2 // XTENSA: #define __SIZEOF_SIZE_T__ 4 -// XTENSA: #define __SIZEOF_WCHAR_T__ 1 +// XTENSA: #define __SIZEOF_WCHAR_T__ 4 // XTENSA: #define __SIZEOF_WINT_T__ 4 // XTENSA: #define __SIZE_FMTX__ "X" // XTENSA: #define __SIZE_FMTo__ "o" @@ -3033,10 +3033,9 @@ // XTENSA: #define __UINT_LEAST8_FMTx__ "hhx" // XTENSA: #define __UINT_LEAST8_MAX__ 255 // XTENSA: #define __UINT_LEAST8_TYPE__ unsigned char -// XTENSA: #define __WCHAR_MAX__ 255 -// XTENSA: #define __WCHAR_TYPE__ unsigned char -// XTENSA: #define __WCHAR_UNSIGNED__ 1 -// XTENSA: #define __WCHAR_WIDTH__ 8 +// XTENSA: #define __WCHAR_MAX__ 2147483647 +// XTENSA: #define __WCHAR_TYPE__ int +// XTENSA: #define __WCHAR_WIDTH__ 32 // XTENSA: #define __WINT_MAX__ 4294967295U // XTENSA: #define __WINT_TYPE__ unsigned int // XTENSA: #define __WINT_UNSIGNED__ 1 @@ -3231,7 +3230,7 @@ // XTENSA_FAST32: #define __SIZEOF_PTRDIFF_T__ 4 // XTENSA_FAST32: 
#define __SIZEOF_SHORT__ 2 // XTENSA_FAST32: #define __SIZEOF_SIZE_T__ 4 -// XTENSA_FAST32: #define __SIZEOF_WCHAR_T__ 1 +// XTENSA_FAST32: #define __SIZEOF_WCHAR_T__ 4 // XTENSA_FAST32: #define __SIZEOF_WINT_T__ 4 // XTENSA_FAST32: #define __SIZE_FMTX__ "X" // XTENSA_FAST32: #define __SIZE_FMTo__ "o" @@ -3336,10 +3335,9 @@ // XTENSA_FAST32: #define __UINT_LEAST8_FMTx__ "hhx" // XTENSA_FAST32: #define __UINT_LEAST8_MAX__ 255 // XTENSA_FAST32: #define __UINT_LEAST8_TYPE__ unsigned char -// XTENSA_FAST32: #define __WCHAR_MAX__ 255 -// XTENSA_FAST32: #define __WCHAR_TYPE__ unsigned char -// XTENSA_FAST32: #define __WCHAR_UNSIGNED__ 1 -// XTENSA_FAST32: #define __WCHAR_WIDTH__ 8 +// XTENSA_FAST32: #define __WCHAR_MAX__ 2147483647 +// XTENSA_FAST32: #define __WCHAR_TYPE__ int +// XTENSA_FAST32: #define __WCHAR_WIDTH__ 32 // XTENSA_FAST32: #define __WINT_MAX__ 4294967295U // XTENSA_FAST32: #define __WINT_TYPE__ unsigned int // XTENSA_FAST32: #define __WINT_UNSIGNED__ 1 diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp index 3cb1812c96685..0c52125b6a4c3 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp +++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp @@ -143,7 +143,10 @@ bool XtensaPassConfig::addInstSelector() { return false; } -void XtensaPassConfig::addIRPasses() { addPass(createAtomicExpandLegacyPass()); } +void XtensaPassConfig::addIRPasses() { + addPass(createAtomicExpandLegacyPass()); + TargetPassConfig::addIRPasses(); +} void XtensaPassConfig::addPreRegAlloc() { addPass(createXtensaHardwareLoops()); diff --git a/llvm/test/CodeGen/Xtensa/ee-intrinsics-loop.ll b/llvm/test/CodeGen/Xtensa/ee-intrinsics-loop.ll index 9d339f00e539b..ff5c5389d83e0 100644 --- a/llvm/test/CodeGen/Xtensa/ee-intrinsics-loop.ll +++ b/llvm/test/CodeGen/Xtensa/ee-intrinsics-loop.ll @@ -69,7 +69,7 @@ for.body: ; preds = %entry, %for.body ; CHECK-NEXT: ee.stf.128.ip f11, f10, f9, f8, a8, 16 ; CHECK-NEXT: ee.stf.128.ip f8, 
f8, f8, f8, a8, 16 ; CHECK-NEXT: ee.stf.128.xp f8, f8, f8, f8, a9, a8 -; CHECK-NEXT: movi.n a10, 0 +; CHECK-NEXT: movi.n a10, 32 ; CHECK-NEXT: movi.n a11, 10 ; CHECK-NEXT: .LBB0_1: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 @@ -79,8 +79,8 @@ for.body: ; preds = %entry, %for.body ; CHECK-NEXT: ee.vmulas.s16.accx.ld.xp.qup q4, a8, a9, q0, q4, q2, q3 ; CHECK-NEXT: ee.ld.128.usar.xp q4, a8, a9 ; CHECK-NEXT: ee.vmulas.s16.accx.ld.ip.qup q3, a8, 16, q0, q3, q4, q2 -; CHECK-NEXT: addi.n a10, a10, 1 -; CHECK-NEXT: bnei a10, 32, .LBB0_1 +; CHECK-NEXT: addi.n a10, a10, -1 +; CHECK-NEXT: bnez a10, .LBB0_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup ; CHECK-NEXT: movi.n a8, 0 ; CHECK-NEXT: wur.sar_byte a8 From e8be995747a66681e0fe853d887806bc8392231b Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 26 Mar 2024 01:49:52 +0300 Subject: [PATCH 171/289] [Xtensa][Tests]: Fix call abi test. --- clang/test/CodeGen/xtensa-abi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/test/CodeGen/xtensa-abi.c b/clang/test/CodeGen/xtensa-abi.c index 25b1fbd2d1b97..e858f8e88044d 100644 --- a/clang/test/CodeGen/xtensa-abi.c +++ b/clang/test/CodeGen/xtensa-abi.c @@ -25,4 +25,4 @@ void callee_struct_a16b_2(struct S16 a, int b) {} void callee_struct_a16b_3(int a, struct S16 b) {} -// CHECK: define dso_local void @callee_struct_a16b_3(i32 noundef %a, %struct.S16* noundef byval(%struct.S16) align 16 %b) +// CHECK: define dso_local void @callee_struct_a16b_3(i32 noundef %a, ptr noundef byval(%struct.S16) align 16 %b) From c8e7cc2ce5bab9cf8e98de2223de2e731e6a31d6 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Tue, 12 Dec 2023 16:39:28 +0800 Subject: [PATCH 172/289] esp/ci: Modify ci script to support Clang release 17.0.4 --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 644da08a100ff..471d4af4e3282 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -12,7 +12,7 @@ variables: 
# move all these to CI/CD settings REL_SFX: "llvm" - CLANG_VER: "16" + CLANG_VER: "17" GCC_REL_NAME: "esp-2022r1" GCC_REL_VER: "gcc11_2_0" NEWLIB_REF: "esp-4.1.0_20230425" From cd0b11f68cfe0dd211380d204e669afb065652bb Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 26 Sep 2024 03:07:12 +0300 Subject: [PATCH 173/289] [Xtensa] Fix hwloop tests. --- llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll | 14 +++++++------- llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll | 14 +++++++------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll b/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll index 5308fdccfc276..a72e89105587c 100644 --- a/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll +++ b/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll @@ -7,20 +7,20 @@ define i32 @test_hwloop(i32 %a, i32 %b, i32 %n) local_unnamed_addr #0 { ; CHECK-LABEL: test_hwloop: ; CHECK: entry a1, 32 ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: blti a4, 1, .LBB0_5 +; CHECK-NEXT: # %bb.1: # %for.body.preheader ; CHECK-NEXT: movi.n a8, 0 -; CHECK-NEXT: bge a8, a4, .LBB0_5 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: movi.n a9, 0 +; CHECK-NEXT: mov.n a9, a8 ; CHECK-NEXT: j .LBB0_3 ; CHECK-NEXT: .LBB0_2: # %for.body ; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 ; CHECK-NEXT: add.n a2, a10, a2 -; CHECK-NEXT: addi.n a8, a8, 1 -; CHECK-NEXT: bge a8, a4, .LBB0_5 +; CHECK-NEXT: addi.n a9, a9, 1 +; CHECK-NEXT: bge a9, a4, .LBB0_5 ; CHECK-NEXT: .LBB0_3: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: mov.n a10, a9 -; CHECK-NEXT: bge a9, a2, .LBB0_2 +; CHECK-NEXT: mov.n a10, a8 +; CHECK-NEXT: bge a8, a2, .LBB0_2 ; CHECK-NEXT: # %bb.4: # %for.body ; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 ; CHECK-NEXT: mull a10, a2, a3 diff --git a/llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll b/llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll index f34729e58d37e..d2899ae550509 100644 --- 
a/llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll +++ b/llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll @@ -6,22 +6,22 @@ define i32 @test_hwloop(i32 %a, i32 %b, i32 %n) local_unnamed_addr #1 { ; CHECK-LABEL: test_hwloop: ; CHECK: entry a1, 32 ; CHECK-NEXT: .cfi_def_cfa_offset 32 +; CHECK-NEXT: blti a4, 1, .LBB0_5 +; CHECK-NEXT: # %bb.1: # %for.body.preheader ; CHECK-NEXT: movi.n a8, 0 -; CHECK-NEXT: bge a8, a4, .LBB0_5 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: movi.n a9, 0 +; CHECK-NEXT: mov.n a9, a8 ; CHECK-NEXT: j .LBB0_3 ; CHECK-NEXT: .LBB0_2: # %for.body ; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 ; CHECK-NEXT: add.n a2, a10, a2 ; CHECK-NEXT: #APP ; CHECK-NEXT: #NO_APP -; CHECK-NEXT: addi.n a8, a8, 1 -; CHECK-NEXT: bge a8, a4, .LBB0_5 +; CHECK-NEXT: addi.n a9, a9, 1 +; CHECK-NEXT: bge a9, a4, .LBB0_5 ; CHECK-NEXT: .LBB0_3: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: mov.n a10, a9 -; CHECK-NEXT: bge a9, a2, .LBB0_2 +; CHECK-NEXT: mov.n a10, a8 +; CHECK-NEXT: bge a8, a2, .LBB0_2 ; CHECK-NEXT: # %bb.4: # %for.body ; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=1 ; CHECK-NEXT: mull a10, a2, a3 From 0b35ec4fc04ef3b8d89637f3aa8a352447008ab7 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Tue, 12 Dec 2023 11:53:04 +0800 Subject: [PATCH 174/289] esp/ci: Bump binutils version to esp-2.39.0_20230208 --- .gitlab-ci.yml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 471d4af4e3282..909fa15978201 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -16,10 +16,7 @@ variables: GCC_REL_NAME: "esp-2022r1" GCC_REL_VER: "gcc11_2_0" NEWLIB_REF: "esp-4.1.0_20230425" - # TODO: LLVM-248. 
Upgrade binutils above 2.36 when Clang will be upgraded to >=17.x - # which supports 'zicsr' or 'zifencei' RISCV extensions via '-march=' - # https://www.spinics.net/lists/stable/msg645015.html - BINUTILS_REF: "esp-2022r1-binutils" + BINUTILS_REF: "esp-2.39.0_20230208" XTENSA_OVERLAYS_REF: "master" LLVM_GCC_TESTSUITE_REF: "esp-16.0.0-20230425" XTENSA_CLANG_TOOLCHAIN_REF: "esp-16.0.0-20230516" From 74d4014c8ceb0fe9481ec72b03e76be859715a12 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Tue, 12 Dec 2023 12:21:56 +0800 Subject: [PATCH 175/289] esp/ci: Use GNU components from GCC release 12.2.0_20230208 --- .gitlab-ci.yml | 10 ++++++++-- .universal-toolchain-release.yml | 14 ++++++++++---- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 909fa15978201..b92d9c4558eca 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -13,8 +13,7 @@ variables: # move all these to CI/CD settings REL_SFX: "llvm" CLANG_VER: "17" - GCC_REL_NAME: "esp-2022r1" - GCC_REL_VER: "gcc11_2_0" + GCC_REL_VER: "12.2.0_20230208" NEWLIB_REF: "esp-4.1.0_20230425" BINUTILS_REF: "esp-2.39.0_20230208" XTENSA_OVERLAYS_REF: "master" @@ -29,6 +28,13 @@ variables: PLATFORM_NAME_MACOS: "macos" PLATFORM_NAME_MACOS_ARM64: "macos-arm64" + GCC_PLATFORM_NAME_LINUX: "i686-linux-gnu" + GCC_PLATFORM_NAME_LINUX_ARMHF: "arm-linux-gnueabihf" + GCC_PLATFORM_NAME_LINUX_ARM64: "aarch64-linux-gnu" + GCC_PLATFORM_NAME_WIN: "i686-w64-mingw32" + GCC_PLATFORM_NAME_MACOS: "x86_64-apple-darwin" + GCC_PLATFORM_NAME_MACOS_ARM64: "aarch64-apple-darwin" + ARCHIVE_TOOL_LINUX: "tar -cJf" UNARCHIVE_TOOL_LINUX: "tar -xf" ARCHIVE_EXT_LINUX: "tar.xz" diff --git a/.universal-toolchain-release.yml b/.universal-toolchain-release.yml index 2d2260df9b10c..6279866f2aa11 100644 --- a/.universal-toolchain-release.yml +++ b/.universal-toolchain-release.yml @@ -18,12 +18,12 @@ "esp32s3") for ((i = 0; i < ${#XTENSA_CPUS[@]}; i++)); do XTENSA_CPU=${XTENSA_CPUS[$i]} - 
GCC_TOOLCHAIN_ARCH=xtensa-${XTENSA_CPU}-elf-${GCC_REL_VER}-${GCC_REL_NAME}-${PLATFORM_NAME}.${GCC_ARCHIVE_EXT} - wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/${GCC_REL_NAME}/${GCC_TOOLCHAIN_ARCH} + GCC_TOOLCHAIN_ARCH=xtensa-${XTENSA_CPU}-elf-${GCC_REL_VER}-${GCC_PLATFORM_NAME}.${GCC_ARCHIVE_EXT} + wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/esp-${GCC_REL_VER}/${GCC_TOOLCHAIN_ARCH} ${GCC_UNARCHIVE_TOOL} ${GCC_TOOLCHAIN_ARCH} done; - GCC_TOOLCHAIN_ARCH=riscv32-esp-elf-${GCC_REL_VER}-${GCC_REL_NAME}-${PLATFORM_NAME}.${GCC_ARCHIVE_EXT} - wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/${GCC_REL_NAME}/${GCC_TOOLCHAIN_ARCH} + GCC_TOOLCHAIN_ARCH=riscv32-esp-elf-${GCC_REL_VER}-${GCC_PLATFORM_NAME}.${GCC_ARCHIVE_EXT} + wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/esp-${GCC_REL_VER}/${GCC_TOOLCHAIN_ARCH} ${GCC_UNARCHIVE_TOOL} ${GCC_TOOLCHAIN_ARCH} # Pack the toolchain @@ -130,6 +130,7 @@ build_x86_64-linux-gnu: variables: CONF_HOST: "x86_64-linux-gnu" PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" + GCC_PLATFORM_NAME: "${GCC_PLATFORM_NAME_LINUX}" build_arm-linux-gnueabihf: extends: .build_linux-gnu_template @@ -137,6 +138,7 @@ build_arm-linux-gnueabihf: variables: CONF_HOST: "arm-linux-gnueabihf" PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARMHF}" + GCC_PLATFORM_NAME: "${GCC_PLATFORM_NAME_LINUX_ARMHF}" build_aarch64-linux-gnu: extends: .build_linux-gnu_template @@ -144,6 +146,7 @@ build_aarch64-linux-gnu: variables: CONF_HOST: "aarch64-linux-gnu" PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARM64}" + GCC_PLATFORM_NAME: "${GCC_PLATFORM_NAME_LINUX_ARM64}" build_x86_64-w64-mingw32: extends: .build_template @@ -166,6 +169,7 @@ build_x86_64-w64-mingw32: variables: CONF_HOST: "x86_64-w64-mingw32" PLATFORM_NAME: "${PLATFORM_NAME_WIN}" + GCC_PLATFORM_NAME: "${GCC_PLATFORM_NAME_WIN}" ARCHIVE_TOOL: 
"${ARCHIVE_TOOL_LINUX}" ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" GCC_UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_WIN}" @@ -186,12 +190,14 @@ build_x86_64-apple-darwin: variables: CONF_HOST: "x86_64-apple-darwin21.1" PLATFORM_NAME: "${PLATFORM_NAME_MACOS}" + GCC_PLATFORM_NAME: "${GCC_PLATFORM_NAME_MACOS}" build_aarch64-apple-darwin: extends: .build_apple-darwin_template variables: CONF_HOST: "aarch64-apple-darwin21.1" PLATFORM_NAME: "${PLATFORM_NAME_MACOS_ARM64}" + GCC_PLATFORM_NAME: "${GCC_PLATFORM_NAME_MACOS_ARM64}" build_newlib: stage: build From a6c6043e35fe0a471eef0029d6ded8b982db941e Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Tue, 12 Dec 2023 12:38:59 +0800 Subject: [PATCH 176/289] [Toolchain][RISCV]: Add support for ISA 2.1 compliant multilib naming --- clang/lib/Driver/ToolChains/Gnu.cpp | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp index 6180a78aae58a..fb2700d7d1f1f 100644 --- a/clang/lib/Driver/ToolChains/Gnu.cpp +++ b/clang/lib/Driver/ToolChains/Gnu.cpp @@ -1855,10 +1855,13 @@ static void findRISCVBareMetalMultilibs(const Driver &D, // currently only support the set of multilibs like riscv-gnu-toolchain does. 
// TODO: support MULTILIB_REUSE constexpr RiscvMultilib RISCVMultilibSet[] = { - {"rv32i", "ilp32"}, {"rv32im", "ilp32"}, {"rv32iac", "ilp32"}, - {"rv32imc", "ilp32"}, - {"rv32imac", "ilp32"}, {"rv32imafc", "ilp32f"}, {"rv64imac", "lp64"}, - {"rv64imafdc", "lp64d"}}; + {"rv32i", "ilp32"}, {"rv32im", "ilp32"}, {"rv32iac", "ilp32"}, + {"rv32imc", "ilp32"}, {"rv32imac", "ilp32"}, {"rv32imafc", "ilp32f"}, + {"rv64imac", "lp64"}, {"rv64imafdc", "lp64d"}, + // Add ISA 2.1 naming variants to support more modern GCC installations + {"rv32i_zicsr_zifencei", "ilp32"}, {"rv32im_zicsr_zifencei", "ilp32"}, {"rv32iac_zicsr_zifencei", "ilp32"}, + {"rv32imc_zicsr_zifencei", "ilp32"}, {"rv32imac_zicsr_zifencei", "ilp32"}, {"rv32imafc_zicsr_zifencei", "ilp32f"}, + {"rv64imac_zicsr_zifencei", "lp64"}, {"rv64imafdc_zicsr_zifencei", "lp64d"}}; std::vector Ms; From 929de77a5588badbaac17ba8e0a79a7c58808861 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Tue, 12 Dec 2023 13:10:14 +0800 Subject: [PATCH 177/289] esp/ci: Remove legacy release CI code --- .gitlab-ci.yml | 5 -- .legacy-release.yml | 164 -------------------------------------------- 2 files changed, 169 deletions(-) delete mode 100644 .legacy-release.yml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b92d9c4558eca..4e11284b1b001 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -112,8 +112,3 @@ before_script: include: - local: .universal-toolchain-release.yml - rules: - - if: $ESP_CLANG_LEGACY_RELEASE != "true" - - local: .legacy-release.yml - rules: - - if: $ESP_CLANG_LEGACY_RELEASE == "true" diff --git a/.legacy-release.yml b/.legacy-release.yml deleted file mode 100644 index 2042369e9049d..0000000000000 --- a/.legacy-release.yml +++ /dev/null @@ -1,164 +0,0 @@ - -.get_release_name_legacy: &get_release_name_legacy | - # using annotated tags - REL_NUM=$(git describe --abbrev=7) - REL_SFX="llvm15_0_0" - REL_NAME=${CONF_TARGET}-${REL_SFX}-${REL_NUM}-${PLATFORM_NAME} - ARCHIVE_NAME=${REL_NAME}.${ARCHIVE_EXT} - echo 
"PLATFORM_NAME: $PLATFORM_NAME" - echo "REL_NUM: $REL_NUM" - echo "REL_NAME: $REL_NAME" - echo "ARCHIVE_NAME: $ARCHIVE_NAME" - -.get_gcc_toolchain_legacy: &get_gcc_toolchain_legacy | - wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/esp-2021r2-patch3/${XTENSA_GCC_TOOLCHAIN} - ${UNARCHIVE_TOOL} ${XTENSA_GCC_TOOLCHAIN} - if [[ "$XTENSA_GCC_TOOLCHAIN" == *"linux-amd64"* ]]; then - cp -r xtensa-esp32-elf ${XTENSA_CLANG_TOOLCHAIN} - else - mv xtensa-esp32-elf ${XTENSA_CLANG_TOOLCHAIN} - wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/esp-2021r2-patch3/xtensa-esp32-elf-${GCC_REL_NAME}-linux-amd64.tar.gz - tar -xf xtensa-esp32-elf-${GCC_REL_NAME}-linux-amd64.tar.gz - fi - export GCC_ESP32_LINUX_TOOLCHAIN="xtensa-esp32-elf" - -.package_toolchain_legacy: &package_toolchain_legacy | - ${ARCHIVE_TOOL} ${ARCHIVE_NAME} ${XTENSA_CLANG_TOOLCHAIN}/ - mkdir -p ${DIST_DIR} - mv ${ARCHIVE_NAME} ${DIST_DIR}/ - echo "${ARCHIVE_NAME}" > ${DIST_DIR}/file_${PLATFORM_NAME}_${CONF_TARGET} - -.build_template_legacy: - stage: build - tags: [ "amd64", "build" ] - artifacts: - paths: - - ${DIST_DIR}/ - when: always - expire_in: 10 day - variables: - XTENSA_CLANG_TOOLCHAIN_REF: "release_esp32_clang_15.0.0_gcc_8.4.0" - GCC_REL_NAME: "gcc8_4_0-esp-2021r2-patch3" - script: - - *get_release_name_legacy - - *get_gcc_toolchain_legacy - - !reference [.fix_origin_remote_for_public, script] - - !reference [.get_clang_toolchain_build_scripts, script] - - ${BUILD_TOOLCHAIN_CMD} "${XTENSA_CLANG_TOOLCHAIN}" - - *package_toolchain_legacy - -linux_amd64_build: - extends: .build_template_legacy - variables: - PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" - ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" - XTENSA_GCC_TOOLCHAIN: "xtensa-esp32-elf-${GCC_REL_NAME}-linux-amd64.tar.gz" - BUILD_TOOLCHAIN_CMD: "./build-toolchain-linux.sh" - 
-linux_arm64_build: - extends: .build_template_legacy - image: ${CROSS_ARM_IMAGE} - variables: - PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARM64}" - ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" - XTENSA_GCC_TOOLCHAIN: "xtensa-esp32-elf-${GCC_REL_NAME}-linux-arm64.tar.gz" - BUILD_TOOLCHAIN_CMD: "./build-toolchain-linux-arm64.sh" - -win64_build: - extends: .build_template_legacy - variables: - PLATFORM_NAME: "${PLATFORM_NAME_WIN}" - ARCHIVE_TOOL: "${ARCHIVE_TOOL_WIN}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_WIN}" - ARCHIVE_EXT: "${ARCHIVE_EXT_WIN}" - XTENSA_GCC_TOOLCHAIN: "xtensa-esp32-elf-${GCC_REL_NAME}-win64.zip" - BUILD_TOOLCHAIN_CMD: "./build-toolchain-win.sh" - -macos_amd64_build: - extends: .build_template_legacy - variables: - PLATFORM_NAME: "${PLATFORM_NAME_MACOS}" - ARCHIVE_TOOL: "${ARCHIVE_TOOL_MACOS}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_MACOS}" - ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" - XTENSA_GCC_TOOLCHAIN: "xtensa-esp32-elf-${GCC_REL_NAME}-macos.tar.gz" - BUILD_TOOLCHAIN_CMD: "./build-toolchain-macos.sh" - -linux_amd64_testsuite: - stage: test - tags: [ "amd64", "build" ] - needs: - - job: linux_amd64_build - variables: - PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" - ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" - LLVM_GCC_TESTSUITE_REF: "feature/ci_llvm_multitarget_crt_tests" - script: - - *get_release_name_legacy - - ${UNARCHIVE_TOOL} ${DIST_DIR}/${ARCHIVE_NAME} - - # getting testsuite - - git clone -b ${LLVM_GCC_TESTSUITE_REF} --depth 1 $GITLAB_SSH_SERVER/idf/${LLVM_TESTSUITE_REPO}.git - - # preparing testsuite - - export PATH=${PWD}/${XTENSA_CLANG_TOOLCHAIN}/bin/:$PATH - - cd ${LLVM_TESTSUITE_REPO} - - # qemu - - ./qemu_esp32_install.sh - - # run testsuite for esp32 - - ./run_esp32_tests.sh - - # run testsuite for compiler_rt library - - ./run_esp32_crt_tests.sh ../$XTENSA_CLANG_TOOLCHAIN - -upload_to_http_legacy: - 
stage: private_deploy - when: manual - allow_failure: true - tags: [ "deploy", "shiny" ] - variables: - # force the fetch strategy to clean old archives up in dist/ dir - GIT_STRATEGY: fetch - before_script: - - !reference [.use_ci_tools, script] - script: - - cit_add_ssh_key "${HTTP_UPLOAD_KEY}" - # List of archives - - FILES=$(find ${DIST_DIR} -name file_\* -exec cat {} \+) - - cd ${DIST_DIR} - - scp ${FILES} ${HTTP_UPLOAD_DIR}/ct-ng/llvm-builds - # Show info - - echo -e "\nArchives were published there:\n\n$(for n in ${FILES}; do echo "${HTTP_PUBLIC_DIR}/ct-ng/llvm-builds/${n}"; done)\n" - -upload_to_github_legacy: - stage: public_deploy - when: manual - allow_failure: true - only: - - tags - tags: [ "amd64", "internet" ] - image: espressif/github-hub:2 - variables: - GIT_STRATEGY: fetch - GITHUB_TOKEN: "${GH_TOKEN}" - GITHUB_REPO: "${GH_REPO_HTTPS}" - TAG: "${CI_COMMIT_TAG}" - before_script: [] - script: - - ls -l dist*/ - - git remote add github ${GH_REPO_HTTPS} - - hub release show ${TAG} || { echo "Please create a release on GitHub with ${TAG} tag at first"; exit 1; } - # List of archives - - FILES=$(find ${DIST_DIR} -name file_\* -exec cat {} \+) - - cd ${DIST_DIR} - - ls -l $FILES - # Upload archives - - for n in ${FILES}; do hub release edit -m "" -a "${n}" "${TAG}"; done From eab51e36ed605d1d279de920f4af1181491e00f0 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 21 Aug 2024 08:56:26 +0300 Subject: [PATCH 178/289] [Xtensa][RISCV] Fix multilib support Fix multilib support for Espressif RISCV and Xtensa chips. Fix multilib tests. 
--- clang/lib/Driver/ToolChains/Gnu.cpp | 121 +++++++++++----------------- 1 file changed, 48 insertions(+), 73 deletions(-) diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp index fb2700d7d1f1f..f24020614a625 100644 --- a/clang/lib/Driver/ToolChains/Gnu.cpp +++ b/clang/lib/Driver/ToolChains/Gnu.cpp @@ -1865,45 +1865,28 @@ static void findRISCVBareMetalMultilibs(const Driver &D, std::vector Ms; - if (TargetTriple.getVendor() == llvm::Triple::Espressif) - Ms.emplace_back(MultilibBuilder()); - if (TargetTriple.getVendor() == llvm::Triple::Espressif) { - Ms.emplace_back(MultilibBuilder()); - Ms.emplace_back(MultilibBuilder("no-rtti") - .flag("-fno-rtti") - .flag("-frtti", /*Disallow=*/true)); + for (auto Element : RISCVMultilibSet) { + // multilib path rule is ${march}/${mabi} + Ms.emplace_back( + MultilibBuilder( + (Twine(Element.march) + "/" + Twine(Element.mabi)).str()) + .flag(Twine("-march=", Element.march).str()) + .flag(Twine("-mabi=", Element.mabi).str())); } - for (auto Element : RISCVMultilibSet) { - if (TargetTriple.getVendor() == llvm::Triple::Espressif) { - // multilib path rule is ${march}/${mabi} - Ms.emplace_back( - MultilibBuilder( - (Twine(Element.march) + "/" + Twine(Element.mabi)).str()) - .flag(Twine("-march=", Element.march).str()) - .flag(Twine("-mabi=", Element.mabi).str())); - /* no-rtti version for every ${march}/${mabi} */ - Ms.emplace_back( - MultilibBuilder( - (Twine(Element.march) + "/" + Twine(Element.mabi) + "/no-rtti").str()) - .flag(Twine("-march=", Element.march).str()) - .flag(Twine("-mabi=", Element.mabi).str()) - .flag("-fno-rtti") - .flag("-frtti", /*Disallow=*/true)); - } else { - // multilib path rule is ${march}/${mabi} - Ms.emplace_back( - MultilibBuilder( - (Twine(Element.march) + "/" + Twine(Element.mabi)).str()) - .flag(Twine("-march=", Element.march).str()) - .flag(Twine("-mabi=", Element.mabi).str())); - } + MultilibSet RISCVMultilibs; + if (TargetTriple.getVendor() == 
llvm::Triple::Espressif) { + MultilibBuilder NoRTTI = MultilibBuilder("/no-rtti").flag("-fno-rtti"); + RISCVMultilibs = MultilibSetBuilder() + .Either(Ms) + .Maybe(NoRTTI) + .makeMultilibSet() + .FilterOut(NonExistent); + } else { + RISCVMultilibs = + MultilibSetBuilder().Either(Ms).makeMultilibSet().FilterOut( + NonExistent); } - MultilibSet RISCVMultilibs = - MultilibSetBuilder() - .Either(Ms) - .makeMultilibSet() - .FilterOut(NonExistent); if (TargetTriple.getVendor() == llvm::Triple::Espressif) { RISCVMultilibs.setFilePathsCallback([](const Multilib &M) { @@ -1935,8 +1918,8 @@ static void findRISCVBareMetalMultilibs(const Driver &D, if (TargetTriple.getVendor() == llvm::Triple::Espressif) { addMultilibFlag( - Args.hasFlag(options::OPT_frtti, options::OPT_fno_rtti, true), "frtti", - Flags); + Args.hasFlag(options::OPT_fno_rtti, options::OPT_frtti, false), + "-fno-rtti", Flags); } if (selectRISCVMultilib(RISCVMultilibs, MArch, Flags, @@ -1987,49 +1970,41 @@ static void findRISCVMultilibs(const Driver &D, } static void findXtensaMultilibs(const Driver &D, - const llvm::Triple &TargetTriple, StringRef Path, - const ArgList &Args, DetectedMultilibs &Result) { + const llvm::Triple &TargetTriple, + StringRef Path, const ArgList &Args, + DetectedMultilibs &Result) { + FilterNonExistent NonExistent(Path, "/crtbegin.o", D.getVFS()); - MultilibSet XtensaMultilibs = MultilibSet(); StringRef cpu = Args.getLastArgValue(options::OPT_mcpu_EQ, "esp32"); bool IsESP32 = cpu == "esp32"; - XtensaMultilibs.push_back(Multilib()); - XtensaMultilibs.push_back(MultilibBuilder("no-rtti", {}, {}) - .flag("-fno-rtti") - .flag("-frtti", /*Disallow=*/true) - .makeMultilib()); + Multilib::flags_list Flags; - if (IsESP32) { - XtensaMultilibs.push_back(MultilibBuilder("esp32-psram", {}, {}) - .flag("-mfix-esp32-psram-cache-issue") - .makeMultilib()); + addMultilibFlag( + Args.hasFlag(options::OPT_fno_rtti, options::OPT_frtti, false), + "-fno-rtti", Flags); - 
XtensaMultilibs.push_back(MultilibBuilder("esp32-psram/no-rtti", {}, {}) - .flag("-mfix-esp32-psram-cache-issue") - .flag("-fno-rtti") - .flag("-frtti", /*Disallow=*/true) - .makeMultilib()); - } + addMultilibFlag( + IsESP32 && Args.hasFlag(options::OPT_mfix_esp32_psram_cache_issue, + options::OPT_mfix_esp32_psram_cache_issue, false), + "-mfix-esp32-psram-cache-issue", Flags); - std::string cpu_name = cpu.str(); - XtensaMultilibs - .setFilePathsCallback([cpu_name](const Multilib &M) { - return std::vector( - {M.gccSuffix(), - "/../../../../xtensa-" + cpu_name + "-elf/lib" + M.gccSuffix()}); - }); + MultilibBuilder NoRTTI = MultilibBuilder("/no-rtti").flag("-fno-rtti"); + MultilibBuilder FixPSRAM = + MultilibBuilder("/esp32-psram").flag("-mfix-esp32-psram-cache-issue"); - Multilib::flags_list Flags; - addMultilibFlag( - Args.hasFlag(options::OPT_frtti, options::OPT_fno_rtti, true), "frtti", - Flags); - - if (IsESP32) - addMultilibFlag(Args.hasFlag(options::OPT_mfix_esp32_psram_cache_issue, - options::OPT_mfix_esp32_psram_cache_issue, - false), - "mfix-esp32-psram-cache-issue", Flags); + MultilibSet XtensaMultilibs = MultilibSetBuilder() + .Maybe(FixPSRAM) + .Maybe(NoRTTI) + .makeMultilibSet() + .FilterOut(NonExistent); + + std::string cpu_name = cpu.str(); + XtensaMultilibs.setFilePathsCallback([cpu_name](const Multilib &M) { + return std::vector( + {M.gccSuffix(), + "/../../../../xtensa-" + cpu_name + "-elf/lib" + M.gccSuffix()}); + }); if (XtensaMultilibs.select(Flags, Result.SelectedMultilibs)) Result.Multilibs = XtensaMultilibs; From 42a4e9f77a1f065be18f86b1bfbe8ee850f9c87d Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Thu, 28 Dec 2023 10:37:20 +0300 Subject: [PATCH 179/289] esp/ci: Split MacOS sign job (one per arch) to fit into artifacts size limit --- .universal-toolchain-release.yml | 33 +++++++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/.universal-toolchain-release.yml b/.universal-toolchain-release.yml 
index 6279866f2aa11..cd899df8a7ba0 100644 --- a/.universal-toolchain-release.yml +++ b/.universal-toolchain-release.yml @@ -424,13 +424,39 @@ test_x86_64-linux-gnu: # run testsuite for esp32 - ./run_esp32_tests.sh 2>&1 > ${BUILD_PATH}/tests.log -macos_codesign: +sign_pack_x86_64-apple-darwin: stage: macos_codesign when: on_success resource_group: macos_codesign tags: [ "darwin", "codesign" ] # list all jobs that produces macos distros - needs: [ pack_x86_64-apple-darwin, pack_aarch64-apple-darwin ] + needs: + - job: pack_x86_64-apple-darwin + artifacts: + paths: + - ${DIST_DIR} + variables: + # directory with distro archives + DIST_ART_DIR: ${DIST_DIR} + # command to unarchive distro + ARCHIVE_TOOL: ${ARCHIVE_TOOL_MACOS} + # command to unarchive distro + UNARCHIVE_TOOL: ${UNARCHIVE_TOOL_MACOS} + # URL to macos codesign repo + NOTARIZATION_SCRIPTS_GIT: "${CI_SERVER_PROTOCOL}://gitlab-ci-token:${CI_JOB_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/espressif/macos_codesign_notarization.git" + script: + - git clone -q --depth=1 ${NOTARIZATION_SCRIPTS_GIT} -b ${CI_COMMIT_REF_NAME} || + git clone -q --depth=1 ${NOTARIZATION_SCRIPTS_GIT} + - ./macos_codesign_notarization/run.sh + +sign_aarch64-apple-darwin: + stage: macos_codesign + when: on_success + resource_group: macos_codesign + tags: [ "darwin", "codesign" ] + # list all jobs that produces macos distros + needs: + - job: pack_aarch64-apple-darwin artifacts: paths: - ${DIST_DIR} @@ -488,7 +514,8 @@ upload_to_github: - job: pack_arm-linux-gnueabihf - job: pack_aarch64-linux-gnu - job: pack_x86_64-w64-mingw32 - - job: macos_codesign + - job: sign_pack_x86_64-apple-darwin + - job: sign_aarch64-apple-darwin before_script: [] script: - ls -l dist*/ From 875857b9b65523917ef38def857e83e97d35570d Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Thu, 28 Dec 2023 18:05:23 +0300 Subject: [PATCH 180/289] esp/ci: Adds GH PR workflow to run tests --- .github/workflows/esp-clang-tests.yml | 40 +++++++++++++++++++++++ 
.github/workflows/esp-llvm-tests.yml | 46 +++++++++++++++++++++++++++ 2 files changed, 86 insertions(+) create mode 100644 .github/workflows/esp-clang-tests.yml create mode 100644 .github/workflows/esp-llvm-tests.yml diff --git a/.github/workflows/esp-clang-tests.yml b/.github/workflows/esp-clang-tests.yml new file mode 100644 index 0000000000000..a982ca753c383 --- /dev/null +++ b/.github/workflows/esp-clang-tests.yml @@ -0,0 +1,40 @@ +name: Clang Tests + +permissions: + contents: read + +on: + workflow_dispatch: + push: + ignore-forks: true + branches: + - '**_release_**' + paths: + - 'clang/**' + - '.github/workflows/esp-clang-tests.yml' + - '.github/workflows/llvm-project-tests.yml' + - '!llvm/**' + pull_request: + ignore-forks: true + branches: + - '**_release_**' + paths: + - 'clang/**' + - '.github/workflows/esp-clang-tests.yml' + - '.github/workflows/llvm-project-tests.yml' + - '!llvm/**' + +concurrency: + # Skip intermediate builds: always. + # Cancel intermediate builds: only if it is a pull request build. 
+ group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} + +jobs: + check_clang: + if: github.repository_owner == 'espressif' + name: Test clang + uses: ./.github/workflows/llvm-project-tests.yml + with: + build_target: check-clang + projects: clang diff --git a/.github/workflows/esp-llvm-tests.yml b/.github/workflows/esp-llvm-tests.yml new file mode 100644 index 0000000000000..42143b2695ec5 --- /dev/null +++ b/.github/workflows/esp-llvm-tests.yml @@ -0,0 +1,46 @@ +name: LLVM Tests + +permissions: + contents: read + +on: + workflow_dispatch: + push: + ignore-forks: true + branches: + - '**_release_**' + paths: + - 'llvm/**' + - '.github/workflows/esp-llvm-tests.yml' + - '.github/workflows/llvm-project-tests.yml' + pull_request: + ignore-forks: true + branches: + - '**_release_**' + paths: + - 'llvm/**' + - '.github/workflows/esp-llvm-tests.yml' + - '.github/workflows/llvm-project-tests.yml' + +concurrency: + # Skip intermediate builds: always. + # Cancel intermediate builds: only if it is a pull request build. 
+ group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} + +jobs: + check_all: + if: github.repository_owner == 'espressif' + name: Test llvm,clang + uses: ./.github/workflows/llvm-project-tests.yml + with: + build_target: check-all + projects: clang + + check_lld: + if: github.repository_owner == 'espressif' + name: Test lld + uses: ./.github/workflows/llvm-project-tests.yml + with: + build_target: check-lld + projects: lld From 61fa839e6922d2df48c888b32aa367e113f8c12b Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Fri, 29 Dec 2023 11:11:30 +0300 Subject: [PATCH 181/289] [Toolchain][Xtensa][Tests] Fix calling clang++ in tests --- clang/test/Driver/xtensa-toolchain.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clang/test/Driver/xtensa-toolchain.c b/clang/test/Driver/xtensa-toolchain.c index 77f23c284cf52..234495b0cda0a 100644 --- a/clang/test/Driver/xtensa-toolchain.c +++ b/clang/test/Driver/xtensa-toolchain.c @@ -103,7 +103,7 @@ // C-XTENSA-ESP32-SYSROOT-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0" // C-XTENSA-ESP32-SYSROOT-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib" -// RUN: %clang++ %s -### -no-canonical-prefixes \ +// RUN: %clangxx %s -### -no-canonical-prefixes \ // RUN: -target xtensa-esp-elf -mcpu=esp32 -stdlib=libstdc++ --rtlib=platform \ // RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ // RUN: | FileCheck -check-prefix=CXX-XTENSA-ESP32-BAREMETAL %s @@ -113,7 +113,7 @@ // CXX-XTENSA-ESP32-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0" // CXX-XTENSA-ESP32-BAREMETAL: 
"-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib" -// RUN: %clang++ %s -### -no-canonical-prefixes \ +// RUN: %clangxx %s -### -no-canonical-prefixes \ // RUN: -target xtensa-esp-elf -mcpu=esp32 -stdlib=libstdc++ --rtlib=platform \ // RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree \ // RUN: --sysroot=%S/Inputs/multilib_xtensa_tree/xtensa-esp32-elf 2>&1 \ From 1f547f6fe4b3751ae37b064626e9125e58ada862 Mon Sep 17 00:00:00 2001 From: Maciej Czekaj Date: Mon, 11 Sep 2023 16:19:32 +0000 Subject: [PATCH 182/289] [Xtensa] Add definition of S3 output registers. Xtensa S3 DSP instructions are coded using explicit register allocation. However, some instructions miss RegState:Define flag for output registers. This leads MachineVerifier to raise errors. This commit adds missing definitions. --- llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp | 9 +++++++++ llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td | 4 ++++ .../lib/Target/Xtensa/XtensaS3ISelLowering.cpp | 18 +++++++++--------- 3 files changed, 22 insertions(+), 9 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp index 905e9c9788738..73cd675f45599 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp @@ -62,6 +62,15 @@ BitVector XtensaRegisterInfo::getReservedRegs(const MachineFunction &MF) const { // Reserve stack pointer. 
Reserved.set(Xtensa::SP); + //Reserve QR regs + Reserved.set(Xtensa::Q0); + Reserved.set(Xtensa::Q1); + Reserved.set(Xtensa::Q2); + Reserved.set(Xtensa::Q3); + Reserved.set(Xtensa::Q4); + Reserved.set(Xtensa::Q5); + Reserved.set(Xtensa::Q6); + Reserved.set(Xtensa::Q7); return Reserved; } diff --git a/llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td index b3efb331ce45f..afb0abe37ecfb 100644 --- a/llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td @@ -113,6 +113,8 @@ def EE_CMUL_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$as, let Inst{10-6} = 0x3; let Inst{5-4} = sel4{1-0}; let Inst{3-0} = as{3-0}; + + let Constraints = "$asr = $as"; } let usesCustomInserter = 1 in @@ -140,6 +142,8 @@ def EE_CMUL_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qz), (ins QR:$qv, AR:$as, let Inst{7-6} = 0x0; let Inst{5-4} = sel4{1-0}; let Inst{3-0} = as{3-0}; + + let Constraints = "$asr = $as"; } let usesCustomInserter = 1 in diff --git a/llvm/lib/Target/Xtensa/XtensaS3ISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaS3ISelLowering.cpp index 301d225c6442e..181a5ae59ee31 100644 --- a/llvm/lib/Target/Xtensa/XtensaS3ISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaS3ISelLowering.cpp @@ -39,7 +39,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_andq first argument, it must " "be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -56,8 +56,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( const TargetRegisterClass *RC = getRegClassFor(MVT::i32); unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) + 
.addReg(R1, RegState::Define) .addReg(AX.getReg()); MI.eraseFromParent(); @@ -79,7 +79,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "must be in range [0,7]"); MachineOperand &SEL4 = MI.getOperand(3); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal) .addImm(SEL4.getImm()); @@ -110,9 +110,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "argument, it must be in range [0,7]"); MachineOperand &SEL4 = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal) @@ -144,8 +144,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "argument, it must be in range [0,7]"); MachineOperand &SEL4 = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QZVal) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) From 642cda7b187b0df30ee324326d3b6b282cbd71b0 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 30 Sep 2024 02:14:17 +0300 Subject: [PATCH 183/289] [Xtensa] Add Boolean Extension feature Boolean Extension support consists of: - v1i1 boolean vector type backed by BR boolean register class - calling convection for boolean variables - boolean instructions implementing logical operators - truncation and zero-extension operations for conversion to scalars - register spill and fill logic --- clang/lib/Basic/Targets/Xtensa.cpp | 3 + clang/lib/Basic/Targets/Xtensa.h | 3 +- clang/lib/CodeGen/Targets/Xtensa.cpp | 7 ++ 
llvm/lib/Target/Xtensa/XtensaCallingConv.td | 6 ++ .../lib/Target/Xtensa/XtensaFrameLowering.cpp | 21 +++- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 52 ++++++++-- llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp | 11 ++- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 75 ++++++++++++-- llvm/lib/Target/Xtensa/XtensaOperators.td | 4 +- llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp | 98 +++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaRegisterInfo.h | 2 + llvm/lib/Target/Xtensa/XtensaRegisterInfo.td | 9 +- .../lib/Target/Xtensa/XtensaTargetMachine.cpp | 2 +- 13 files changed, 269 insertions(+), 24 deletions(-) diff --git a/clang/lib/Basic/Targets/Xtensa.cpp b/clang/lib/Basic/Targets/Xtensa.cpp index 3bc8cc531069d..3c00be659c092 100644 --- a/clang/lib/Basic/Targets/Xtensa.cpp +++ b/clang/lib/Basic/Targets/Xtensa.cpp @@ -75,6 +75,7 @@ bool XtensaTargetInfo::hasFeature(StringRef Feature) const { return llvm::StringSwitch(Feature) .Case("fp", HasFP) .Case("windowed", HasWindowed) + .Case("bool", HasBoolean) .Default(false); } @@ -84,6 +85,8 @@ bool XtensaTargetInfo::handleTargetFeatures(std::vector &Features, for (const auto &Feature : Features) { if (Feature == "+fp") HasFP = true; + else if (Feature == "+bool") + HasBoolean = true; else if (Feature == "+windowed") HasWindowed = true; } diff --git a/clang/lib/Basic/Targets/Xtensa.h b/clang/lib/Basic/Targets/Xtensa.h index 1d777f9014d12..c969f182c63d6 100644 --- a/clang/lib/Basic/Targets/Xtensa.h +++ b/clang/lib/Basic/Targets/Xtensa.h @@ -33,6 +33,7 @@ class LLVM_LIBRARY_VISIBILITY XtensaTargetInfo : public TargetInfo { std::string CPU; bool HasFP = false; bool HasWindowed = false; + bool HasBoolean = false; public: XtensaTargetInfo(const llvm::Triple &Triple, const TargetOptions &) @@ -49,7 +50,7 @@ class LLVM_LIBRARY_VISIBILITY XtensaTargetInfo : public TargetInfo { WIntType = UnsignedInt; UseZeroLengthBitfieldAlignment = true; MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32; - 
resetDataLayout("e-m:e-p:32:32-i64:64-i128:128-n32"); + resetDataLayout("e-m:e-p:32:32-v1:8:8-i64:64-i128:128-n32"); } void getTargetDefines(const LangOptions &Opts, diff --git a/clang/lib/CodeGen/Targets/Xtensa.cpp b/clang/lib/CodeGen/Targets/Xtensa.cpp index 65f5d5383454e..0c132816f670d 100644 --- a/clang/lib/CodeGen/Targets/Xtensa.cpp +++ b/clang/lib/CodeGen/Targets/Xtensa.cpp @@ -98,6 +98,13 @@ ABIArgInfo XtensaABIInfo::classifyArgumentType(QualType Ty, return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); } + // xtbool + if (getTarget().hasFeature("bool") && Size == 1 && Ty->isVectorType()) { + llvm::Type *ResType = + llvm::FixedVectorType::get(llvm::Type::getInt1Ty(getVMContext()), 1); + return ABIArgInfo::getDirect(ResType); + } + // Aggregates which are <= 6*32 will be passed in registers if possible, // so coerce to integers. if ((Size <= (MaxNumArgGPRs * 32)) && (!MustUseStack)) { diff --git a/llvm/lib/Target/Xtensa/XtensaCallingConv.td b/llvm/lib/Target/Xtensa/XtensaCallingConv.td index a472bc02642bc..c48b97d446bbe 100644 --- a/llvm/lib/Target/Xtensa/XtensaCallingConv.td +++ b/llvm/lib/Target/Xtensa/XtensaCallingConv.td @@ -8,10 +8,15 @@ // This describes the calling conventions for the Xtensa ABI. 
//===----------------------------------------------------------------------===// +class CCIfFeature: + CCIf().has", Feature, "()"), A>; + //===----------------------------------------------------------------------===// // Xtensa return value calling convention //===----------------------------------------------------------------------===// def RetCC_Xtensa : CallingConv<[ + CCIfFeature<"Boolean",CCIfType<[v1i1], CCAssignToReg<[B0]>>>, + // First two return values go in a2, a3, a4, a5 CCIfType<[i32], CCAssignToReg<[A2, A3, A4, A5]>>, CCIfType<[f32], CCAssignToReg<[A2, A3, A4, A5]>>, @@ -30,6 +35,7 @@ def CSRWE_Xtensa : CalleeSavedRegs<(add)> { def RetCCW_Xtensa : CallingConv<[ CCIfType<[i1, i8, i16], CCPromoteToType>, + CCIfFeature<"Boolean",CCIfType<[v1i1], CCAssignToReg<[B0]>>>, CCIfType<[f32], CCBitConvertToType>, //First two return values go in a10, a11, a12, a13 diff --git a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp index b0e699cc02b7c..b9b3bb0ad3a1d 100644 --- a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp @@ -352,15 +352,32 @@ void XtensaFrameLowering::processFunctionBeforeFrameFinalized( MachineFunction &MF, RegScavenger *RS) const { const XtensaSubtarget &STI = MF.getSubtarget(); + // Presence of SPILL_* pseudo-instructions requires spill slots + int NeedRegs = 0; + for (const MachineBasicBlock &MBB : MF) { + for (const MachineInstr &MI : MBB) { + unsigned Opcode = MI.getOpcode(); + if (Opcode == Xtensa::SPILL_BOOL) + NeedRegs += 1; + + if (Opcode == Xtensa::RESTORE_BOOL) + NeedRegs += 3; + } + } + NeedRegs = std::min(16, NeedRegs); + // In WinABI mode add register scavenging slot // FIXME: It may be posssible to add spill slot by more optimal way if (STI.isWinABI() && - (MF.getFrameInfo().estimateStackSize(MF) > STACK_SIZE_THRESHOLD)) { + ((MF.getFrameInfo().estimateStackSize(MF) > STACK_SIZE_THRESHOLD) || + (NeedRegs > 0))) { MachineFrameInfo &MFI 
= MF.getFrameInfo(); const TargetRegisterClass &RC = Xtensa::ARRegClass; const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); unsigned Size = TRI.getSpillSize(RC); Align Alignment = TRI.getSpillAlign(RC); - RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Alignment, false)); + for (int i = 0; i < NeedRegs; i++) + RS->addScavengingFrameIndex( + MFI.CreateStackObject(Size, Alignment, false)); } } diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 46d50ff0c91bd..56ad439b3d4f9 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -64,6 +64,16 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, addRegisterClass(MVT::f32, &Xtensa::FPRRegClass); } + if (Subtarget.hasBoolean()) { + addRegisterClass(MVT::v1i1, &Xtensa::BRRegClass); + setOperationAction(ISD::Constant, MVT::v1i1, Expand); + for (MVT VT : MVT::integer_valuetypes()) { + setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v1i1, Promote); + setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v1i1, Promote); + setLoadExtAction(ISD::EXTLOAD, VT, MVT::v1i1, Promote); + } + } + // Set up special registers. 
setStackPointerRegisterToSaveRestore(Xtensa::SP); @@ -767,6 +777,11 @@ static bool CC_Xtensa_Custom(unsigned ValNo, MVT ValVT, MVT LocVT, ISD::ArgFlagsTy ArgFlags, CCState &State) { static const MCPhysReg IntRegs[] = {Xtensa::A2, Xtensa::A3, Xtensa::A4, Xtensa::A5, Xtensa::A6, Xtensa::A7}; + static const MCPhysReg BoolRegs[] = { + Xtensa::B0, Xtensa::B1, Xtensa::B2, Xtensa::B3, + Xtensa::B4, Xtensa::B5, Xtensa::B6, Xtensa::B7, + Xtensa::B8, Xtensa::B9, Xtensa::B10, Xtensa::B11, + Xtensa::B12, Xtensa::B13, Xtensa::B14, Xtensa::B15}; if (ArgFlags.isByVal()) { Align ByValAlign = ArgFlags.getNonZeroByValAlign(); @@ -824,9 +839,11 @@ static bool CC_Xtensa_Custom(unsigned ValNo, MVT ValVT, MVT LocVT, Register = State.AllocateReg(IntRegs); State.AllocateReg(IntRegs); LocVT = MVT::i32; - } else { - report_fatal_error("Cannot handle this ValVT."); - } + } else if (ValVT == MVT::v1i1) { + Register = State.AllocateReg(BoolRegs); + LocVT = ValVT; + } else + llvm_unreachable("Cannot handle this ValVT."); if (!Register) { unsigned Offset = State.AllocateStack(ValVT.getStoreSize(), OrigAlign); @@ -871,10 +888,12 @@ SDValue XtensaTargetLowering::LowerFormalArguments( EVT RegVT = VA.getLocVT(); const TargetRegisterClass *RC; - if (RegVT == MVT::i32) + if (RegVT == MVT::i32) { RC = &Xtensa::ARRegClass; - else - report_fatal_error("RegVT not supported by FormalArguments Lowering"); + } else if (RegVT == MVT::v1i1) { + RC = &Xtensa::BRRegClass; + } else + llvm_unreachable("RegVT not supported by FormalArguments Lowering"); // Transform the arguments stored on // physical registers into virtual ones @@ -3303,6 +3322,27 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( } return MBB; } + case Xtensa::MOVBA_P: { + const TargetRegisterClass *AR = getRegClassFor(MVT::i32); + + Register Dst1 = MRI.createVirtualRegister(AR); + Register Dst2 = MRI.createVirtualRegister(AR); + MachineOperand Breg = MI.getOperand(0); + MachineOperand Src = MI.getOperand(1); + + /* + 
MOVBA_P2 Breg, Dst1, Dest2, Src + */ + + BuildMI(*MBB, MI, DL, TII.get(Xtensa::MOVBA_P2), Breg.getReg()) + .addReg(Dst1, RegState::Define | RegState::EarlyClobber) + .addReg(Dst2, RegState::Define | RegState::EarlyClobber) + .addReg(Src.getReg()); + + MI.eraseFromParent(); + + return MBB; + } default: return EmitDSPInstrWithCustomInserter(MI, MBB, TII, MF, MRI, DL); // llvm_unreachable("Unexpected instr type to insert"); diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp index b02b03363530d..211f0090effe5 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp @@ -129,7 +129,13 @@ void XtensaInstrInfo::copyPhysReg(MachineBasicBlock &MBB, else if (STI.hasSingleFloat() && Xtensa::ARRegClass.contains(SrcReg) && Xtensa::FPRRegClass.contains(DestReg)) Opcode = Xtensa::WFR; - else + else if (STI.hasBoolean() && Xtensa::BRRegClass.contains(SrcReg) && + Xtensa::BRRegClass.contains(DestReg)) { + BuildMI(MBB, MBBI, DL, get(Xtensa::ORB), DestReg) + .addReg(SrcReg, getKillRegState(KillSrc)) + .addReg(SrcReg, getKillRegState(KillSrc)); + return; + } else report_fatal_error("Impossible reg-to-reg copy"); BuildMI(MBB, MBBI, DL, get(Opcode), DestReg) @@ -170,6 +176,9 @@ void XtensaInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC, } else if (RC == &Xtensa::FPRRegClass) { LoadOpcode = Xtensa::LSI; StoreOpcode = Xtensa::SSI; + } else if (RC == &Xtensa::BRRegClass) { + LoadOpcode = Xtensa::RESTORE_BOOL; + StoreOpcode = Xtensa::SPILL_BOOL; } else llvm_unreachable("Unsupported regclass to load or store"); } diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 13cd99cd9a42c..becd43aa5d8c9 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -839,6 +839,11 @@ def ROTW : RRR_Inst<0x00, 0x00, 0x04, (outs), (ins imm8n_7:$imm), 
//===----------------------------------------------------------------------===// // Boolean Instructions //===----------------------------------------------------------------------===// +class BIN_PAT + : Pat<(dst_vt (node src_vt:$f1, src_vt:$f2)), + (inst src_vt:$f1, src_vt:$f2)>; + def ALL4 : RRR_Inst<0x00, 0x00, 0x00, (outs BR:$t), (ins BR:$s), "all4\t$t, $s", []>, Requires<[HasBoolean]> { @@ -854,6 +859,12 @@ def ANDB : RRR_Inst<0x00, 0x02, 0x00, (outs BR:$r), (ins BR:$s, BR:$t), "andb\t$r, $s, $t", []>, Requires<[HasBoolean]>; def ANDBC : RRR_Inst<0x00, 0x02, 0x01, (outs BR:$r), (ins BR:$s, BR:$t), "andbc\t$r, $s, $t", []>, Requires<[HasBoolean]>; +def ORB : RRR_Inst<0x00, 0x02, 0x02, (outs BR:$r), (ins BR:$s, BR:$t), + "orb\t$r, $s, $t", []>, Requires<[HasBoolean]>; +def ORBC : RRR_Inst<0x00, 0x02, 0x03, (outs BR:$r), (ins BR:$s, BR:$t), + "orbc\t$r, $s, $t", []>, Requires<[HasBoolean]>; +def XORB : RRR_Inst<0x00, 0x02, 0x04, (outs BR:$r), (ins BR:$s, BR:$t), + "xorb\t$r, $s, $t", []>, Requires<[HasBoolean]>; def ANY4 : RRR_Inst<0x00, 0x00, 0x00, (outs BR:$t), (ins BR:$s), "any4\t$t, $s", []>, Requires<[HasBoolean]> { @@ -889,21 +900,67 @@ let isBranch = 1, isTerminator = 1, Predicates = [HasBoolean] in { } } -def MOVF : RRR_Inst<0x00, 0x03, 0x0C, (outs AR:$r), (ins AR:$s, BR:$t), +let Constraints = "$dr = $r,@earlyclobber $dr" in { + def MOVF : RRR_Inst<0x00, 0x03, 0x0C, (outs AR:$dr), (ins AR:$r, AR:$s, BR:$t), "movf\t$r, $s, $t", []>, Requires<[HasBoolean]>; -def MOVT : RRR_Inst<0x00, 0x03, 0x0D, (outs AR:$r), (ins AR:$s, BR:$t), - "movt\t$r, $s, $t", []>, Requires<[HasBoolean]>; -def ORB : RRR_Inst<0x00, 0x02, 0x02, (outs BR:$r), (ins BR:$s, BR:$t), - "orb\t$r, $s, $t", []>, Requires<[HasBoolean]>; -def ORBC : RRR_Inst<0x00, 0x02, 0x03, (outs BR:$r), (ins BR:$s, BR:$t), - "orbc\t$r, $s, $t", []>, Requires<[HasBoolean]>; -def XORB : RRR_Inst<0x00, 0x02, 0x04, (outs BR:$r), (ins BR:$s, BR:$t), - "xorb\t$r, $s, $t", []>, Requires<[HasBoolean]>; + def MOVT : 
RRR_Inst<0x00, 0x03, 0x0D, (outs AR:$dr), (ins AR:$r, AR:$s, BR:$t), + "movt\t$r, $s, $t", []>, Requires<[HasBoolean]>; +} def : Pat<(Xtensa_br_t BR:$b, bb:$target), (BT BR:$b, bb:$target)>; def : Pat<(Xtensa_br_f BR:$b, bb:$target), (BF BR:$b, bb:$target)>; +let Predicates = [HasBoolean] in { + + def OR_BR_PAT: BIN_PAT; + def XOR_BR_PAT: BIN_PAT; + def AND_BR_PAT: BIN_PAT; + + // vselect C T F = C * T + ~C * F + def : Pat<(v1i1 (vselect v1i1:$c, v1i1:$t, v1i1:$f)), + (ORB (ANDB $t, $f), (ANDBC $f, $c))>; + + + def MOVBA_P2: Pseudo<(outs BR:$r, AR:$x, AR:$y), (ins AR:$s), + "!movba $r, $x, $y, $s", []> { + let Defs = [BREG]; + } + + def MOVBA_P: Pseudo<(outs BR:$r), (ins AR:$s), + "!movba $r, $s", []> { + let usesCustomInserter = 1; + let Defs = [BREG]; + //let Uses = [BREG]; + } + + def EXTUI_BR_P: Pseudo<(outs AR:$r), (ins AR:$s, BR:$b), + "!extui_br $r, $s, $b", []>; + def SLLI_BR_P: Pseudo<(outs AR:$r), (ins AR:$s, BR:$b), + "!slli_br $r, $s, $b", []>; + + def : Pat<(v1i1 (build_vector AR:$a)), (MOVBA_P AR:$a)>; + + def : Pat<(i32 (vector_extract (v1i1 BR:$b), (i32 0))), + (EXTUI_BR_P (RSR BREG), BR:$b)>; + + def : Pat<(v1i1 (load addr_ish1:$addr)), (MOVBA_P (L8UI mem8:$addr))>; + + def : Pat<(store BR:$b, addr_ish1:$addr), (S8I (EXTUI_BR_P (RSR BREG), BR:$b), mem32:$addr)>; + + def SPILL_BOOL: Pseudo<(outs), (ins BR:$b, mem8:$mem), + "!spill_bool $b, $mem",[]> { + let mayStore = 1; + } + + def RESTORE_BOOL: Pseudo<(outs BR:$out), (ins mem8:$mem), + "!restore_bool $out, $mem",[]> { + let mayLoad = 1; + let Defs = [BREG]; + } +} + + //===----------------------------------------------------------------------===// // Floating-Point Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaOperators.td b/llvm/lib/Target/Xtensa/XtensaOperators.td index f5136c8038273..add29bf755dc4 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperators.td +++ b/llvm/lib/Target/Xtensa/XtensaOperators.td @@ 
-26,9 +26,9 @@ def SDT_XtensaSelectCC : SDTypeProfile<1, 5, SDTCisVT<5, i32>]>; def SDT_XtensaMOVSP : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisVT<0, i32>]>; -def SDT_XtensaBrBool : SDTypeProfile<0, 2, [SDTCisVT<0, i1>, SDTCisVT<1, OtherVT>]>; +def SDT_XtensaBrBool : SDTypeProfile<0, 2, [SDTCisVT<0, v1i1>, SDTCisVT<1, OtherVT>]>; def SDT_XtensaBrCCFP : SDTypeProfile<0, 4, [SDTCisVT<0, i32>, SDTCisVT<1, f32>, SDTCisVT<2, f32>, SDTCisVT<3, OtherVT>]>; -def SDT_XtensaCmp : SDTypeProfile<1, 2, [SDTCisVT<0, i1>, SDTCisVT<1, f32>, SDTCisVT<2, f32>]>; +def SDT_XtensaCmp : SDTypeProfile<1, 2, [SDTCisVT<0, v1i1>, SDTCisVT<1, f32>, SDTCisVT<2, f32>]>; def SDT_XtensaMADD : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisVT<0, f32>]>; def SDT_XtensaMOVS : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisVT<0, f32>]>; def SDT_XtensaSelectCCFP : SDTypeProfile<1, 5, [SDTCisSameAs<0, 3>, SDTCisSameAs<1, 2>, SDTCisSameAs<3, 4>, SDTCisVT<5, i32>]>; diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp index 73cd675f45599..48ae4e4a99943 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp @@ -19,6 +19,7 @@ #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/RegisterScavenging.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" @@ -71,6 +72,7 @@ BitVector XtensaRegisterInfo::getReservedRegs(const MachineFunction &MF) const { Reserved.set(Xtensa::Q5); Reserved.set(Xtensa::Q6); Reserved.set(Xtensa::Q7); + Reserved.set(Xtensa::BREG); return Reserved; } @@ -87,6 +89,8 @@ bool XtensaRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, int MinCSFI = 0; int MaxCSFI = -1; + assert(RS && "Need register scavenger"); + if (CSI.size()) { MinCSFI = CSI[0].getFrameIdx(); 
MaxCSFI = CSI[CSI.size() - 1].getFrameIdx(); @@ -137,6 +141,94 @@ bool XtensaRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, IsKill = true; } + MachineBasicBlock &MBB = *MI.getParent(); + DebugLoc DL = II->getDebugLoc(); + const XtensaInstrInfo &TII = *static_cast( + MBB.getParent()->getSubtarget().getInstrInfo()); + unsigned BRegBase = Xtensa::B0; + switch (MI.getOpcode()) { + case Xtensa::SPILL_BOOL: { + Register TempAR = + RS->scavengeRegisterBackwards(Xtensa::ARRegClass, II, false, 0); + RS->setRegUsed(TempAR); + + BuildMI(MBB, II, DL, TII.get(Xtensa::RSR), TempAR).addReg(Xtensa::BREG); + MachineOperand &Breg = MI.getOperand(0); + unsigned RegNo = Breg.getReg().id() - BRegBase; + + BuildMI(MBB, II, DL, TII.get(Xtensa::EXTUI), TempAR) + .addReg(TempAR) + .addImm(RegNo) + .addImm(1); + + BuildMI(MBB, II, DL, TII.get(Xtensa::S8I)) + .addReg(TempAR, RegState::Kill) + .addReg(FrameReg, getKillRegState(IsKill)) + .addImm(Offset); + + MI.eraseFromParent(); + return true; + } + case Xtensa::RESTORE_BOOL: { + + Register SrcAR = + RS->scavengeRegisterBackwards(Xtensa::ARRegClass, II, false, 0); + RS->setRegUsed(SrcAR); + Register MaskAR = + RS->scavengeRegisterBackwards(Xtensa::ARRegClass, II, false, 0); + RS->setRegUsed(MaskAR); + Register BRegAR = + RS->scavengeRegisterBackwards(Xtensa::ARRegClass, II, false, 0); + RS->setRegUsed(BRegAR); + + MachineOperand &Breg = MI.getOperand(0); + unsigned RegNo = Breg.getReg().id() - BRegBase; + + BuildMI(MBB, II, DL, TII.get(Xtensa::L8UI), SrcAR) + .addReg(FrameReg, getKillRegState(IsKill)) + .addImm(Offset); + + BuildMI(MBB, II, DL, TII.get(Xtensa::EXTUI), SrcAR) + .addReg(SrcAR) + .addImm(0) + .addImm(1); + + if (RegNo != 0) { + BuildMI(MBB, II, DL, TII.get(Xtensa::SLLI), SrcAR) + .addReg(SrcAR) + .addImm(RegNo); + } + + BuildMI(MBB, II, DL, TII.get(Xtensa::RSR), BRegAR).addReg(Xtensa::BREG); + + unsigned Mask = ~(1 << RegNo) & 0x3ff; + BuildMI(MBB, II, DL, TII.get(Xtensa::MOVI), MaskAR) + .addImm(RegNo < 12 ? 
Mask : 1); + if (RegNo >= 12) { + BuildMI(MBB, II, DL, TII.get(Xtensa::SLLI), MaskAR) + .addReg(MaskAR) + .addImm(RegNo); + } + BuildMI(MBB, II, DL, TII.get(Xtensa::AND), BRegAR) + .addReg(BRegAR) + .addReg(MaskAR); + + BuildMI(MBB, II, DL, TII.get(Xtensa::OR), BRegAR) + .addReg(SrcAR) + .addReg(BRegAR); + + BuildMI(MBB, II, DL, TII.get(Xtensa::WSR)) + .addReg(Xtensa::BREG, RegState::Define) + .addReg(BRegAR) + .addDef(Breg.getReg(), RegState::Implicit); + + MI.eraseFromParent(); + return true; + } + default: + break; + } + MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, IsKill); MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset); @@ -148,3 +240,9 @@ Register XtensaRegisterInfo::getFrameRegister(const MachineFunction &MF) const { return TFI->hasFP(MF) ? (Subtarget.isWinABI() ? Xtensa::A7 : Xtensa::A15) : Xtensa::SP; } + +bool XtensaRegisterInfo::requiresFrameIndexReplacementScavenging( + const MachineFunction &MF) const { + const MachineFrameInfo &MFI = MF.getFrameInfo(); + return MFI.hasStackObjects(); +} diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.h b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.h index ede0eeb90b42d..e2f3b51f7fe90 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.h +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.h @@ -53,6 +53,8 @@ class XtensaRegisterInfo : public XtensaGenRegisterInfo { RegScavenger *RS = nullptr) const override; Register getFrameRegister(const MachineFunction &MF) const override; + + bool requiresFrameIndexReplacementScavenging(const MachineFunction &MF) const override; }; } // end namespace llvm diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td index dc2d5abc48758..7f2de9814da90 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td @@ -308,5 +308,10 @@ foreach i = 0-15 in { } // Boolean register class -def BR : RegisterClass<"Xtensa", [i1], 0, (add B0, B1, -B2, B3, B4, B5, B6, 
B7, B8, B9, B10, B11, B12, B13, B14, B15)>; +def BR : RegisterClass<"Xtensa", [v1i1], 8, (add B0, B1, +B2, B3, B4, B5, B6, B7, B8, B9, B10, B11, B12, B13, B14, B15)> { + let Size = 8; +} + + + diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp index 0c52125b6a4c3..f503288ba3b2b 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp +++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp @@ -34,7 +34,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeXtensaTarget() { static std::string computeDataLayout(const Triple &TT, StringRef CPU, const TargetOptions &Options, bool IsLittle) { - std::string Ret = "e-m:e-p:32:32-i64:64-i128:128-n32"; + std::string Ret = "e-m:e-p:32:32-v1:8:8-i64:64-i128:128-n32"; return Ret; } From c8155ad1e909a7cb45729cccdf58e954eed0a1a5 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 22 Aug 2024 01:18:59 +0300 Subject: [PATCH 184/289] [Xtensa] Refactor loadImmediate loadImmediate is split into two functions: - buildLoadImmediate which accepts allocated registers as params - loadImmediate which allocates virtual registers buildLoadImmediate is inteded to be used in post-RA passes. 
--- llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp | 32 +++++++++++++++------- llvm/lib/Target/Xtensa/XtensaInstrInfo.h | 4 +++ 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp index 211f0090effe5..7d2389875eaff 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp @@ -183,23 +183,22 @@ void XtensaInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC, llvm_unreachable("Unsupported regclass to load or store"); } -void XtensaInstrInfo::loadImmediate(MachineBasicBlock &MBB, +MachineInstrBuilder +XtensaInstrInfo::buildLoadImmediate(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, - unsigned *Reg, int64_t Value) const { + unsigned Reg, int64_t Value) const { DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); - MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo(); - const TargetRegisterClass *RC = &Xtensa::ARRegClass; - // create virtual reg to store immediate - *Reg = RegInfo.createVirtualRegister(RC); if (Value >= -2048 && Value <= 2047) { - BuildMI(MBB, MBBI, DL, get(Xtensa::MOVI), *Reg).addImm(Value); + return BuildMI(MBB, MBBI, DL, get(Xtensa::MOVI), Reg).addImm(Value); } else if (Value >= -32768 && Value <= 32767) { int Low = Value & 0xFF; int High = Value & ~0xFF; - BuildMI(MBB, MBBI, DL, get(Xtensa::MOVI), *Reg).addImm(Low); - BuildMI(MBB, MBBI, DL, get(Xtensa::ADDMI), *Reg).addReg(*Reg).addImm(High); + BuildMI(MBB, MBBI, DL, get(Xtensa::MOVI), Reg).addImm(Low); + return BuildMI(MBB, MBBI, DL, get(Xtensa::ADDMI), Reg) + .addReg(Reg) + .addImm(High); } else if (Value >= -4294967296LL && Value <= 4294967295LL) { // 32 bit arbitrary constant MachineConstantPool *MCP = MBB.getParent()->getConstantPool(); @@ -209,7 +208,8 @@ void XtensaInstrInfo::loadImmediate(MachineBasicBlock &MBB, false); unsigned Idx = MCP->getConstantPoolIndex(CVal, Align(2U)); // MCSymbol MSym - BuildMI(MBB, 
MBBI, DL, get(Xtensa::L32R), *Reg).addConstantPoolIndex(Idx); + return BuildMI(MBB, MBBI, DL, get(Xtensa::L32R), Reg) + .addConstantPoolIndex(Idx); } else { // use L32R to let assembler load immediate best // TODO replace to L32R @@ -217,6 +217,18 @@ void XtensaInstrInfo::loadImmediate(MachineBasicBlock &MBB, } } +void XtensaInstrInfo::loadImmediate(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + unsigned *Reg, int64_t Value) const { + DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); + MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo(); + const TargetRegisterClass *RC = &Xtensa::ARRegClass; + + // create virtual reg to store immediate + *Reg = RegInfo.createVirtualRegister(RC); + buildLoadImmediate(MBB, MBBI, *Reg, Value); +} + unsigned XtensaInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { switch (MI.getOpcode()) { case TargetOpcode::INLINEASM: { // Inline Asm: Variable size. diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.h b/llvm/lib/Target/Xtensa/XtensaInstrInfo.h index 0bfe35a3fb741..339f0e9fff9df 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.h +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.h @@ -73,6 +73,10 @@ class XtensaInstrInfo : public XtensaGenInstrInfo { // physical register Reg. 
void loadImmediate(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned *Reg, int64_t Value) const; + + MachineInstrBuilder buildLoadImmediate(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + unsigned Reg, int64_t Value) const; bool reverseBranchCondition(SmallVectorImpl &Cond) const override; MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override; From 41fb0a671ef0813fb6243c5575561809da4f93c8 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 22 Aug 2024 01:20:10 +0300 Subject: [PATCH 185/289] [Xtensa] Implement BRegFixupPass BRegFixupPass implements post-RA transformations for boolean instructions: - selecting the right constant for SLLI shift instruction based on physical register number. The number is unknown before regster allocation. - selecting the right constant for EXTUI bit extract operation based on physical BR register - emulating MOVBA (reg copy from AR to BR) operation --- llvm/lib/Target/Xtensa/CMakeLists.txt | 1 + llvm/lib/Target/Xtensa/Xtensa.h | 1 + .../lib/Target/Xtensa/XtensaBRegFixupPass.cpp | 188 ++++++++++++++++++ .../lib/Target/Xtensa/XtensaTargetMachine.cpp | 1 + 4 files changed, 191 insertions(+) create mode 100644 llvm/lib/Target/Xtensa/XtensaBRegFixupPass.cpp diff --git a/llvm/lib/Target/Xtensa/CMakeLists.txt b/llvm/lib/Target/Xtensa/CMakeLists.txt index 985e18b1cc4c1..479bbbd831e59 100644 --- a/llvm/lib/Target/Xtensa/CMakeLists.txt +++ b/llvm/lib/Target/Xtensa/CMakeLists.txt @@ -34,6 +34,7 @@ add_llvm_target(XtensaCodeGen XtensaTargetObjectFile.cpp XtensaUtils.cpp XtensaTargetTransformInfo.cpp + XtensaBRegFixupPass.cpp LINK_COMPONENTS AsmPrinter diff --git a/llvm/lib/Target/Xtensa/Xtensa.h b/llvm/lib/Target/Xtensa/Xtensa.h index fca6ed897c574..af7d3044eb41d 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.h +++ b/llvm/lib/Target/Xtensa/Xtensa.h @@ -30,5 +30,6 @@ FunctionPass *createXtensaHardwareLoops(); FunctionPass *createXtensaFixupHwLoops(); FunctionPass 
*createXtensaPSRAMCacheFixPass(); FunctionPass *createXtensaConstantIslandPass(); +FunctionPass *createXtensaBRegFixupPass(); } // namespace llvm #endif // LLVM_LIB_TARGET_XTENSA_XTENSA_H diff --git a/llvm/lib/Target/Xtensa/XtensaBRegFixupPass.cpp b/llvm/lib/Target/Xtensa/XtensaBRegFixupPass.cpp new file mode 100644 index 0000000000000..e5da98500b57b --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaBRegFixupPass.cpp @@ -0,0 +1,188 @@ +//===- XtensaBRegFixup.cpp - Xtensa boolean register fixup ----------------===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "Xtensa.h" +#include "XtensaInstrInfo.h" +#include "XtensaSubtarget.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/CodeGen//MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Target/TargetMachine.h" + +using namespace llvm; + +#define DEBUG_TYPE "xtensa-breg-fixup-pass" + +class XtensaBRegFixup : public MachineFunctionPass { +public: + static char ID; + XtensaBRegFixup() : MachineFunctionPass(ID) {} + + const XtensaSubtarget *Subtarget; + static const XtensaInstrInfo *XtensaII; + + bool runOnMachineFunction(MachineFunction &MF) override; + + llvm::StringRef getPassName() const override { + return "Xtensa bool reg fixup pass"; + } + +private: + bool VisitMBB(MachineBasicBlock &MBB); + bool VisitInstruction(const MachineBasicBlock::instr_iterator &MII); +}; + +char XtensaBRegFixup::ID = 0; +const XtensaInstrInfo *XtensaBRegFixup::XtensaII; + +bool XtensaBRegFixup::runOnMachineFunction(MachineFunction &MF) { + + Subtarget = &static_cast(MF.getSubtarget()); + XtensaII = static_cast(Subtarget->getInstrInfo()); 
+ bool Modified = false; + + MachineFunction::iterator I = MF.begin(), E = MF.end(); + + LLVM_DEBUG(dbgs() << "********* " << getPassName() << " *********\n"); + + for (; I != E; ++I) + Modified |= VisitMBB(*I); + return Modified; +} + +FunctionPass *llvm::createXtensaBRegFixupPass() { + return new XtensaBRegFixup(); +} + +bool XtensaBRegFixup::VisitMBB(MachineBasicBlock &MBB) { + bool Modified = false; + MachineBasicBlock::instr_iterator MII = MBB.instr_begin(), + E = MBB.instr_end(); + MachineBasicBlock::instr_iterator NextMII; + + // Iterate through the instructions in the basic block + for (; MII != E; MII = NextMII) { + NextMII = std::next(MII); + MachineInstr *MI = &*MII; + + // Don't reduce bundled instructions or pseudo operations + if (MI->isBundle() || MI->isTransient()) + continue; + + Modified |= VisitInstruction(MII); + } + + return Modified; +} + +bool XtensaBRegFixup::VisitInstruction( + const MachineBasicBlock::instr_iterator &MII) { + MachineInstr *MI = &*MII; + MachineBasicBlock &MBB = *MI->getParent(); + MachineFunction *MF = MBB.getParent(); + const XtensaInstrInfo &TII = + *static_cast(MF->getSubtarget().getInstrInfo()); + unsigned Opcode = MI->getOpcode(); + unsigned RegBase = Xtensa::B0; + + switch (Opcode) { + case Xtensa::MOVBA_P2: { + /* + MOVBA_P2 Breg, Dst1, Dst2, Src + | + V + RSR Dst1, BREG + LoadImmediate Dst2, BregMask + AND Dst2, Dst2, Dst1 + SLLI Dst1, Src, BregShift + OR Dst2, Dst2, Dst1 + WSR BREG, Dst2 + */ + MachineOperand Breg = MI->getOperand(0); + MachineOperand Dst1 = MI->getOperand(1); + MachineOperand Dst2 = MI->getOperand(2); + MachineOperand Src = MI->getOperand(3); + DebugLoc DL = MI->getDebugLoc(); + unsigned RegNo = Breg.getReg().id() - RegBase; + int64_t Mask = 0xffff & (~(1 << RegNo)); + + MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(Xtensa::RSR)).add(Dst1).addReg(Xtensa::BREG); + + TII.buildLoadImmediate(MBB, MI, Dst2.getReg(), Mask); + + BuildMI(MBB, MI, DL, 
TII.get(Xtensa::AND)).add(Dst2).add(Dst2).add(Dst1); + + if (RegNo > 0) { + BuildMI(MBB, MI, DL, TII.get(Xtensa::SLLI)) + .add(Dst1) + .add(Src) + .addImm(RegNo); + } else { + Dst1.setReg(Src.getReg()); + } + + BuildMI(MBB, MI, DL, TII.get(Xtensa::OR)).add(Dst2).add(Dst2).add(Dst1); + + BuildMI(MBB, MI, DL, TII.get(Xtensa::WSR)).addReg(Xtensa::BREG).add(Dst2); + + LLVM_DEBUG(dbgs() << " Fixed MOVBA_P2: " << *MIB); + MBB.erase_instr(MI); + return true; + } break; + case Xtensa::EXTUI_BR_P: { + + MachineOperand Breg = MI->getOperand(2); + DebugLoc dl = MI->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::EXTUI); + MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); + // Transfer MI flags. + MIB.setMIFlags(MI->getFlags()); + MIB.add(MI->getOperand(0)); + MIB.add(MI->getOperand(1)); + unsigned RegNo = Breg.getReg().id() - RegBase; + MIB.addImm(RegNo); + MIB.addImm(1); + + LLVM_DEBUG(dbgs() << " Fixed EXTUI: " << *MIB); + MBB.erase_instr(MI); + return true; + + } break; + + case Xtensa::SLLI_BR_P: { + + MachineOperand Breg = MI->getOperand(2); + unsigned RegNo = Breg.getReg().id() - RegBase; + if (RegNo != 0) { + DebugLoc dl = MI->getDebugLoc(); + const MCInstrDesc &NewMCID = XtensaII->get(Xtensa::SLLI); + MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); + // Transfer MI flags. 
+ MIB.setMIFlags(MI->getFlags()); + MIB.add(MI->getOperand(0)); + MIB.add(MI->getOperand(1)); + MIB.addImm(RegNo); + + LLVM_DEBUG(dbgs() << " Fixed SLLI: " << *MIB); + } else { + LLVM_DEBUG(dbgs() << " Fixed SLLI: SLLI 0 => NOP"); + } + MBB.erase_instr(MI); + return true; + + } break; + default: + break; + } + + return false; +} diff --git a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp index f503288ba3b2b..28ea43f8cb076 100644 --- a/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp +++ b/llvm/lib/Target/Xtensa/XtensaTargetMachine.cpp @@ -154,6 +154,7 @@ void XtensaPassConfig::addPreRegAlloc() { void XtensaPassConfig::addPreEmitPass() { addPass(createXtensaPSRAMCacheFixPass()); + addPass(createXtensaBRegFixupPass()); addPass(createXtensaSizeReductionPass()); addPass(createXtensaFixupHwLoops()); addPass(&BranchRelaxationPassID); From 38d2e51549797811aa10394a497d247be9a6d788 Mon Sep 17 00:00:00 2001 From: Maciej Czekaj Date: Mon, 27 Mar 2023 13:09:03 +0000 Subject: [PATCH 186/289] [Xtensa] Add LLVM tests for Boolean Extension LLVM bitcode tests verify that Xtensa backend can: - convert between v1i1 and scalar - generate code for v1i1 vselect - spill & restore BR registers - load and store v1i1 values --- .../CodeGen/Xtensa/xtensa-xtbool-convert.ll | 21 +++++++++ .../CodeGen/Xtensa/xtensa-xtbool-select.ll | 10 ++++ .../CodeGen/Xtensa/xtensa-xtbool-spill.ll | 46 +++++++++++++++++++ 3 files changed, 77 insertions(+) create mode 100644 llvm/test/CodeGen/Xtensa/xtensa-xtbool-convert.ll create mode 100644 llvm/test/CodeGen/Xtensa/xtensa-xtbool-select.ll create mode 100644 llvm/test/CodeGen/Xtensa/xtensa-xtbool-spill.ll diff --git a/llvm/test/CodeGen/Xtensa/xtensa-xtbool-convert.ll b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-convert.ll new file mode 100644 index 0000000000000..c2428cead69d7 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-convert.ll @@ -0,0 +1,21 @@ +; RUN: llc -mtriple=xtensa -mcpu=esp32 %s -o - | 
FileCheck %s + +define <1 x i1> @test_xtbool_trunc(i32 %a) { + ; CHECK-LABEL: test_xtbool_trunc + ; CHECK: rsr [[BREG:a[0-9]+]], br + ; CHECK: and [[AND:a[0-9]+]], {{a[0-9]+}}, {{a[0-9]+}} + ; CHECK: or [[OR:a[0-9]+]], [[AND]], a2 + ; CHECK: wsr [[OR]], br + %trunc = trunc i32 %a to i1 + %vec = insertelement <1 x i1> poison, i1 %trunc, i64 0 + ret <1 x i1> %vec +} + +define i32 @test_xtbool_zext(<1 x i1> %b) { + ; CHECK-LABEL: test_xtbool_zext + ; CHECK: rsr [[BREG:a[0-9]+]], br + ; CHECK: extui {{a[0-9]+}}, [[BREG]], 0, 1 + %bit = extractelement <1 x i1> %b, i64 0 + %int = zext i1 %bit to i32 + ret i32 %int +} diff --git a/llvm/test/CodeGen/Xtensa/xtensa-xtbool-select.ll b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-select.ll new file mode 100644 index 0000000000000..b3f4b0e3dfa6a --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-select.ll @@ -0,0 +1,10 @@ +; RUN: llc -O0 -mtriple=xtensa -mcpu=esp32 %s -o - | FileCheck %s + +define <1 x i1> @test_select(<1 x i1> %cc, <1 x i1> %t, <1 x i1> %f) { + ; CHECK-LABEL: test_select + ; CHECK-DAG: andb [[EQ:b[0-9]+]], {{b[0-9]+}}, {{b[0-9]+}} + ; CHECK-DAG: andbc [[NE:b[0-9]+]], {{b[0-9]+}}, {{b[0-9]+}} + ; CHECK: orb {{b[0-9]+}}, [[EQ]], [[NE]] + %r = select <1 x i1> %cc, <1 x i1> %t, <1 x i1> %f + ret <1 x i1> %r +} diff --git a/llvm/test/CodeGen/Xtensa/xtensa-xtbool-spill.ll b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-spill.ll new file mode 100644 index 0000000000000..321e955be6cd9 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-spill.ll @@ -0,0 +1,46 @@ +; RUN: llc -O0 -mtriple=xtensa -mcpu=esp32 %s -o - | FileCheck %s + +define <1 x i1> @test_spill(<1 x i1> %b0, <1 x i1> %b1) +{ + ; CHECK-LABEL: test_spill + ; CHECK: rsr {{a[0-9]+}}, br + ; CHECK: s8i {{a[0-9]+}} + ; CHECK: callx8 a8 + %b2 = call <1 x i1> @get_xtbool() + + ; CHECK: l8ui {{a[0-9]+}} + ; CHECK: rsr {{a[0-9]+}}, br + ; CHECK: wsr {{a[0-9]+}}, br + ; CHECK: orb {{b[0-9]+}} + %r0 = or <1 x i1> %b0, %b1 + ret <1 x i1> %r0 +} + +declare <1 x i1> 
@get_xtbool() + +define <1 x i1> @test_xtbool_load(i32 %addr) { + ; CHECK-LABEL: test_xtbool_load + ; CHECK: l8ui {{a[0-9]+}} + ; CHECK: movi.n [[C:a[0-9]+]], 1 + ; CHECK: and [[SRC:a[0-9]+]], {{a[0-9]+}}, [[C]] + ; CHECK: rsr [[BREG:a[0-9]+]], br + ; CHECK: and [[AND:a[0-9]+]], {{a[0-9]+}}, [[BREG]] + ; CHECK: or [[OR:a[0-9]+]], [[AND]], [[SRC]] + ; CHECK: wsr [[OR]], br + %ptr = inttoptr i32 %addr to ptr + %load_bits = load <8 x i1>, ptr %ptr, align 1 + %extractvec = shufflevector <8 x i1> %load_bits, <8 x i1> poison, <1 x i32> zeroinitializer + ret <1 x i1> %extractvec +} + +define void @test_xtbool_store(i32 %addr, <1 x i1> %b) { +entry: + ; CHECK-LABEL: test_xtbool_store + ; CHECK: rsr [[BREG:a[0-9]+]], br + ; CHECK: extui [[DST:a[0-9]+]], [[BREG]], 0, 1 + ; CHECK: s8i [[DST]], {{a[0-9]+}}, {{[0-9]+}} + %ptr = inttoptr i32 %addr to ptr + %insertvec = shufflevector <1 x i1> %b, <1 x i1> poison, <8 x i32> + store <8 x i1> %insertvec, ptr %ptr, align 1 + ret void +} From f8d4128125580c97c08fa15634a609b9099d8fbf Mon Sep 17 00:00:00 2001 From: Maciej Czekaj Date: Mon, 27 Mar 2023 13:09:03 +0000 Subject: [PATCH 187/289] [Xtensa] Separate directory for Clang CodeGen tests Prepare a separate directory for Xtensa-specific Clang CodeGen tests. --- clang/test/CodeGen/{ => Xtensa}/xtensa-abi.c | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename clang/test/CodeGen/{ => Xtensa}/xtensa-abi.c (100%) diff --git a/clang/test/CodeGen/xtensa-abi.c b/clang/test/CodeGen/Xtensa/xtensa-abi.c similarity index 100% rename from clang/test/CodeGen/xtensa-abi.c rename to clang/test/CodeGen/Xtensa/xtensa-abi.c From b90b76a8d5a04263ae816a0c5d823e305fe284e0 Mon Sep 17 00:00:00 2001 From: Maciej Czekaj Date: Mon, 27 Mar 2023 13:09:04 +0000 Subject: [PATCH 188/289] [Xtensa] Add ABI test for xtbool Extend Xtensa C ABI test to include v1i1 parameters. 
--- clang/test/CodeGen/Xtensa/xtensa-abi.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/clang/test/CodeGen/Xtensa/xtensa-abi.c b/clang/test/CodeGen/Xtensa/xtensa-abi.c index e858f8e88044d..297df864abc88 100644 --- a/clang/test/CodeGen/Xtensa/xtensa-abi.c +++ b/clang/test/CodeGen/Xtensa/xtensa-abi.c @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -triple xtensa -O0 -emit-llvm %s -o - | FileCheck %s +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; + #define __malloc_like __attribute__((__malloc__)) char *bufalloc () __malloc_like ;//__result_use_check; extern void* malloc (unsigned size); @@ -26,3 +28,7 @@ void callee_struct_a16b_2(struct S16 a, int b) {} void callee_struct_a16b_3(int a, struct S16 b) {} // CHECK: define dso_local void @callee_struct_a16b_3(i32 noundef %a, ptr noundef byval(%struct.S16) align 16 %b) + +xtbool test_xtbool(xtbool a) {} + +// CHECK: define dso_local <1 x i1> @test_xtbool(<1 x i1> noundef %a) From c916bd4d6302aa856a06cba586549340c3a21d8c Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 22 Aug 2024 01:43:18 +0300 Subject: [PATCH 189/289] [Xtensa] Implement conditional move instrinsics Adding __builtin_xtensa_movt_s and __builtin_xtensa_movf_s intrinsics. Adding intrincic patterns to MOVT_S anf MOVF_S definitions. 
--- clang/include/clang/Basic/BuiltinsXtensa.def | 6 ++++++ .../CodeGen/Xtensa/xtensa-xtbool-intrinsics.c | 13 +++++++++++++ llvm/include/llvm/IR/Intrinsics.td | 1 + llvm/include/llvm/IR/IntrinsicsXtensa.td | 9 +++++++++ llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 16 +++++++++++----- .../Xtensa/xtensa-xtbool-intrinsics.ll | 19 +++++++++++++++++++ 6 files changed, 59 insertions(+), 5 deletions(-) create mode 100644 clang/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.c create mode 100644 llvm/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.ll diff --git a/clang/include/clang/Basic/BuiltinsXtensa.def b/clang/include/clang/Basic/BuiltinsXtensa.def index 97366c76e97d8..b2f62438249c3 100644 --- a/clang/include/clang/Basic/BuiltinsXtensa.def +++ b/clang/include/clang/Basic/BuiltinsXtensa.def @@ -124,6 +124,12 @@ BUILTIN(__builtin_xtensa_wsr_m3, "vUi", "n") BUILTIN(__builtin_xtensa_rsr_m3, "Ui", "n") BUILTIN(__builtin_xtensa_xsr_m3, "vUi*", "n") +// Float intrinsics + +// float __builtin_xtensa_xt_movt_s(float frr, float frs, xtbool bt) +BUILTIN(__builtin_xtensa_xt_movt_s, "fffE1b", "n") +BUILTIN(__builtin_xtensa_xt_movf_s, "fffE1b", "n") + // generated code #include "clang/Basic/BuiltinsXtensaESP32S3.def" diff --git a/clang/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.c b/clang/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.c new file mode 100644 index 0000000000000..2ad2535738671 --- /dev/null +++ b/clang/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.c @@ -0,0 +1,13 @@ +// RUN: %clang_cc1 -no-opaque-pointers -triple xtensa -O0 -emit-llvm %s -o - | FileCheck %s + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; + +xtbool test_xtbool_movt(float a, float b, xtbool c) { + // CHECK: %{{.*}} = call float @llvm.xtensa.xt.movt.s(float %{{.*}}, float {{.*}}, <1 x i1> {{.*}}) + return __builtin_xtensa_xt_movt_s(a, b, c); +} + +xtbool test_xtbool_movf(float a, float b, xtbool c) { + // CHECK: %{{.*}} = call float @llvm.xtensa.xt.movf.s(float %{{.*}}, float {{.*}}, <1 x i1> 
{{.*}}) + return __builtin_xtensa_xt_movf_s(a, b, c); +} diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td index 82d539f776e70..4754fd86c82a5 100644 --- a/llvm/include/llvm/IR/Intrinsics.td +++ b/llvm/include/llvm/IR/Intrinsics.td @@ -502,6 +502,7 @@ def llvm_aarch64_svcount_ty : LLVMType; def llvm_x86amx_ty : LLVMType; +def llvm_v1i1_ty : LLVMType; // 1 x i1 def llvm_v2i1_ty : LLVMType; // 2 x i1 def llvm_v4i1_ty : LLVMType; // 4 x i1 def llvm_v8i1_ty : LLVMType; // 8 x i1 diff --git a/llvm/include/llvm/IR/IntrinsicsXtensa.td b/llvm/include/llvm/IR/IntrinsicsXtensa.td index ab5d463277c37..6092deeeb8578 100644 --- a/llvm/include/llvm/IR/IntrinsicsXtensa.td +++ b/llvm/include/llvm/IR/IntrinsicsXtensa.td @@ -249,6 +249,15 @@ def int_xtensa_xsr_m3: ClangBuiltin<"__builtin_xtensa_xsr_m3">, Intrinsic<[], [llvm_ptr_ty], []>; +//===----------------------------------------------------------------------===// +// Float operations + +def int_xtensa_xt_movt_s: ClangBuiltin<"__builtin_xtensa_xt_movt_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_v1i1_ty], [IntrNoMem]>; + +def int_xtensa_xt_movf_s: ClangBuiltin<"__builtin_xtensa_xt_movf_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_v1i1_ty], [IntrNoMem]>; + // Generated code // --------------- include "llvm/IR/IntrinsicsXtensaESP32S3.td" diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index becd43aa5d8c9..d6d01d5d2adb4 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1156,9 +1156,6 @@ def MOV_S : RRR_Inst<0x00, 0x0A, 0x0f, (outs FPR:$r), (ins FPR:$s), def MOVEQZ_S : RRR_Inst<0x00, 0x0B, 0x08, (outs FPR:$r), (ins FPR:$s, AR:$t), "moveqz.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; -def MOVF_S : RRR_Inst<0x00, 0x0B, 0x0C, (outs FPR:$r), (ins FPR:$s, BR:$t), - "movf.s\t$r, $s, $t", []>, Requires<[HasBoolean, HasSingleFloat]>; - def MOVGEZ_S 
: RRR_Inst<0x00, 0x0B, 0x0B, (outs FPR:$r), (ins FPR:$s, AR:$t), "movgez.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; @@ -1168,8 +1165,17 @@ def MOVLTZ_S : RRR_Inst<0x00, 0x0B, 0x0A, (outs FPR:$r), (ins FPR:$s, AR:$t), def MOVNEZ_S : RRR_Inst<0x00, 0x0B, 0x09, (outs FPR:$r), (ins FPR:$s, AR:$t), "movnez.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; -def MOVT_S : RRR_Inst<0x00, 0x0B, 0x0D, (outs FPR:$r), (ins FPR:$s, BR:$t), - "movt.s\t$r, $s, $t", []>, Requires<[HasBoolean, HasSingleFloat]>; +let Constraints = "$dr = $r,@earlyclobber $dr" in { + def MOVT_S : RRR_Inst<0x00, 0x0B, 0x0D, (outs FPR:$dr), (ins FPR:$r, FPR:$s, BR:$t), + "movt.s\t$r, $s, $t", + [(set FPR:$dr, (int_xtensa_xt_movt_s FPR:$r, FPR:$s, BR:$t))]>, + Requires<[HasBoolean, HasSingleFloat]>; + + def MOVF_S : RRR_Inst<0x00, 0x0B, 0x0C, (outs FPR:$dr), (ins FPR:$r, FPR:$s, BR:$t), + "movf.s\t$r, $s, $t", + [(set FPR:$dr, (int_xtensa_xt_movf_s FPR:$r, FPR:$s, BR:$t))]>, + Requires<[HasBoolean, HasSingleFloat]>; +} // FP multipy-sub def MSUB_S : RRR_Inst<0x00, 0x0A, 0x05, (outs FPR:$r), (ins FPR:$a, FPR:$s, FPR:$t), diff --git a/llvm/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.ll b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.ll new file mode 100644 index 0000000000000..3ccda04cf83fe --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.ll @@ -0,0 +1,19 @@ +; RUN: llc -mtriple=xtensa -mcpu=esp32 %s -o - | FileCheck %s + +define float @test_xtbool_movt(float %a, float %b, <1 x i1> %c) { + ; CHECK-LABEL: test_xtbool_movt + ; CHECK: movt.s {{f[0-9]+}}, {{f[0-9]+}}, b0 + %f = call float @llvm.xtensa.xt.movt.s(float %a, float %b, <1 x i1> %c) + ret float %f +} + +define float @test_xtbool_movf(float %a, float %b, <1 x i1> %c) { + ; CHECK-LABEL: test_xtbool_movf + ; CHECK: movf.s {{f[0-9]+}}, {{f[0-9]+}}, b0 + %f = call float @llvm.xtensa.xt.movf.s(float %a, float %b, <1 x i1> %c) + ret float %f +} + + +declare float @llvm.xtensa.xt.movt.s(float, float, <1 x i1>); +declare float 
@llvm.xtensa.xt.movf.s(float, float, <1 x i1>); From d954fa487616cd10a60aa418c5f79fc7dfa37dd1 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 22 Aug 2024 01:47:29 +0300 Subject: [PATCH 190/289] [Xtensa] Add basic float intrinsics --- clang/include/clang/Basic/BuiltinsXtensa.def | 47 +++++++++++++++ clang/lib/Sema/SemaXtensa.cpp | 8 +++ .../CodeGen/Xtensa/xtensa-float-intrinsics.c | 38 ++++++++++++ .../CodeGen/Xtensa/xtensa-xtbool-intrinsics.c | 37 +++++++++++- llvm/include/llvm/IR/IntrinsicsXtensa.td | 45 ++++++++++++++ llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 41 +++++++++++++ .../CodeGen/Xtensa/xtensa-float-intrinsics.ll | 60 +++++++++++++++++++ .../Xtensa/xtensa-xtbool-intrinsics.ll | 55 +++++++++++++++++ 8 files changed, 330 insertions(+), 1 deletion(-) create mode 100644 clang/test/CodeGen/Xtensa/xtensa-float-intrinsics.c create mode 100644 llvm/test/CodeGen/Xtensa/xtensa-float-intrinsics.ll diff --git a/clang/include/clang/Basic/BuiltinsXtensa.def b/clang/include/clang/Basic/BuiltinsXtensa.def index b2f62438249c3..12dd5d68d2178 100644 --- a/clang/include/clang/Basic/BuiltinsXtensa.def +++ b/clang/include/clang/Basic/BuiltinsXtensa.def @@ -128,8 +128,55 @@ BUILTIN(__builtin_xtensa_xsr_m3, "vUi*", "n") // float __builtin_xtensa_xt_movt_s(float frr, float frs, xtbool bt) BUILTIN(__builtin_xtensa_xt_movt_s, "fffE1b", "n") + +// float __builtin_xtensa_xt_movf_s(float frr, float frs, xtbool bt) BUILTIN(__builtin_xtensa_xt_movf_s, "fffE1b", "n") +// xtbool __builtin_xtensa_xt_oeq_s(xtfloat frs,xtfloat frt) +BUILTIN(__builtin_xtensa_xt_oeq_s, "E1bff", "n") + +// xtbool __builtin_xtensa_xt_ole_s(xtfloat frs,xtfloat frt) +BUILTIN(__builtin_xtensa_xt_ole_s, "E1bff", "n") + +// xtbool __builtin_xtensa_xt_olt_s(xtfloat frs,xtfloat frt) +BUILTIN(__builtin_xtensa_xt_olt_s, "E1bff", "n") + +// xtbool __builtin_xtensa_xt_ueq_s(xtfloat frs,xtfloat frt) +BUILTIN(__builtin_xtensa_xt_ueq_s, "E1bff", "n") + +// xtbool __builtin_xtensa_xt_ule_s(xtfloat frs,xtfloat frt) 
+BUILTIN(__builtin_xtensa_xt_ule_s, "E1bff", "n") + +// xtbool __builtin_xtensa_xt_ult_s(xtfloat frs,xtfloat frt) +BUILTIN(__builtin_xtensa_xt_ult_s, "E1bff", "n") + +// xtbool __builtin_xtensa_xt_un_s(xtfloat frs,xtfloat frt) +BUILTIN(__builtin_xtensa_xt_un_s, "E1bff", "n") + +// xtfloat __builtin_xtensa_xt_sub_s(xtfloat frs,xtfloat frt) +BUILTIN(__builtin_xtensa_xt_sub_s, "fff", "n") + +// xtfloat __builtin_xtensa_xt_add_s(xtfloat frs,xtfloat frt) +BUILTIN(__builtin_xtensa_xt_add_s, "fff", "n") + +// xtfloat __builtin_xtensa_xt_abs_s(xtfloat frs) +BUILTIN(__builtin_xtensa_xt_abs_s, "ff", "n") + +// xtfloat __builtin_xtensa_xt_mul_s(xtfloat frs,xtfloat frt) +BUILTIN(__builtin_xtensa_xt_mul_s, "fff", "n") + +// int __builtin_xtensa_xt_trunc_s(xtfloat frs,immediate imm_t) +BUILTIN(__builtin_xtensa_xt_trunc_s, "ifi", "n") + +// int __builtin_xtensa_xt_utrunc_s(xtfloat frs,immediate imm_t) +BUILTIN(__builtin_xtensa_xt_utrunc_s, "ifi", "n") + +// xtfloat __builtin_xtensa_xt_float_s(int ars,immediate imm_t) +BUILTIN(__builtin_xtensa_xt_float_s, "fii", "n") + +// xtfloat __builtin_xtensa_xt_ufloat_s(int ars,immediate imm_t) +BUILTIN(__builtin_xtensa_xt_ufloat_s, "fii", "n") + // generated code #include "clang/Basic/BuiltinsXtensaESP32S3.def" diff --git a/clang/lib/Sema/SemaXtensa.cpp b/clang/lib/Sema/SemaXtensa.cpp index c4b51c8b6fb9e..4c88daf50453f 100644 --- a/clang/lib/Sema/SemaXtensa.cpp +++ b/clang/lib/Sema/SemaXtensa.cpp @@ -93,6 +93,14 @@ bool SemaXtensa::CheckXtensaBuiltinFunctionCall(const TargetInfo &TI, return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 3) || SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 1) || SemaRef.BuiltinConstantArgRange(TheCall, 3, 2, 3); + case Xtensa::BI__builtin_xtensa_xt_trunc_s: + case Xtensa::BI__builtin_xtensa_xt_utrunc_s: + case Xtensa::BI__builtin_xtensa_xt_float_s: + case Xtensa::BI__builtin_xtensa_xt_ufloat_s: + i = 1; + l = 0; + u = 15; + break; case Xtensa::BI__builtin_xtensa_ee_andq: case 
Xtensa::BI__builtin_xtensa_ee_cmul_s16: case Xtensa::BI__builtin_xtensa_ee_fft_cmul_s16_st_xp: diff --git a/clang/test/CodeGen/Xtensa/xtensa-float-intrinsics.c b/clang/test/CodeGen/Xtensa/xtensa-float-intrinsics.c new file mode 100644 index 0000000000000..97282730c7e36 --- /dev/null +++ b/clang/test/CodeGen/Xtensa/xtensa-float-intrinsics.c @@ -0,0 +1,38 @@ +// RUN: %clang_cc1 -no-opaque-pointers -triple xtensa -O0 -emit-llvm %s -o - | FileCheck %s + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; + +float test_float_s(int a) { + // CHECK: %{{.*}} = call float @llvm.xtensa.xt.float.s(i32 %{{.*}}, i32 1) + return __builtin_xtensa_xt_float_s(a, 1); +} + +float test_ufloat_s(int a) { + // CHECK: %{{.*}} = call float @llvm.xtensa.xt.ufloat.s(i32 %{{.*}}, i32 1) + return __builtin_xtensa_xt_ufloat_s(a, 1); +} + +int test_trunc_s(float a) { + // CHECK: %{{.*}} = call i32 @llvm.xtensa.xt.trunc.s(float %{{.*}}, i32 1) + return __builtin_xtensa_xt_trunc_s(a, 1); +} + +int test_utrunc_s(float a) { + // CHECK: %{{.*}} = call i32 @llvm.xtensa.xt.utrunc.s(float %{{.*}}, i32 1) + return __builtin_xtensa_xt_utrunc_s(a, 1); +} + +float test_add_s(float a, float b) { + // CHECK: %{{.*}} = call float @llvm.xtensa.xt.add.s(float %{{.*}}, float %{{.*}}) + return __builtin_xtensa_xt_add_s(a, b); +} + +float test_sub_s(float a, float b) { + // CHECK: %{{.*}} = call float @llvm.xtensa.xt.sub.s(float %{{.*}}, float %{{.*}}) + return __builtin_xtensa_xt_sub_s(a, b); +} + +float test_mul_s(float a, float b) { + // CHECK: %{{.*}} = call float @llvm.xtensa.xt.mul.s(float %{{.*}}, float %{{.*}}) + return __builtin_xtensa_xt_mul_s(a, b); +} diff --git a/clang/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.c b/clang/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.c index 2ad2535738671..8d9bc35907cca 100644 --- a/clang/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.c +++ b/clang/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -no-opaque-pointers -triple 
xtensa -O0 -emit-llvm %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple xtensa -O0 -emit-llvm %s -o - | FileCheck %s typedef __attribute__((ext_vector_type(1))) _Bool xtbool; @@ -11,3 +11,38 @@ xtbool test_xtbool_movf(float a, float b, xtbool c) { // CHECK: %{{.*}} = call float @llvm.xtensa.xt.movf.s(float %{{.*}}, float {{.*}}, <1 x i1> {{.*}}) return __builtin_xtensa_xt_movf_s(a, b, c); } + +xtbool test_xtbool_oeq_s(float a, float b) { + // CHECK: %{{.*}} = call <1 x i1> @llvm.xtensa.xt.oeq.s(float %{{.*}}, float {{.*}}) + return __builtin_xtensa_xt_oeq_s(a, b); +} + +xtbool test_xtbool_ueq_s(float a, float b) { + // CHECK: %{{.*}} = call <1 x i1> @llvm.xtensa.xt.ueq.s(float %{{.*}}, float {{.*}}) + return __builtin_xtensa_xt_ueq_s(a, b); +} + +xtbool test_xtbool_olt_s(float a, float b) { + // CHECK: %{{.*}} = call <1 x i1> @llvm.xtensa.xt.olt.s(float %{{.*}}, float {{.*}}) + return __builtin_xtensa_xt_olt_s(a, b); +} + +xtbool test_xtbool_ult_s(float a, float b) { + // CHECK: %{{.*}} = call <1 x i1> @llvm.xtensa.xt.ult.s(float %{{.*}}, float {{.*}}) + return __builtin_xtensa_xt_ult_s(a, b); +} + +xtbool test_xtbool_ole_s(float a, float b) { + // CHECK: %{{.*}} = call <1 x i1> @llvm.xtensa.xt.ole.s(float %{{.*}}, float {{.*}}) + return __builtin_xtensa_xt_ole_s(a, b); +} + +xtbool test_xtbool_ule_s(float a, float b) { + // CHECK: %{{.*}} = call <1 x i1> @llvm.xtensa.xt.ule.s(float %{{.*}}, float {{.*}}) + return __builtin_xtensa_xt_ule_s(a, b); +} + +xtbool test_xtbool_un_s(float a, float b) { + // CHECK: %{{.*}} = call <1 x i1> @llvm.xtensa.xt.un.s(float %{{.*}}, float {{.*}}) + return __builtin_xtensa_xt_un_s(a, b); +} diff --git a/llvm/include/llvm/IR/IntrinsicsXtensa.td b/llvm/include/llvm/IR/IntrinsicsXtensa.td index 6092deeeb8578..2b9c8dd62cb7c 100644 --- a/llvm/include/llvm/IR/IntrinsicsXtensa.td +++ b/llvm/include/llvm/IR/IntrinsicsXtensa.td @@ -258,6 +258,51 @@ def int_xtensa_xt_movt_s: ClangBuiltin<"__builtin_xtensa_xt_movt_s">, def 
int_xtensa_xt_movf_s: ClangBuiltin<"__builtin_xtensa_xt_movf_s">, Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_v1i1_ty], [IntrNoMem]>; +def int_xtensa_xt_oeq_s: ClangBuiltin<"__builtin_xtensa_xt_oeq_s">, + Intrinsic<[llvm_v1i1_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_ole_s: ClangBuiltin<"__builtin_xtensa_xt_ole_s">, + Intrinsic<[llvm_v1i1_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_olt_s: ClangBuiltin<"__builtin_xtensa_xt_olt_s">, + Intrinsic<[llvm_v1i1_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_ueq_s: ClangBuiltin<"__builtin_xtensa_xt_ueq_s">, + Intrinsic<[llvm_v1i1_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_ule_s: ClangBuiltin<"__builtin_xtensa_xt_ule_s">, + Intrinsic<[llvm_v1i1_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_ult_s: ClangBuiltin<"__builtin_xtensa_xt_ult_s">, + Intrinsic<[llvm_v1i1_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_un_s: ClangBuiltin<"__builtin_xtensa_xt_un_s">, + Intrinsic<[llvm_v1i1_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_add_s: ClangBuiltin<"__builtin_xtensa_xt_add_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_abs_s: ClangBuiltin<"__builtin_xtensa_xt_abs_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_mul_s: ClangBuiltin<"__builtin_xtensa_xt_mul_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_sub_s: ClangBuiltin<"__builtin_xtensa_xt_sub_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_trunc_s: ClangBuiltin<"__builtin_xtensa_xt_trunc_s">, + Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_xt_utrunc_s: ClangBuiltin<"__builtin_xtensa_xt_utrunc_s">, + Intrinsic<[llvm_i32_ty], 
[llvm_float_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_xt_float_s: ClangBuiltin<"__builtin_xtensa_xt_float_s">, + Intrinsic<[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_xt_ufloat_s: ClangBuiltin<"__builtin_xtensa_xt_ufloat_s">, + Intrinsic<[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + // Generated code // --------------- include "llvm/IR/IntrinsicsXtensaESP32S3.td" diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index d6d01d5d2adb4..30e72ae634f86 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -29,6 +29,11 @@ class ArithLogic_RRR oper2, bits<4> oper1, string instrAsm, let isReMaterializable = 0; } +class Arith_Pat + : Pat<(res_vt (opNode op_vt:$r, op_vt:$t)), + (inst op_vt:$r, op_vt:$t)>; + def ADD : ArithLogic_RRR<0x08, 0x00, "add", add, 1>; def SUB : ArithLogic_RRR<0x0C, 0x00, "sub", sub>; def AND : ArithLogic_RRR<0x01, 0x00, "and", and, 1>; @@ -979,6 +984,12 @@ def ADD_S : FPArith_RRR<0x00, 0x0A, "add.s", fadd, 1>; def SUB_S : FPArith_RRR<0x01, 0x0A, "sub.s", fsub>; def MUL_S : FPArith_RRR<0x02, 0x0A, "mul.s", fmul, 1>; +let Predicates = [HasSingleFloat] in { + def : Arith_Pat; + def : Arith_Pat; + def : Arith_Pat; +} + // FP load instructions let mayLoad = 1, usesCustomInserter = 1, Predicates = [HasSingleFloat] in { def LSI : RRI8_Inst<0x03, (outs FPR:$t), (ins mem32:$addr), @@ -1059,6 +1070,16 @@ def ULT_S : FCompare<0x05, 0x0b, "ult.s", Xtensa_cmpult, 0>; def ULE_S : FCompare<0x07, 0x0b, "ule.s", Xtensa_cmpule, 0>; def UN_S : FCompare<0x01, 0x0b, "un.s", Xtensa_cmpuo, 1>; +let Predicates = [HasSingleFloat] in { + def : Arith_Pat; + def : Arith_Pat; + def : Arith_Pat; + def : Arith_Pat; + def : Arith_Pat; + def : Arith_Pat; + def : Arith_Pat; +} + def ABS_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), "abs.s\t$r, $s", [(set FPR:$r, (fabs FPR:$s))]>, 
Requires<[HasSingleFloat]> { @@ -1067,6 +1088,10 @@ def ABS_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), def : Pat<(fabs FPR:$s), (ABS_S $s)>; +def : Pat<(f32 (int_xtensa_xt_abs_s FPR:$frs)), + (ABS_S FPR:$frs)>, + Requires<[HasSingleFloat]>; + def ADDEXP_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), "addexp.s\t$r, $s", []>, Requires<[HasSingleFloat]> { let t = 0x0E; @@ -1109,6 +1134,10 @@ def FLOAT_S : RRR_Inst<0x00, 0x0A, 0x0c, (outs FPR:$r), (ins AR:$s, uimm4:$imm), def : Pat<(f32 (sint_to_fp AR:$s)), (FLOAT_S AR:$s, 0)>; +def : Pat<(f32 (int_xtensa_xt_float_s i32:$s, timm:$imm)), + (FLOAT_S AR:$s, uimm4:$imm)>, + Requires<[HasSingleFloat]>; + def FLOOR_S : RRR_Inst<0x00, 0x0A, 0x0A, (outs AR:$r), (ins FPR:$s, uimm4:$imm), "floor.s\t$r, $s, $imm", []>, Requires<[HasSingleFloat]> { bits<4> imm; @@ -1238,6 +1267,10 @@ def TRUNC_S : RRR_Inst<0x00, 0x0A, 0x09, (outs AR:$r), (ins FPR:$s, uimm4:$imm), def : Pat<(i32 (any_fp_to_sint FPR:$s)), (TRUNC_S FPR:$s, 0)>; +def : Pat<(i32 (int_xtensa_xt_trunc_s f32:$frs, timm:$imm)), + (TRUNC_S FPR:$frs, uimm4:$imm)>, + Requires<[HasSingleFloat]>; + def UFLOAT_S : RRR_Inst<0x00, 0x0A, 0x0D, (outs FPR:$r), (ins AR:$s, uimm4:$imm), "ufloat.s\t$r, $s, $imm", []>, Requires<[HasSingleFloat]> { bits<4> imm; @@ -1247,6 +1280,10 @@ def UFLOAT_S : RRR_Inst<0x00, 0x0A, 0x0D, (outs FPR:$r), (ins AR:$s, uimm4:$imm) def : Pat<(f32 (uint_to_fp AR:$s)), (UFLOAT_S AR:$s, 0)>; +def : Pat<(f32 (int_xtensa_xt_ufloat_s i32:$s, timm:$imm)), + (UFLOAT_S AR:$s, uimm4:$imm)>, + Requires<[HasSingleFloat]>; + def UTRUNC_S : RRR_Inst<0x00, 0x0A, 0x0e, (outs AR:$r), (ins FPR:$s, uimm4:$imm), "utrunc.s\t$r, $s, $imm", []>, Requires<[HasSingleFloat]> { bits<4> imm; @@ -1256,6 +1293,10 @@ def UTRUNC_S : RRR_Inst<0x00, 0x0A, 0x0e, (outs AR:$r), (ins FPR:$s, uimm4:$imm) def : Pat<(i32 (any_fp_to_uint FPR:$s)), (UTRUNC_S FPR:$s, 0)>; +def : Pat<(i32 (int_xtensa_xt_utrunc_s f32:$frs, timm:$imm)), + (UTRUNC_S FPR:$frs, uimm4:$imm)>, + 
Requires<[HasSingleFloat]>; + def WFR : RRR_Inst<0x00, 0x0A, 0x0f, (outs FPR:$r), (ins AR:$s), "wfr\t$r, $s", [(set FPR:$r, (bitconvert AR:$s))]>, Requires<[HasSingleFloat]> { diff --git a/llvm/test/CodeGen/Xtensa/xtensa-float-intrinsics.ll b/llvm/test/CodeGen/Xtensa/xtensa-float-intrinsics.ll new file mode 100644 index 0000000000000..73d54e2a51210 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/xtensa-float-intrinsics.ll @@ -0,0 +1,60 @@ +; RUN: llc -mtriple=xtensa -mcpu=esp32 %s -o - | FileCheck %s + + + +define float @test_float_s(i32 %a) { + ; CHECK-LABEL: test_float_s + ; CHECK: float.s {{f[0-9]+}}, a2, 1 + %r = call float @llvm.xtensa.xt.float.s(i32 %a, i32 1) + ret float %r +} + +define float @test_ufloat_s(i32 %a) { + ; CHECK-LABEL: test_ufloat_s + ; CHECK: ufloat.s {{f[0-9]+}}, a2, 1 + %r = call float @llvm.xtensa.xt.ufloat.s(i32 %a, i32 1) + ret float %r +} + +define i32 @test_trunc_s(float %a) { + ; CHECK-LABEL: test_trunc_s + ; CHECK: trunc.s a2, {{f[0-9]+}}, 1 + %r = call i32 @llvm.xtensa.xt.trunc.s(float %a, i32 1) + ret i32 %r +} + +define i32 @test_utrunc_s(float %a) { + ; CHECK-LABEL: test_utrunc_s + ; CHECK: trunc.s a2, {{f[0-9]+}}, 1 + %r = call i32 @llvm.xtensa.xt.utrunc.s(float %a, i32 1) + ret i32 %r +} + +define float @test_add_s(float %a, float %b) { + ; CHECK-LABEL: test_add_s + ; CHECK: add.s {{f[0-9]+}}, {{f[0-9]+}}, {{f[0-9]+}} + %r = call float @llvm.xtensa.xt.add.s(float %a, float %b) + ret float %r +} + +define float @test_sub_s(float %a, float %b) { + ; CHECK-LABEL: test_sub_s + ; CHECK: sub.s {{f[0-9]+}}, {{f[0-9]+}}, {{f[0-9]+}} + %r = call float @llvm.xtensa.xt.sub.s(float %a, float %b) + ret float %r +} + +define float @test_mul_s(float %a, float %b) { + ; CHECK-LABEL: test_mul_s + ; CHECK: mul.s {{f[0-9]+}}, {{f[0-9]+}}, {{f[0-9]+}} + %r = call float @llvm.xtensa.xt.mul.s(float %a, float %b) + ret float %r +} + +declare float @llvm.xtensa.xt.float.s(i32, i32); +declare float @llvm.xtensa.xt.ufloat.s(i32, i32); +declare i32 
@llvm.xtensa.xt.trunc.s(float , i32); +declare i32 @llvm.xtensa.xt.utrunc.s(float, i32); +declare float @llvm.xtensa.xt.add.s(float %a, float %b); +declare float @llvm.xtensa.xt.sub.s(float %a, float %b); +declare float @llvm.xtensa.xt.mul.s(float %a, float %b); diff --git a/llvm/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.ll b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.ll index 3ccda04cf83fe..4a9255c4b4ee4 100644 --- a/llvm/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.ll +++ b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-intrinsics.ll @@ -14,6 +14,61 @@ define float @test_xtbool_movf(float %a, float %b, <1 x i1> %c) { ret float %f } +define <1 x i1> @test_xtbool_oeq_s(float %a, float %b) { + ; CHECK-LABEL: test_xtbool_oeq_s + ; CHECK: oeq.s b0, {{f[0-9]+}}, {{f[0-9]+}} + %r = call <1 x i1> @llvm.xtensa.xt.oeq.s(float %a, float %b) + ret <1 x i1> %r +} + +define <1 x i1> @test_xtbool_ueq_s(float %a, float %b) { + ; CHECK-LABEL: test_xtbool_ueq_s + ; CHECK: ueq.s b0, {{f[0-9]+}}, {{f[0-9]+}} + %r = call <1 x i1> @llvm.xtensa.xt.ueq.s(float %a, float %b) + ret <1 x i1> %r +} + +define <1 x i1> @test_xtbool_ole_s(float %a, float %b) { + ; CHECK-LABEL: test_xtbool_ole_s + ; CHECK: ole.s b0, {{f[0-9]+}}, {{f[0-9]+}} + %r = call <1 x i1> @llvm.xtensa.xt.ole.s(float %a, float %b) + ret <1 x i1> %r +} + +define <1 x i1> @test_xtbool_ule_s(float %a, float %b) { + ; CHECK-LABEL: test_xtbool_ule_s + ; CHECK: ule.s b0, {{f[0-9]+}}, {{f[0-9]+}} + %r = call <1 x i1> @llvm.xtensa.xt.ule.s(float %a, float %b) + ret <1 x i1> %r +} + +define <1 x i1> @test_xtbool_olt_s(float %a, float %b) { + ; CHECK-LABEL: test_xtbool_olt_s + ; CHECK: olt.s b0, {{f[0-9]+}}, {{f[0-9]+}} + %r = call <1 x i1> @llvm.xtensa.xt.olt.s(float %a, float %b) + ret <1 x i1> %r +} + +define <1 x i1> @test_xtbool_ult_s(float %a, float %b) { + ; CHECK-LABEL: test_xtbool_ult_s + ; CHECK: ult.s b0, {{f[0-9]+}}, {{f[0-9]+}} + %r = call <1 x i1> @llvm.xtensa.xt.ult.s(float %a, float %b) + ret <1 x i1> %r +} + 
+define <1 x i1> @test_un_s(float %a, float %b) { + ; CHECK-LABEL: test_un_s + ; CHECK: un.s b0, {{f[0-9]+}}, {{f[0-9]+}} + %r = call <1 x i1> @llvm.xtensa.xt.un.s(float %a, float %b) + ret <1 x i1> %r +} declare float @llvm.xtensa.xt.movt.s(float, float, <1 x i1>); declare float @llvm.xtensa.xt.movf.s(float, float, <1 x i1>); +declare <1 x i1> @llvm.xtensa.xt.oeq.s(float, float); +declare <1 x i1> @llvm.xtensa.xt.ueq.s(float, float); +declare <1 x i1> @llvm.xtensa.xt.ole.s(float, float); +declare <1 x i1> @llvm.xtensa.xt.ule.s(float, float); +declare <1 x i1> @llvm.xtensa.xt.olt.s(float, float); +declare <1 x i1> @llvm.xtensa.xt.ult.s(float, float); +declare <1 x i1> @llvm.xtensa.xt.un.s(float %a, float %b); From a2484a463835ec1b8f6c38007b701ad9fb4e8e0a Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 22 Aug 2024 01:50:24 +0300 Subject: [PATCH 191/289] [Xtensa] Implement remaining floating point intrinsics --- clang/include/clang/Basic/BuiltinsXtensa.def | 106 ++++++++++ clang/lib/CodeGen/CGBuiltin.cpp | 46 +++++ clang/lib/CodeGen/CodeGenFunction.h | 3 + clang/lib/Sema/SemaXtensa.cpp | 11 + .../CodeGen/Xtensa/xtensa-float-intrinsics.c | 12 +- .../CodeGen/Xtensa/xtensa-gen-intrinsics.c | 88 ++++++++ llvm/include/llvm/IR/IntrinsicsXtensa.td | 111 ++++++++++ .../Disassembler/XtensaDisassembler.cpp | 8 + .../MCTargetDesc/XtensaMCCodeEmitter.cpp | 16 ++ llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp | 35 ++++ llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 8 +- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 192 +++++++++++++++--- llvm/lib/Target/Xtensa/XtensaOperands.td | 5 +- .../CodeGen/Xtensa/xtensa-float-intrinsics.ll | 18 ++ .../CodeGen/Xtensa/xtensa-gen-intrinsics.ll | 80 ++++++++ 15 files changed, 705 insertions(+), 34 deletions(-) create mode 100644 clang/test/CodeGen/Xtensa/xtensa-gen-intrinsics.c create mode 100644 llvm/test/CodeGen/Xtensa/xtensa-gen-intrinsics.ll diff --git a/clang/include/clang/Basic/BuiltinsXtensa.def 
b/clang/include/clang/Basic/BuiltinsXtensa.def index 12dd5d68d2178..a09762568defd 100644 --- a/clang/include/clang/Basic/BuiltinsXtensa.def +++ b/clang/include/clang/Basic/BuiltinsXtensa.def @@ -177,6 +177,112 @@ BUILTIN(__builtin_xtensa_xt_float_s, "fii", "n") // xtfloat __builtin_xtensa_xt_ufloat_s(int ars,immediate imm_t) BUILTIN(__builtin_xtensa_xt_ufloat_s, "fii", "n") +// xtfloat __builtin_xtensa___builtin_xtensa_xt_addexp_s(xtfloat,xtfloat) +BUILTIN(__builtin_xtensa_xt_addexp_s, "fff", "n") + +// xtfloat __builtin_xtensa___builtin_xtensa_xt_addexpm_s(xtfloat,xtfloat) +BUILTIN(__builtin_xtensa_xt_addexpm_s, "fff", "n") + +// int __builtin_xtensa___builtin_xtensa_xt_ceil_s(xtfloat,immediate) +BUILTIN(__builtin_xtensa_xt_ceil_s, "ifi", "n") + +// xtfloat __builtin_xtensa___builtin_xtensa_xt_div0_s(xtfloat) +BUILTIN(__builtin_xtensa_xt_div0_s, "ff", "n") + +// xtfloat __builtin_xtensa___builtin_xtensa_xt_divn_s(xtfloat,xtfloat,xtfloat) +BUILTIN(__builtin_xtensa_xt_divn_s, "ffff", "n") + +// int __builtin_xtensa___builtin_xtensa_xt_floor_s(xtfloat,immediate) +BUILTIN(__builtin_xtensa_xt_floor_s, "ifi", "n") + +// xtfloat __builtin_xtensa___builtin_xtensa_xt_lsi(const xtfloat*,immediate) +BUILTIN(__builtin_xtensa_xt_lsi, "ff*i", "n") + +BUILTIN(__builtin_xtensa_xt_lsip, "ff**i", "n") + +// xtfloat __builtin_xtensa___builtin_xtensa_xt_lsx(const xtfloat*,int) +BUILTIN(__builtin_xtensa_xt_lsx, "ff*i", "n") + +BUILTIN(__builtin_xtensa_xt_lsxp, "ff*i", "n") + +// xtfloat __builtin_xtensa___builtin_xtensa_xt_madd_s(xtfloat,xtfloat,xtfloat) +BUILTIN(__builtin_xtensa_xt_madd_s, "ffff", "n") + +// xtfloat __builtin_xtensa___builtin_xtensa_xt_maddn_s(xtfloat,xtfloat,xtfloat) +BUILTIN(__builtin_xtensa_xt_maddn_s, "ffff", "n") + +// xtfloat __builtin_xtensa___builtin_xtensa_xt_mkdadj_s(xtfloat,xtfloat) +BUILTIN(__builtin_xtensa_xt_mkdadj_s, "fff", "n") + +// xtfloat __builtin_xtensa___builtin_xtensa_xt_mksadj_s(xtfloat) +BUILTIN(__builtin_xtensa_xt_mksadj_s, "ff", "n") + +// 
xtfloat __builtin_xtensa___builtin_xtensa_xt_mov_s(xtfloat) +BUILTIN(__builtin_xtensa_xt_mov_s, "ff", "n") + +// xtfloat __builtin_xtensa___builtin_xtensa_xt_moveqz_s(xtfloat,xtfloat,int) +BUILTIN(__builtin_xtensa_xt_moveqz_s, "fffi", "n") + +// xtfloat __builtin_xtensa___builtin_xtensa_xt_movgez_s(xtfloat,xtfloat,int) +BUILTIN(__builtin_xtensa_xt_movgez_s, "fffi", "n") + +// xtfloat __builtin_xtensa___builtin_xtensa_xt_movltz_s(xtfloat,xtfloat,int) +BUILTIN(__builtin_xtensa_xt_movltz_s, "fffi", "n") + +// xtfloat __builtin_xtensa___builtin_xtensa_xt_movnez_s(xtfloat,xtfloat,int) +BUILTIN(__builtin_xtensa_xt_movnez_s, "fffi", "n") + +// xtfloat __builtin_xtensa___builtin_xtensa_xt_msub_s(xtfloat,xtfloat,xtfloat) +BUILTIN(__builtin_xtensa_xt_msub_s, "ffff", "n") + +// xtfloat __builtin_xtensa___builtin_xtensa_xt_neg_s(xtfloat) +BUILTIN(__builtin_xtensa_xt_neg_s, "ff", "n") + +// xtfloat __builtin_xtensa___builtin_xtensa_xt_nexp01_s(xtfloat) +BUILTIN(__builtin_xtensa_xt_nexp01_s, "ff", "n") + +// xtfloat __builtin_xtensa___builtin_xtensa_xt_recip0_s(xtfloat) +BUILTIN(__builtin_xtensa_xt_recip0_s, "ff", "n") + +// int __builtin_xtensa___builtin_xtensa_xt_rfr(xtfloat) +BUILTIN(__builtin_xtensa_xt_rfr, "if", "n") + +// int __builtin_xtensa___builtin_xtensa_xt_round_s(xtfloat,immediate) +BUILTIN(__builtin_xtensa_xt_round_s, "ifi", "n") + +// xtfloat __builtin_xtensa___builtin_xtensa_xt_rsqrt0_s(xtfloat) +BUILTIN(__builtin_xtensa_xt_rsqrt0_s, "ff", "n") + +// int __builtin_xtensa___builtin_xtensa_xt_rur_fcr() +BUILTIN(__builtin_xtensa_xt_rur_fcr, "i", "n") + +// int __builtin_xtensa___builtin_xtensa_xt_rur_fsr() +BUILTIN(__builtin_xtensa_xt_rur_fsr, "i", "n") + +// xtfloat __builtin_xtensa___builtin_xtensa_xt_sqrt0_s(xtfloat) +BUILTIN(__builtin_xtensa_xt_sqrt0_s, "ff", "n") + +// void __builtin_xtensa___builtin_xtensa_xt_ssi(xtfloat,xtfloat*,immediate) +BUILTIN(__builtin_xtensa_xt_ssi, "vff*i", "n") + +// xtfloat* 
__builtin_xtensa___builtin_xtensa_xt_ssip(xtfloat,xtfloat*,immediate) +BUILTIN(__builtin_xtensa_xt_ssip, "f*ff*i", "n") + +// void __builtin_xtensa___builtin_xtensa_xt_ssx(xtfloat,xtfloat*,int) +BUILTIN(__builtin_xtensa_xt_ssx, "vff*i", "n") + +// xtfloat* __builtin_xtensa___builtin_xtensa_xt_ssxp(xtfloat,xtfloat*,int) +BUILTIN(__builtin_xtensa_xt_ssxp, "f*ff*i", "n") + +// xtfloat __builtin_xtensa___builtin_xtensa_xt_wfr(int) +BUILTIN(__builtin_xtensa_xt_wfr, "fi", "n") + +// void __builtin_xtensa___builtin_xtensa_xt_wur_fcr(int) +BUILTIN(__builtin_xtensa_xt_wur_fcr, "vi", "n") + +// void __builtin_xtensa___builtin_xtensa_xt_wur_fsr(int) +BUILTIN(__builtin_xtensa_xt_wur_fsr, "vi", "n") + // generated code #include "clang/Basic/BuiltinsXtensaESP32S3.def" diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index 5639239359ab8..bf36a06f27062 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -55,6 +55,7 @@ #include "llvm/IR/IntrinsicsVE.h" #include "llvm/IR/IntrinsicsWebAssembly.h" #include "llvm/IR/IntrinsicsX86.h" +#include "llvm/IR/IntrinsicsXtensa.h" #include "llvm/IR/MDBuilder.h" #include "llvm/IR/MatrixBuilder.h" #include "llvm/IR/MemoryModelRelaxationAnnotations.h" @@ -6318,6 +6319,8 @@ static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF, if (CGF->getTarget().getTriple().getOS() != llvm::Triple::OSType::AMDHSA) return nullptr; return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E); + case llvm::Triple::xtensa: + return CGF->EmitXtensaBuiltinExpr(BuiltinID, E, ReturnValue, Arch); default: return nullptr; } @@ -22081,3 +22084,46 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID, llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); return Builder.CreateCall(F, Ops, ""); } + +llvm::Value * +CodeGenFunction::EmitXtensaBuiltinExpr(unsigned BuiltinID, const CallExpr *E, + ReturnValueSlot ReturnValue, + llvm::Triple::ArchType Arch) { + + unsigned IntrinsicID; + switch (BuiltinID) { + 
case Xtensa::BI__builtin_xtensa_xt_lsxp: + IntrinsicID = Intrinsic::xtensa_xt_lsxp; + break; + case Xtensa::BI__builtin_xtensa_xt_lsip: + IntrinsicID = Intrinsic::xtensa_xt_lsip; + break; + default: + llvm_unreachable("unexpected builtin ID"); + } + + llvm::Function *F = CGM.getIntrinsic(IntrinsicID); + // 1st argument is passed by pointer + /* float lsip(float **a, int off) => float p = *a + ret, p' = @int.xtensa.lsip(p, off) + *a = p' + */ + auto InoutPtrTy = F->getArg(0)->getType()->getPointerTo(); + Address InoutPtrAddr = Builder.CreateElementBitCast( + EmitPointerWithAlignment(E->getArg(0)), InoutPtrTy); + + unsigned NumArgs = E->getNumArgs(); + Value *InoutVal = Builder.CreateLoad(InoutPtrAddr); + SmallVector Args; + + Args.push_back(InoutVal); + for (unsigned i = 1; i < NumArgs; i++) + Args.push_back(EmitScalarExpr(E->getArg(i))); + + Value *Val = Builder.CreateCall(F, Args, "retval"); + Value *Val0 = Builder.CreateExtractValue(Val, 0); + Value *Val1 = Builder.CreateExtractValue(Val, 1); + // ret store + Builder.CreateStore(Val1, InoutPtrAddr); + return Val0; +} diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index 60e6841e1b3d6..b43346e5c37d1 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -4801,6 +4801,9 @@ class CodeGenFunction : public CodeGenTypeCache { /// Emits a reference binding to the passed in expression. 
RValue EmitReferenceBindingToExpr(const Expr *E); + llvm::Value *EmitXtensaBuiltinExpr(unsigned BuiltinID, const CallExpr *E, + ReturnValueSlot ReturnValue, + llvm::Triple::ArchType Arch); //===--------------------------------------------------------------------===// // Expression Emission //===--------------------------------------------------------------------===// diff --git a/clang/lib/Sema/SemaXtensa.cpp b/clang/lib/Sema/SemaXtensa.cpp index 4c88daf50453f..b3cae0adbfe31 100644 --- a/clang/lib/Sema/SemaXtensa.cpp +++ b/clang/lib/Sema/SemaXtensa.cpp @@ -97,10 +97,21 @@ bool SemaXtensa::CheckXtensaBuiltinFunctionCall(const TargetInfo &TI, case Xtensa::BI__builtin_xtensa_xt_utrunc_s: case Xtensa::BI__builtin_xtensa_xt_float_s: case Xtensa::BI__builtin_xtensa_xt_ufloat_s: + case Xtensa::BI__builtin_xtensa_xt_ceil_s: + case Xtensa::BI__builtin_xtensa_xt_floor_s: + case Xtensa::BI__builtin_xtensa_xt_round_s: i = 1; l = 0; u = 15; break; + case Xtensa::BI__builtin_xtensa_xt_lsi: + case Xtensa::BI__builtin_xtensa_xt_lsip: + return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1020) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 1, 4); + case Xtensa::BI__builtin_xtensa_xt_ssi: + case Xtensa::BI__builtin_xtensa_xt_ssip: + return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 1020) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 4); case Xtensa::BI__builtin_xtensa_ee_andq: case Xtensa::BI__builtin_xtensa_ee_cmul_s16: case Xtensa::BI__builtin_xtensa_ee_fft_cmul_s16_st_xp: diff --git a/clang/test/CodeGen/Xtensa/xtensa-float-intrinsics.c b/clang/test/CodeGen/Xtensa/xtensa-float-intrinsics.c index 97282730c7e36..150eface40a0b 100644 --- a/clang/test/CodeGen/Xtensa/xtensa-float-intrinsics.c +++ b/clang/test/CodeGen/Xtensa/xtensa-float-intrinsics.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -no-opaque-pointers -triple xtensa -O0 -emit-llvm %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple xtensa -O0 -emit-llvm %s -o - | FileCheck %s typedef __attribute__((ext_vector_type(1))) 
_Bool xtbool; @@ -36,3 +36,13 @@ float test_mul_s(float a, float b) { // CHECK: %{{.*}} = call float @llvm.xtensa.xt.mul.s(float %{{.*}}, float %{{.*}}) return __builtin_xtensa_xt_mul_s(a, b); } + +float test_xt_lsip(float * a0) { + // CHECK: %{{.*}} = call { float, ptr } @llvm.xtensa.xt.lsip(ptr %{{.*}}, i32 0) + return __builtin_xtensa_xt_lsip(&a0, 0); +} + +float test_xt_lsxp(float * a0, int a1) { + // CHECK: %{{.*}} = call { float, ptr } @llvm.xtensa.xt.lsxp(ptr %{{.*}}, i32 %{{.*}}) + return __builtin_xtensa_xt_lsxp(&a0, a1); +} diff --git a/clang/test/CodeGen/Xtensa/xtensa-gen-intrinsics.c b/clang/test/CodeGen/Xtensa/xtensa-gen-intrinsics.c new file mode 100644 index 0000000000000..2b8de3d7b6d33 --- /dev/null +++ b/clang/test/CodeGen/Xtensa/xtensa-gen-intrinsics.c @@ -0,0 +1,88 @@ +# Fails on Winodws due to incorrectly built command line: `'(': command not found`, so disable it temporarily +# UNSUPPORTED: system-windows + +# RUN: python3 %s > %t && ( %clang_cc1 -Dxtfloat=float -O0 -triple=xtensa %t -o - -emit-llvm | FileCheck %t ) + +FIXTURES = [ +('xtfloat', 'xt_addexp_s', ['xtfloat', 'xtfloat']) , +('xtfloat', 'xt_addexpm_s', ['xtfloat', 'xtfloat']) , +('int', 'xt_ceil_s', ['xtfloat', 0]) , +('xtfloat', 'xt_div0_s', ['xtfloat']) , +('xtfloat', 'xt_divn_s', ['xtfloat', 'xtfloat', 'xtfloat']) , +('int', 'xt_floor_s', ['xtfloat', 0]) , +('xtfloat', 'xt_lsi', ['xtfloat*', 0]) , +('xtfloat', 'xt_lsx', ['xtfloat*', 'int']) , +('xtfloat', 'xt_madd_s', ['xtfloat', 'xtfloat', 'xtfloat']) , +('xtfloat', 'xt_maddn_s', ['xtfloat', 'xtfloat', 'xtfloat']) , +('xtfloat', 'xt_mkdadj_s', ['xtfloat', 'xtfloat']) , +('xtfloat', 'xt_mksadj_s', ['xtfloat']) , +('xtfloat', 'xt_mov_s', ['xtfloat']) , +('xtfloat', 'xt_moveqz_s', ['xtfloat', 'xtfloat', 'int']) , +('xtfloat', 'xt_movgez_s', ['xtfloat', 'xtfloat', 'int']) , +('xtfloat', 'xt_movltz_s', ['xtfloat', 'xtfloat', 'int']) , +('xtfloat', 'xt_movnez_s', ['xtfloat', 'xtfloat', 'int']) , +('xtfloat', 'xt_msub_s', ['xtfloat', 
'xtfloat', 'xtfloat']) , +('xtfloat', 'xt_neg_s', ['xtfloat']) , +('xtfloat', 'xt_nexp01_s', ['xtfloat']) , +('xtfloat', 'xt_recip0_s', ['xtfloat']) , +('int', 'xt_rfr', ['xtfloat']) , +('int', 'xt_round_s', ['xtfloat', 0]) , +('xtfloat', 'xt_rsqrt0_s', ['xtfloat']) , +('int', 'xt_rur_fcr', []) , +('int', 'xt_rur_fsr', []) , +('xtfloat', 'xt_sqrt0_s', ['xtfloat']) , +('void', 'xt_ssi', ['xtfloat', 'xtfloat*', 0]) , +('xtfloat*', 'xt_ssip', ['xtfloat', 'xtfloat*', 0]) , +('void', 'xt_ssx', ['xtfloat', 'xtfloat*', 'int']) , +('xtfloat*', 'xt_ssxp', ['xtfloat', 'xtfloat*', 'int']) , +('xtfloat', 'xt_wfr', ['int']) , +('void', 'xt_wur_fcr', ['int']) , +('void', 'xt_wur_fsr', ['int']) , +] + +from dataclasses import dataclass + +TYPES = { + 'xtfloat' : 'float', + 'int' : 'i32', + 'void':'void' +} + +def ctype2llvm(typ): + if '*' in typ: + return 'ptr' + else: + return TYPES[typ] + + +template = """ +{ret} test_{fun}({fun_args}) {{ + // CHECK: {assign} call {llvm_ret} @llvm.xtensa.{builtin}({llvm_args}) + return __builtin_xtensa_{fun}({call_args}); +}} +""" + +@dataclass +class F: + ret: str + fun : str + args: [str] + #builtin + #llvm_ret + #llvm_args + #call_args + +for f in FIXTURES: + f = F(*f) + args = f.args + f.fun_args = ", ".join( + ['%s a%d' % (a,i) for i,a, in enumerate(args) if isinstance(a,str)]) + f.builtin = f.fun.replace('_','.') + f.llvm_args = ", ".join( + [('%s {{.*}}' % ctype2llvm(a)) if isinstance(a,str) else ('i32 %d' % a) + for i,a, in enumerate(args)]) + f.call_args = ", ".join(['a%d' % i if isinstance(a,str) else str(a) + for i,a in enumerate(args)]) + f.llvm_ret = ctype2llvm(f.ret) + f.assign = '' if f.ret == 'void' else '{{.*}} =' + print(template.format(**f.__dict__)) diff --git a/llvm/include/llvm/IR/IntrinsicsXtensa.td b/llvm/include/llvm/IR/IntrinsicsXtensa.td index 2b9c8dd62cb7c..36b9571ee4b4b 100644 --- a/llvm/include/llvm/IR/IntrinsicsXtensa.td +++ b/llvm/include/llvm/IR/IntrinsicsXtensa.td @@ -303,6 +303,117 @@ def 
int_xtensa_xt_float_s: ClangBuiltin<"__builtin_xtensa_xt_float_s">, def int_xtensa_xt_ufloat_s: ClangBuiltin<"__builtin_xtensa_xt_ufloat_s">, Intrinsic<[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; +def int_xtensa_xt_addexp_s: ClangBuiltin<"__builtin_xtensa_xt_addexp_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_addexpm_s: ClangBuiltin<"__builtin_xtensa_xt_addexpm_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_ceil_s: ClangBuiltin<"__builtin_xtensa_xt_ceil_s">, + Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_xt_conjc_s: ClangBuiltin<"__builtin_xtensa_xt_conjc_s">, + Intrinsic<[llvm_v2f32_ty], [llvm_v2f32_ty], [IntrNoMem]>; + +def int_xtensa_xt_div0_s: ClangBuiltin<"__builtin_xtensa_xt_div0_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_divn_s: ClangBuiltin<"__builtin_xtensa_xt_divn_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_floor_s: ClangBuiltin<"__builtin_xtensa_xt_floor_s">, + Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_xt_lsi: ClangBuiltin<"__builtin_xtensa_xt_lsi">, + Intrinsic<[llvm_float_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_xt_lsip: + Intrinsic<[llvm_float_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_xt_lsx: ClangBuiltin<"__builtin_xtensa_xt_lsx">, + Intrinsic<[llvm_float_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_xt_lsxp: + Intrinsic<[llvm_float_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_xt_madd_s: ClangBuiltin<"__builtin_xtensa_xt_madd_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_maddn_s: ClangBuiltin<"__builtin_xtensa_xt_maddn_s">, + 
Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_mkdadj_s: ClangBuiltin<"__builtin_xtensa_xt_mkdadj_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_mksadj_s: ClangBuiltin<"__builtin_xtensa_xt_mksadj_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_mov_s: ClangBuiltin<"__builtin_xtensa_xt_mov_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_moveqz_s: ClangBuiltin<"__builtin_xtensa_xt_moveqz_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_xt_movgez_s: ClangBuiltin<"__builtin_xtensa_xt_movgez_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_xt_movltz_s: ClangBuiltin<"__builtin_xtensa_xt_movltz_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_xt_movnez_s: ClangBuiltin<"__builtin_xtensa_xt_movnez_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_xt_msub_s: ClangBuiltin<"__builtin_xtensa_xt_msub_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_neg_s: ClangBuiltin<"__builtin_xtensa_xt_neg_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_nexp01_s: ClangBuiltin<"__builtin_xtensa_xt_nexp01_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_recip0_s: ClangBuiltin<"__builtin_xtensa_xt_recip0_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_rfr: ClangBuiltin<"__builtin_xtensa_xt_rfr">, + Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_round_s: ClangBuiltin<"__builtin_xtensa_xt_round_s">, + Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty], 
[ImmArg>, IntrNoMem]>; + +def int_xtensa_xt_rsqrt0_s: ClangBuiltin<"__builtin_xtensa_xt_rsqrt0_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_rur_fcr: ClangBuiltin<"__builtin_xtensa_xt_rur_fcr">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_xt_rur_fsr: ClangBuiltin<"__builtin_xtensa_xt_rur_fsr">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_xt_sqrt0_s: ClangBuiltin<"__builtin_xtensa_xt_sqrt0_s">, + Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>; + +def int_xtensa_xt_ssi: ClangBuiltin<"__builtin_xtensa_xt_ssi">, + Intrinsic<[], [llvm_float_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_xt_ssip: ClangBuiltin<"__builtin_xtensa_xt_ssip">, + Intrinsic<[llvm_ptr_ty], [llvm_float_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_xt_ssx: ClangBuiltin<"__builtin_xtensa_xt_ssx">, + Intrinsic<[], [llvm_float_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_xt_ssxp: ClangBuiltin<"__builtin_xtensa_xt_ssxp">, + Intrinsic<[llvm_ptr_ty], [llvm_float_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_xt_wfr: ClangBuiltin<"__builtin_xtensa_xt_wfr">, + Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_xt_wur_fcr: ClangBuiltin<"__builtin_xtensa_xt_wur_fcr">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_xt_wur_fsr: ClangBuiltin<"__builtin_xtensa_xt_wur_fsr">, + Intrinsic<[], [llvm_i32_ty], []>; + // Generated code // --------------- include "llvm/IR/IntrinsicsXtensaESP32S3.td" diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index 8ea0991b5040e..5d76b2c88fead 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -564,6 +564,14 @@ static DecodeStatus decodeImm64n_4nOperand(MCInst &Inst, uint64_t Imm, return MCDisassembler::Success; } +static DecodeStatus 
decodeOffset8m32Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm << 2)); + return MCDisassembler::Success; +} + static DecodeStatus decodeEntry_Imm12OpValue(MCInst &Inst, uint64_t Imm, int64_t Address, const void *Decoder) { diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp index 185507a93c410..958dedbaaa2e5 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp @@ -123,6 +123,10 @@ class XtensaMCCodeEmitter : public MCCodeEmitter { SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; + uint32_t getOffset8m32OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + uint32_t getEntry_Imm12OpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; @@ -518,6 +522,18 @@ XtensaMCCodeEmitter::getImm64n_4nOpValue(const MCInst &MI, unsigned OpNo, return Res & 0x3f; } +uint32_t +XtensaMCCodeEmitter::getOffset8m32OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + uint32_t Res = static_cast(MO.getImm()); + + assert(((Res & 0x3) == 0) && "Unexpected operand value!"); + + return Res; +} + uint32_t XtensaMCCodeEmitter::getEntry_Imm12OpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, diff --git a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp index e4bc8eddf3945..4a3ba220e2361 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp @@ -465,6 +465,41 @@ void XtensaDAGToDAGISel::Select(SDNode *Node) { } break; } + + case ISD::INTRINSIC_W_CHAIN: { + unsigned IntNo = 
cast(Node->getOperand(1))->getZExtValue(); + unsigned OpCode = 0; + bool Skip = false; + + switch (IntNo) { + default: + Skip = true; + break; + case Intrinsic::xtensa_xt_lsxp: + OpCode = Xtensa::LSXP; + break; + case Intrinsic::xtensa_xt_lsip: + OpCode = Xtensa::LSIP; + break; + } + if (Skip) + break; + + SDValue Chain = Node->getOperand(0); + + auto ResTys = Node->getVTList(); + + SmallVector Ops; + for (unsigned i = 2; i < Node->getNumOperands(); i++) + Ops.push_back(Node->getOperand(i)); + Ops.push_back(Chain); + + SDNode *NewNode = CurDAG->getMachineNode(OpCode, DL, ResTys, Ops); + + ReplaceNode(Node, NewNode); + return; + } + default: break; } diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 56ad439b3d4f9..561cd5ba71cdb 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -3316,9 +3316,11 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( case Xtensa::LSIP: case Xtensa::LSX: case Xtensa::LSXP: { - const MachineMemOperand &MMO = **MI.memoperands_begin(); - if (MMO.isVolatile()) { - BuildMI(*MBB, MI, DL, TII.get(Xtensa::MEMW)); + if (MI.memoperands().size() > 0) { + const MachineMemOperand &MMO = **MI.memoperands_begin(); + if (MMO.isVolatile()) { + BuildMI(*MBB, MI, DL, TII.get(Xtensa::MEMW)); + } } return MBB; } diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 30e72ae634f86..89fcf2ad55b08 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -598,6 +598,20 @@ def RUR : RRR_Inst<0x00, 0x03, 0x0E, (outs AR:$r), (ins UR:$ur), let t = ur{3-0}; } +def WUR_FCR :RRR_Inst<0x00, 0x03, 0x0F, (outs), (ins AR:$t), + "wur.fcr \t$t", []> { + let r = 232{7-4}; + let s = 232{3-0}; + let Defs = [FCR]; +} + +def WUR_FSR :RRR_Inst<0x00, 0x03, 0x0F, (outs), (ins AR:$t), + "wur.fsr\t $t", []> { + let r = 233{7-4}; + let s = 
233{3-0}; + let Defs = [FSR]; +} + //===----------------------------------------------------------------------===// // External Registers read/write instructions //===----------------------------------------------------------------------===// @@ -1001,20 +1015,22 @@ let mayLoad = 1, usesCustomInserter = 1, Predicates = [HasSingleFloat] in { let s{3-0} = addr{3-0}; } - def LSIP : RRI8_Inst<0x03, (outs FPR:$t), (ins mem32:$addr), - "lsip\t$t, $addr", []> { - bits<12> addr; + def LSIP : RRI8_Inst<0x03, (outs FPR:$t, AR:$ds), (ins AR:$s, offset8m32:$off), + "lsip\t$t, $s, $off", []> { + bits<10> off; let r = 0x08; - let imm8{7-0} = addr{11-4}; - let s{3-0} = addr{3-0}; + let imm8{7-0} = off{9-2}; + let Constraints = "$ds = $s,@earlyclobber $ds"; } - def LSX : RRR_Inst<0x00, 0x08, 0x00, (outs), (ins FPR:$r, AR:$s, AR:$t), + def LSX : RRR_Inst<0x00, 0x08, 0x00, (outs FPR:$r), (ins AR:$s, AR:$t), "lsx\t$r, $s, $t", []>; - def LSXP : RRR_Inst<0x00, 0x08, 0x01, (outs), (ins FPR:$r, AR:$s, AR:$t), - "lsxp\t$r, $s, $t", []>; + def LSXP : RRR_Inst<0x00, 0x08, 0x01, (outs FPR:$r, AR:$ds), (ins AR:$s, AR:$t), + "lsxp\t$r, $s, $t", []> { + let Constraints = "$ds = $s,@earlyclobber $ds"; + } } def : Pat<(f32 (load addr_ish4:$addr)), (f32 (LSI mem32:$addr))>; @@ -1030,20 +1046,22 @@ let mayStore = 1, usesCustomInserter = 1, Predicates = [HasSingleFloat] in { let s{3-0} = addr{3-0}; } - def SSIP : RRI8_Inst<0x03, (outs), (ins FPR:$t, mem32:$addr), - "ssip\t$t, $addr", []> { - bits<12> addr; + def SSIP : RRI8_Inst<0x03, (outs AR:$ds), (ins FPR:$t, AR:$s, offset8m32:$off), + "ssip\t$t, $s, $off", []> { + bits<10> off; let r = 0x0C; - let imm8{7-0} = addr{11-4}; - let s{3-0} = addr{3-0}; + let imm8{7-0} = off{9-2}; + let Constraints = "$ds = $s,@earlyclobber $ds"; } def SSX: RRR_Inst<0x00, 0x08, 0x04, (outs), (ins FPR:$r, AR:$s, AR:$t), "ssx\t$r, $s, $t", []>; - def SSXP: RRR_Inst<0x00, 0x08, 0x05, (outs), (ins FPR:$r, AR:$s, AR:$t), - "ssxp\t$r, $s, $t", []>; + def SSXP: 
RRR_Inst<0x00, 0x08, 0x05, (outs AR:$ds), (ins FPR:$r, AR:$s, AR:$t), + "ssxp\t$r, $s, $t", []> { + let Constraints = "$ds = $s,@earlyclobber $ds"; + } } def : Pat<(store FPR:$t, addr_ish4:$addr), (SSI FPR:$t, mem32:$addr)>; @@ -1092,15 +1110,18 @@ def : Pat<(f32 (int_xtensa_xt_abs_s FPR:$frs)), (ABS_S FPR:$frs)>, Requires<[HasSingleFloat]>; -def ADDEXP_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), +let Constraints = "$dr = $r,@earlyclobber $dr" in { + +def ADDEXP_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$dr), (ins FPR:$r, FPR:$s), "addexp.s\t$r, $s", []>, Requires<[HasSingleFloat]> { let t = 0x0E; } -def ADDEXPM_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), +def ADDEXPM_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$dr), (ins FPR:$r, FPR:$s), "addexpm.s\t$r, $s", []>, Requires<[HasSingleFloat]> { let t = 0x0F; } +} def CEIL_S : RRR_Inst<0x00, 0x0A, 0x0B, (outs AR:$r), (ins FPR:$s, uimm4:$imm), "ceil.s\t$r, $s, $imm", []>, Requires<[HasSingleFloat]> { @@ -1122,8 +1143,10 @@ def DIV0_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), let t = 0x7; } -def DIVN_S : RRR_Inst<0x00, 0x0A, 0x07, (outs FPR:$r), (ins FPR:$s, FPR:$t), - "divn.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; +def DIVN_S : RRR_Inst<0x00, 0x0A, 0x07, (outs FPR:$dr), (ins FPR:$r, FPR:$s, FPR:$t), + "divn.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]> { +let Constraints = "$dr = $r,@earlyclobber $dr"; +} def FLOAT_S : RRR_Inst<0x00, 0x0A, 0x0c, (outs FPR:$r), (ins AR:$s, uimm4:$imm), "float.s\t$r, $s, $imm", []>, Requires<[HasSingleFloat]> { @@ -1165,9 +1188,10 @@ def MADD_S : RRR_Inst<0x00, 0x0A, 0x04, (outs FPR:$r), (ins FPR:$a, FPR:$s, FPR: def : Pat<(fma FPR:$r1, FPR:$r2, FPR:$r3), (MADD_S $r3, $r1, $r2)>; -def MKDADJ_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), +def MKDADJ_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$dr), (ins FPR:$r, FPR:$s), "mkdadj.s\t$r, $s", []>, Requires<[HasSingleFloat]> { let t = 0x0D; + let Constraints = "$dr = 
$r,@earlyclobber $dr"; } def MKSADJ_S : RRR_Inst<0x00, 0x0A, 0x0F, (outs FPR:$r), (ins FPR:$s), @@ -1182,19 +1206,20 @@ def MOV_S : RRR_Inst<0x00, 0x0A, 0x0f, (outs FPR:$r), (ins FPR:$s), let t = 0x00; } -def MOVEQZ_S : RRR_Inst<0x00, 0x0B, 0x08, (outs FPR:$r), (ins FPR:$s, AR:$t), - "moveqz.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; +let Constraints = "$dr = $r,@earlyclobber $dr" in { + + def MOVEQZ_S : RRR_Inst<0x00, 0x0B, 0x08, (outs FPR:$dr), (ins FPR:$r, FPR:$s, AR:$t), + "moveqz.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; -def MOVGEZ_S : RRR_Inst<0x00, 0x0B, 0x0B, (outs FPR:$r), (ins FPR:$s, AR:$t), - "movgez.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; + def MOVGEZ_S : RRR_Inst<0x00, 0x0B, 0x0B, (outs FPR:$dr), (ins FPR:$r, FPR:$s, AR:$t), + "movgez.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; -def MOVLTZ_S : RRR_Inst<0x00, 0x0B, 0x0A, (outs FPR:$r), (ins FPR:$s, AR:$t), - "movltz.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; + def MOVLTZ_S : RRR_Inst<0x00, 0x0B, 0x0A, (outs FPR:$dr), (ins FPR:$r, FPR:$s, AR:$t), + "movltz.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; -def MOVNEZ_S : RRR_Inst<0x00, 0x0B, 0x09, (outs FPR:$r), (ins FPR:$s, AR:$t), - "movnez.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; + def MOVNEZ_S : RRR_Inst<0x00, 0x0B, 0x09, (outs FPR:$dr), (ins FPR:$r, FPR:$s, AR:$t), + "movnez.s\t$r, $s, $t", []>, Requires<[HasSingleFloat]>; -let Constraints = "$dr = $r,@earlyclobber $dr" in { def MOVT_S : RRR_Inst<0x00, 0x0B, 0x0D, (outs FPR:$dr), (ins FPR:$r, FPR:$s, BR:$t), "movt.s\t$r, $s, $t", [(set FPR:$dr, (int_xtensa_xt_movt_s FPR:$r, FPR:$s, BR:$t))]>, @@ -1815,6 +1840,115 @@ let Predicates = [HasESP32S3Ops] in { } include "XtensaS3DSPInstrInfo.td" + +//===----------------------------------------------------------------------===// +// FP intrinsic patterns +//===----------------------------------------------------------------------===// +let Predicates = [HasSingleFloat] in { + + def ADDEXP_S_PAT :Pat<(f32 
(int_xtensa_xt_addexp_s FPR:$frr, FPR:$frs)), + (ADDEXP_S FPR:$frr, FPR:$frs)>; + + def ADDEXPM_S_PAT :Pat<(f32 (int_xtensa_xt_addexpm_s FPR:$frr, FPR:$frs)), + (ADDEXPM_S FPR:$frr, FPR:$frs)>; + + def CEIL_S_PAT :Pat<(i32 (int_xtensa_xt_ceil_s FPR:$frs, timm:$imm_t)), + (CEIL_S FPR:$frs, timm:$imm_t)>; + + def DIV0_S_PAT :Pat<(f32 (int_xtensa_xt_div0_s FPR:$frs)), + (DIV0_S FPR:$frs)>; + + def DIVN_S_PAT :Pat<(f32 (int_xtensa_xt_divn_s FPR:$frr, FPR:$frs, FPR:$frt)), + (DIVN_S FPR:$frr, FPR:$frs, FPR:$frt)>; + + def FLOOR_S_PAT :Pat<(i32 (int_xtensa_xt_floor_s FPR:$frs, timm:$imm_t)), + (FLOOR_S FPR:$frs, timm:$imm_t)>; + + def LSI_PAT :Pat<(f32 (int_xtensa_xt_lsi AR:$ars, timm:$imm8x4)), + (LSI AR:$ars, timm:$imm8x4)>; + + def LSX_PAT :Pat<(f32 (int_xtensa_xt_lsx AR:$ars, AR:$art)), + (LSX AR:$ars, AR:$art)>; + + def MADD_S_PAT :Pat<(f32 (int_xtensa_xt_madd_s FPR:$frr, FPR:$frs, FPR:$frt)), + (MADD_S FPR:$frr, FPR:$frs, FPR:$frt)>; + + def MADDN_S_PAT :Pat<(f32 (int_xtensa_xt_maddn_s FPR:$frr, FPR:$frs, FPR:$frt)), + (MADDN_S FPR:$frr, FPR:$frs, FPR:$frt)>; + + def MKDADJ_S_PAT :Pat<(f32 (int_xtensa_xt_mkdadj_s FPR:$frr, FPR:$frs)), + (MKDADJ_S FPR:$frr, FPR:$frs)>; + + def MKSADJ_S_PAT :Pat<(f32 (int_xtensa_xt_mksadj_s FPR:$frs)), + (MKSADJ_S FPR:$frs)>; + + def MOV_S_PAT :Pat<(f32 (int_xtensa_xt_mov_s FPR:$frs)), + (MOV_S FPR:$frs)>; + + def MOVEQZ_S_PAT :Pat<(f32 (int_xtensa_xt_moveqz_s FPR:$frr, FPR:$frs, AR:$art)), + (MOVEQZ_S FPR:$frr, FPR:$frs, AR:$art)>; + + def MOVGEZ_S_PAT :Pat<(f32 (int_xtensa_xt_movgez_s FPR:$frr, FPR:$frs, AR:$art)), + (MOVGEZ_S FPR:$frr, FPR:$frs, AR:$art)>; + + def MOVLTZ_S_PAT :Pat<(f32 (int_xtensa_xt_movltz_s FPR:$frr, FPR:$frs, AR:$art)), + (MOVLTZ_S FPR:$frr, FPR:$frs, AR:$art)>; + + def MOVNEZ_S_PAT :Pat<(f32 (int_xtensa_xt_movnez_s FPR:$frr, FPR:$frs, AR:$art)), + (MOVNEZ_S FPR:$frr, FPR:$frs, AR:$art)>; + + def MSUB_S_PAT :Pat<(f32 (int_xtensa_xt_msub_s FPR:$frr, FPR:$frs, FPR:$frt)), + (MSUB_S FPR:$frr, FPR:$frs, 
FPR:$frt)>; + + def NEG_S_PAT :Pat<(f32 (int_xtensa_xt_neg_s FPR:$frs)), + (NEG_S FPR:$frs)>; + + def NEXP01_S_PAT :Pat<(f32 (int_xtensa_xt_nexp01_s FPR:$frs)), + (NEXP01_S FPR:$frs)>; + + def RECIP0_S_PAT :Pat<(f32 (int_xtensa_xt_recip0_s FPR:$frs)), + (RECIP0_S FPR:$frs)>; + + def RFR_PAT :Pat<(i32 (int_xtensa_xt_rfr FPR:$frs)), + (RFR FPR:$frs)>; + + def ROUND_S_PAT :Pat<(i32 (int_xtensa_xt_round_s FPR:$frs, timm:$imm_t)), + (ROUND_S FPR:$frs, timm:$imm_t)>; + + def RSQRT0_S_PAT :Pat<(f32 (int_xtensa_xt_rsqrt0_s FPR:$frs)), + (RSQRT0_S FPR:$frs)>; + + def RUR_FCR_PAT :Pat<(i32 (int_xtensa_xt_rur_fcr )), + (RUR FCR)>; + + def RUR_FSR_PAT :Pat<(i32 (int_xtensa_xt_rur_fsr )), + (RUR FSR )>; + + def SQRT0_S_PAT :Pat<(f32 (int_xtensa_xt_sqrt0_s FPR:$frs)), + (SQRT0_S FPR:$frs)>; + + def SSI_PAT :Pat<(int_xtensa_xt_ssi FPR:$frt, AR:$ars, timm:$imm8x4), + (SSI FPR:$frt, AR:$ars, timm:$imm8x4)>; + + def SSIP_PAT :Pat<(i32 (int_xtensa_xt_ssip FPR:$frt, AR:$ars, timm:$imm8x4)), + (SSIP FPR:$frt, AR:$ars, timm:$imm8x4)>; + + def SSX_PAT :Pat<(int_xtensa_xt_ssx FPR:$frr, AR:$ars, AR:$art), + (SSX FPR:$frr, AR:$ars, AR:$art)>; + + def SSXP_PAT :Pat<(i32 (int_xtensa_xt_ssxp FPR:$frr, AR:$ars, AR:$art)), + (SSXP FPR:$frr, AR:$ars, AR:$art)>; + + def WFR_PAT :Pat<(f32 (int_xtensa_xt_wfr AR:$ars)), + (WFR AR:$ars)>; + + def WUR_FCR_PAT :Pat<(int_xtensa_xt_wur_fcr AR:$art), + (WUR_FCR AR:$art)>; + + def WUR_FSR_PAT :Pat<(int_xtensa_xt_wur_fsr AR:$art), + (WUR_FSR AR:$art)>; + +} //===----------------------------------------------------------------------===// // DSP Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaOperands.td b/llvm/lib/Target/Xtensa/XtensaOperands.td index 6a9bf514be8d3..ad22bac40ea76 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperands.td +++ b/llvm/lib/Target/Xtensa/XtensaOperands.td @@ -123,7 +123,10 @@ def offset8m16 : Immediate; def offset8m32 : Immediate= 0 && Imm <= 1020 
&& (Imm & 0x3 == 0); }], - "Offset8m32_AsmOperand">; + "Offset8m32_AsmOperand"> { + let EncoderMethod = "getOffset8m32OpValue"; + let DecoderMethod = "decodeOffset8m32Operand"; +} // Memory offset 0..60 for 32-bit memory accesses def Offset4m32_AsmOperand : ImmAsmOperand<"Offset4m32">; diff --git a/llvm/test/CodeGen/Xtensa/xtensa-float-intrinsics.ll b/llvm/test/CodeGen/Xtensa/xtensa-float-intrinsics.ll index 73d54e2a51210..48d3424a8c62f 100644 --- a/llvm/test/CodeGen/Xtensa/xtensa-float-intrinsics.ll +++ b/llvm/test/CodeGen/Xtensa/xtensa-float-intrinsics.ll @@ -51,6 +51,24 @@ define float @test_mul_s(float %a, float %b) { ret float %r } +define float @test_lsxp(ptr %a, i32 %b) { + ; CHECK-LABEL: test_lsxp + ; CHECK: lsxp {{f[0-9]+}}, {{a[0-9]+}}, {{a[0-9]+}} + %s = call {float,ptr} @llvm.xtensa.xt.lsxp(ptr %a, i32 %b) + %r = extractvalue {float,ptr} %s , 0 + ret float %r +} + +define float @test_lsip(ptr %a) { + ; CHECK-LABEL: test_lsip + ; CHECK: lsip {{f[0-9]+}}, {{a[0-9]+}}, 4 + %s = call {float,ptr} @llvm.xtensa.xt.lsip(ptr %a, i32 4) + %r = extractvalue {float,ptr} %s , 0 + ret float %r +} + +declare {float,ptr} @llvm.xtensa.xt.lsip(ptr, i32); +declare {float,ptr} @llvm.xtensa.xt.lsxp(ptr, i32); declare float @llvm.xtensa.xt.float.s(i32, i32); declare float @llvm.xtensa.xt.ufloat.s(i32, i32); declare i32 @llvm.xtensa.xt.trunc.s(float , i32); diff --git a/llvm/test/CodeGen/Xtensa/xtensa-gen-intrinsics.ll b/llvm/test/CodeGen/Xtensa/xtensa-gen-intrinsics.ll new file mode 100644 index 0000000000000..e3d3ff9b92516 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/xtensa-gen-intrinsics.ll @@ -0,0 +1,80 @@ +# RUN: python3 %s > %t && ( llc -O0 -mtriple=xtensa -mcpu=esp32 %t -o - | FileCheck %t ) + +from dataclasses import dataclass + +@dataclass +class F: + ret: str + fun : str + instr: str + args : [str] + +FIXTURES = [ +('float', 'xt_addexp_s', 'addexp.s {{f[0-9]+}}, {{f[0-9]+}}', ['float', 'float']) , +('float', 'xt_addexpm_s', 'addexpm.s {{f[0-9]+}}, {{f[0-9]+}}', 
['float', 'float']) , +('i32', 'xt_ceil_s', 'ceil.s {{a[0-9]+}}, {{f[0-9]+}}, 0', ['float', 0]) , +('float', 'xt_div0_s', 'div0.s {{f[0-9]+}}, {{f[0-9]+}}', ['float']) , +('float', 'xt_divn_s', 'divn.s {{f[0-9]+}}, {{f[0-9]+}}, {{f[0-9]+}}', ['float', 'float', 'float']) , +('i32', 'xt_floor_s', 'floor.s {{a[0-9]+}}, {{f[0-9]+}}, 0', ['float', 0]) , +('float', 'xt_lsi', 'lsi {{f[0-9]+}}, {{a[0-9]+}}, 0', ['ptr', 0]) , +# skip xt_lsip , +('float', 'xt_lsx', 'lsx {{f[0-9]+}}, {{a[0-9]+}}, {{a[0-9]+}}', ['ptr', 'i32']) , +# skip xt_lsxp , +('float', 'xt_madd_s', 'madd.s {{f[0-9]+}}, {{f[0-9]+}}, {{f[0-9]+}}', ['float', 'float', 'float']) , +('float', 'xt_maddn_s', 'maddn.s {{f[0-9]+}}, {{f[0-9]+}}, {{f[0-9]+}}', ['float', 'float', 'float']) , +('float', 'xt_mkdadj_s', 'mkdadj.s {{f[0-9]+}}, {{f[0-9]+}}', ['float', 'float']) , +('float', 'xt_mksadj_s', 'mksadj.s {{f[0-9]+}}, {{f[0-9]+}}', ['float']) , +('float', 'xt_mov_s', 'mov.s {{f[0-9]+}}, {{f[0-9]+}}', ['float']) , +('float', 'xt_moveqz_s', 'moveqz.s {{f[0-9]+}}, {{f[0-9]+}}, {{a[0-9]+}}', ['float', 'float', 'i32']) , +('float', 'xt_movgez_s', 'movgez.s {{f[0-9]+}}, {{f[0-9]+}}, {{a[0-9]+}}', ['float', 'float', 'i32']) , +('float', 'xt_movltz_s', 'movltz.s {{f[0-9]+}}, {{f[0-9]+}}, {{a[0-9]+}}', ['float', 'float', 'i32']) , +('float', 'xt_movnez_s', 'movnez.s {{f[0-9]+}}, {{f[0-9]+}}, {{a[0-9]+}}', ['float', 'float', 'i32']) , +('float', 'xt_msub_s', 'msub.s {{f[0-9]+}}, {{f[0-9]+}}, {{f[0-9]+}}', ['float', 'float', 'float']) , +('float', 'xt_neg_s', 'neg.s {{f[0-9]+}}, {{f[0-9]+}}', ['float']) , +('float', 'xt_nexp01_s', 'nexp01.s {{f[0-9]+}}, {{f[0-9]+}}', ['float']) , +('float', 'xt_recip0_s', 'recip0.s {{f[0-9]+}}, {{f[0-9]+}}', ['float']) , +('i32', 'xt_rfr', 'rfr {{a[0-9]+}}, {{f[0-9]+}}', ['float']) , +('i32', 'xt_round_s', 'round.s {{a[0-9]+}}, {{f[0-9]+}}, 0', ['float', 0]) , +('float', 'xt_rsqrt0_s', 'rsqrt0.s {{f[0-9]+}}, {{f[0-9]+}}', ['float']) , +('i32', 'xt_rur_fcr', 'rur {{a[0-9]+}}, fcr', []) , 
+('i32', 'xt_rur_fsr', 'rur {{a[0-9]+}}, fsr', []) , +('float', 'xt_sqrt0_s', 'sqrt0.s {{f[0-9]+}}, {{f[0-9]+}}', ['float']) , +('void', 'xt_ssi', 'ssi {{f[0-9]+}}, {{a[0-9]+}}, 0', ['float', 'ptr', 0]) , +('ptr', 'xt_ssip', 'ssip {{f[0-9]+}}, {{a[0-9]+}}, 0', ['float', 'ptr', 0]) , +('void', 'xt_ssx', 'ssx {{f[0-9]+}}, {{a[0-9]+}}, {{a[0-9]+}}', ['float', 'ptr', 'i32']) , +('ptr', 'xt_ssxp', 'ssxp {{f[0-9]+}}, {{a[0-9]+}}, {{a[0-9]+}}', ['float', 'ptr', 'i32']) , +('float', 'xt_wfr', 'wfr {{f[0-9]+}}, {{a[0-9]+}}', ['i32']) , +('void', 'xt_wur_fcr', 'wur.fcr {{a[0-9]+}}', ['i32']) , +('void', 'xt_wur_fsr', 'wur.fsr {{a[0-9]+}}', ['i32']) , +] + +template = """ +define {ret} @test_{fun}({fun_args}) {{ + ; CHECK-LABEL: {fun} + ; CHECK: {instr} + {ret_var} {assign} call {ret} @llvm.xtensa.{builtin}({call_args}) + ret {ret} {ret_var} +}} +declare {ret} @llvm.xtensa.{builtin}({call_types}); +""" + +for f in FIXTURES: + if isinstance(f, dict): + f = F(**f) + elif isinstance(f, tuple): + f = F(*f) + args = f.args + f.fun_args = ",".join( + ['%s %%a%d' % (a,i) for i,a, in enumerate(args) if isinstance(a,str)]) + f.builtin = f.fun.replace('_','.') + f.call_args = ",".join( + [('%s %%a%d' % (a, i)) if isinstance(a,str) else ('i32 %d' % a) + for i,a, in enumerate(args)]) + f.call_types = ",".join([a if isinstance(a,str) else 'i32' for a in args]) + if f.ret == 'void': + f.assign = "" + f.ret_var = "" + else: + f.assign = "=" + f.ret_var = "%r" + print(template.format(**f.__dict__)) From f97c1af7b044bacba1d4ddfc4075de10e2ab6ebf Mon Sep 17 00:00:00 2001 From: Maciej Czekaj Date: Mon, 27 Mar 2023 13:09:06 +0000 Subject: [PATCH 192/289] [Xtensa] Add Cannonlake CPU This patch adds a definition of Xtensa LX6 CPU variant present in Intel Cannonlake and Tigerlake SOC platforms. 
--- llvm/include/llvm/TargetParser/XtensaTargetParser.def | 7 +++++++ llvm/lib/Target/Xtensa/Xtensa.td | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/llvm/include/llvm/TargetParser/XtensaTargetParser.def b/llvm/include/llvm/TargetParser/XtensaTargetParser.def index b765b015c1265..3fe9b2760b773 100644 --- a/llvm/include/llvm/TargetParser/XtensaTargetParser.def +++ b/llvm/include/llvm/TargetParser/XtensaTargetParser.def @@ -73,6 +73,13 @@ XTENSA_CPU(ESP32S3, {"esp32s3"}, FK_COPROCESSOR | FK_INTERRUPT | FK_RVECTOR | FK_TIMERINT | FK_PRID | FK_REGPROTECT | FK_MISCSR | FK_ESP32S3OPS)) +XTENSA_CPU(CNL, {"cnl"}, (FK_DENSITY | FK_FP | FK_LOOP | FK_MAC16 | FK_WINDOWED | FK_BOOLEAN | + FK_SEXT | FK_NSA | FK_MUL32 | FK_MUL32HIGH | FK_S32C1I | + FK_THREADPTR | FK_DIV32 | FK_ATOMCTL | FK_MEMCTL | FK_DEBUG | + FK_EXCEPTION | FK_HIGHPRIINTERRUPTS | FK_COPROCESSOR | + FK_INTERRUPT | FK_RVECTOR | FK_TIMERINT | FK_PRID | FK_REGPROTECT | FK_MISCSR)) + + #undef XTENSA_CPU #ifndef XTENSA_CPU_ALIAS diff --git a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td index 572c76cc7c1ff..2171433b0fba0 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -206,6 +206,13 @@ def : Proc<"esp32s3", [FeatureDensity, FeatureSingleFloat, FeatureLoop, FeatureM FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, FeatureRegionProtection, FeatureMiscSR, FeatureMINMAX, FeatureCLAMPS, FeatureESP32S3Ops]>; +def : Proc<"cnl", [FeatureDensity, FeatureSingleFloat, FeatureLoop, FeatureWindowed, FeatureBoolean, + FeatureSEXT, FeatureNSA, FeatureMul32, FeatureMul32High, FeatureS32C1I, + FeatureTHREADPTR, FeatureDiv32, FeatureATOMCTL, FeatureMEMCTL, FeatureDebug, + FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, + FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, + FeatureRegionProtection, FeatureMiscSR]>; + //===----------------------------------------------------------------------===// // 
Register File Description //===----------------------------------------------------------------------===// From c8a542e8457d3a984c57bbf08b407a756e9c55bd Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 26 Mar 2024 15:40:33 +0300 Subject: [PATCH 193/289] [Xtensa] Make assembler output compatible with GAS Some Xtensa targets may still use GAS as a default assemblwr through -fno-integrated-as option. These changes make the assembly output compatible with GAS by default. - GAS does not recognize .word but .2byte works for both - Dwarf CFI is not supported by GAS. Option -fdwarf-exceptions can still turn it on but there is no option to turn it off, so an opt-in approach is more portable. --- llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCAsmInfo.cpp | 3 +-- llvm/test/CodeGen/Xtensa/arith-intrinsics.ll | 2 +- llvm/test/CodeGen/Xtensa/calling-conv-call8.ll | 4 ++-- llvm/test/CodeGen/Xtensa/ctlz-cttz.ll | 2 +- llvm/test/CodeGen/Xtensa/minmax-intrinsics.ll | 2 +- 5 files changed, 6 insertions(+), 7 deletions(-) diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCAsmInfo.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCAsmInfo.cpp index 4537369b017d0..1dc3fca572fe8 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCAsmInfo.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCAsmInfo.cpp @@ -23,12 +23,11 @@ XtensaMCAsmInfo::XtensaMCAsmInfo(const Triple &TT) { PrivateGlobalPrefix = ".L"; CommentString = "#"; ZeroDirective = "\t.space\t"; - Data16bitsDirective = "\t.half\t"; + Data16bitsDirective = "\t.2byte\t"; Data32bitsDirective = "\t.word\t"; Data64bitsDirective = "\t.quad\t"; GlobalDirective = "\t.global\t"; UsesELFSectionDirectiveForBSS = true; SupportsDebugInformation = true; - ExceptionsType = ExceptionHandling::DwarfCFI; AlignmentIsInBytes = true; } diff --git a/llvm/test/CodeGen/Xtensa/arith-intrinsics.ll b/llvm/test/CodeGen/Xtensa/arith-intrinsics.ll index c4a0749a0ed1e..ee492583bd72d 100644 --- a/llvm/test/CodeGen/Xtensa/arith-intrinsics.ll +++ 
b/llvm/test/CodeGen/Xtensa/arith-intrinsics.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa -mcpu=esp32 --exception-model=dwarf -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=XTENSA %s declare i32 @llvm.abs.i32(i32, i1) diff --git a/llvm/test/CodeGen/Xtensa/calling-conv-call8.ll b/llvm/test/CodeGen/Xtensa/calling-conv-call8.ll index 16056b99101fb..5ad2d4e471fc2 100644 --- a/llvm/test/CodeGen/Xtensa/calling-conv-call8.ll +++ b/llvm/test/CodeGen/Xtensa/calling-conv-call8.ll @@ -1,6 +1,6 @@ -; RUN: llc -mtriple=xtensa -mcpu=esp32 -O1 -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs --exception-model=dwarf < %s \ ; RUN: | FileCheck -check-prefix=XTENSA-STRUCT16 %s -; RUN: llc -mtriple=xtensa -mcpu=esp32 -O1 -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs --exception-model=dwarf < %s \ ; RUN: | FileCheck -check-prefix=XTENSA-I128 %s %struct.S = type { [4 x i32] } diff --git a/llvm/test/CodeGen/Xtensa/ctlz-cttz.ll b/llvm/test/CodeGen/Xtensa/ctlz-cttz.ll index 8008ba354e6ab..a265b1f79913a 100644 --- a/llvm/test/CodeGen/Xtensa/ctlz-cttz.ll +++ b/llvm/test/CodeGen/Xtensa/ctlz-cttz.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa -mcpu=esp32 --exception-model=dwarf -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=XTENSA %s declare i32 @llvm.ctlz.i32(i32, i1) diff --git a/llvm/test/CodeGen/Xtensa/minmax-intrinsics.ll b/llvm/test/CodeGen/Xtensa/minmax-intrinsics.ll index e6faa89c0ec59..0206120729abe 100644 --- a/llvm/test/CodeGen/Xtensa/minmax-intrinsics.ll +++ b/llvm/test/CodeGen/Xtensa/minmax-intrinsics.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by 
utils/update_llc_test_checks.py -; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs < %s \ +; RUN: llc -mtriple=xtensa -mcpu=esp32 --exception-model=dwarf -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=XTENSA %s declare i32 @llvm.smin.i32(i32, i32) From 217d4f0bedbb78cde0225c3dd76c7cb599c336d6 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 30 Sep 2024 02:25:01 +0300 Subject: [PATCH 194/289] [Xtensa] Fix hwloop tests. --- llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll | 1 - llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll | 1 - 2 files changed, 2 deletions(-) diff --git a/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll b/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll index a72e89105587c..12355f7b1877a 100644 --- a/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll +++ b/llvm/test/CodeGen/Xtensa/hwloop_inner_loop.ll @@ -6,7 +6,6 @@ define i32 @test_hwloop(i32 %a, i32 %b, i32 %n) local_unnamed_addr #0 { ; CHECK-LABEL: test_hwloop: ; CHECK: entry a1, 32 -; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: blti a4, 1, .LBB0_5 ; CHECK-NEXT: # %bb.1: # %for.body.preheader ; CHECK-NEXT: movi.n a8, 0 diff --git a/llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll b/llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll index d2899ae550509..f5f576fd1fea3 100644 --- a/llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll +++ b/llvm/test/CodeGen/Xtensa/hwloop_unsuitable_loop.ll @@ -5,7 +5,6 @@ define i32 @test_hwloop(i32 %a, i32 %b, i32 %n) local_unnamed_addr #1 { ; CHECK-LABEL: test_hwloop: ; CHECK: entry a1, 32 -; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: blti a4, 1, .LBB0_5 ; CHECK-NEXT: # %bb.1: # %for.body.preheader ; CHECK-NEXT: movi.n a8, 0 From a7eacb4646a339b080cf8ac69997fe7e054449bb Mon Sep 17 00:00:00 2001 From: Maciej Czekaj Date: Thu, 29 Jun 2023 10:17:18 +0000 Subject: [PATCH 195/289] [Xtensa] Add HIFI3 intrinsic functions --- llvm/include/llvm/IR/IntrinsicsXtensa.td | 19 + llvm/include/llvm/IR/IntrinsicsXtensaHIFI.td | 2613 
++++++++++++++++++ 2 files changed, 2632 insertions(+) create mode 100644 llvm/include/llvm/IR/IntrinsicsXtensaHIFI.td diff --git a/llvm/include/llvm/IR/IntrinsicsXtensa.td b/llvm/include/llvm/IR/IntrinsicsXtensa.td index 36b9571ee4b4b..e805a02f62455 100644 --- a/llvm/include/llvm/IR/IntrinsicsXtensa.td +++ b/llvm/include/llvm/IR/IntrinsicsXtensa.td @@ -417,4 +417,23 @@ def int_xtensa_xt_wur_fsr: ClangBuiltin<"__builtin_xtensa_xt_wur_fsr">, // Generated code // --------------- include "llvm/IR/IntrinsicsXtensaESP32S3.td" + +//===----------------------------------------------------------------------===// +// HiFi3 Intrinsics +//===----------------------------------------------------------------------===// + +//Extended Access +def int_xtensa_xt_l32ex: ClangBuiltin<"__builtin_xtensa_xt_l32ex">, + Intrinsic<[llvm_i32_ty], [LLVMQualPointerType<0>],[]>; + +def int_xtensa_xt_s32ex: + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, LLVMQualPointerType<0>],[]>; + +def int_xtensa_xt_getex: + Intrinsic<[llvm_i32_ty], [llvm_i32_ty],[]>; + +def int_xtensa_xt_clrex: ClangBuiltin<"__builtin_xtensa_xt_clrex">, + Intrinsic<[], [],[]>; + +include "llvm/IR/IntrinsicsXtensaHIFI.td" } diff --git a/llvm/include/llvm/IR/IntrinsicsXtensaHIFI.td b/llvm/include/llvm/IR/IntrinsicsXtensaHIFI.td new file mode 100644 index 0000000000000..662e942829cc8 --- /dev/null +++ b/llvm/include/llvm/IR/IntrinsicsXtensaHIFI.td @@ -0,0 +1,2613 @@ +//===- IntrinsicsXtensaHIFI.td - Defines Xtensa HIFI intrinsics -----*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines all of the intrinsics for Xtensa HIFI extension. 
+// +//===----------------------------------------------------------------------===// + +def int_xtensa_ae_abs16s: ClangBuiltin<"__builtin_xtensa_ae_abs16s">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_abs24s: ClangBuiltin<"__builtin_xtensa_ae_abs24s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_abs32: ClangBuiltin<"__builtin_xtensa_ae_abs32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_abs32s: ClangBuiltin<"__builtin_xtensa_ae_abs32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_abs64: ClangBuiltin<"__builtin_xtensa_ae_abs64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_abs64s: ClangBuiltin<"__builtin_xtensa_ae_abs64s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_add16: ClangBuiltin<"__builtin_xtensa_ae_add16">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_add16s: ClangBuiltin<"__builtin_xtensa_ae_add16s">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_add24s: ClangBuiltin<"__builtin_xtensa_ae_add24s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_add32: ClangBuiltin<"__builtin_xtensa_ae_add32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_add32_hl_lh: ClangBuiltin<"__builtin_xtensa_ae_add32_hl_lh">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_add32s: ClangBuiltin<"__builtin_xtensa_ae_add32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_add64: ClangBuiltin<"__builtin_xtensa_ae_add64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_add64s: ClangBuiltin<"__builtin_xtensa_ae_add64s">, + 
Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_addbrba32: ClangBuiltin<"__builtin_xtensa_ae_addbrba32">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_addsub32: ClangBuiltin<"__builtin_xtensa_ae_addsub32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_addsub32s: ClangBuiltin<"__builtin_xtensa_ae_addsub32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_and: ClangBuiltin<"__builtin_xtensa_ae_and">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_cvt32x2f16_10: ClangBuiltin<"__builtin_xtensa_ae_cvt32x2f16_10">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_cvt32x2f16_32: ClangBuiltin<"__builtin_xtensa_ae_cvt32x2f16_32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_cvt48a32: ClangBuiltin<"__builtin_xtensa_ae_cvt48a32">, + Intrinsic<[llvm_v1i64_ty], [llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_cvt64a32: ClangBuiltin<"__builtin_xtensa_ae_cvt64a32">, + Intrinsic<[llvm_v1i64_ty], [llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_cvt64f32_h: ClangBuiltin<"__builtin_xtensa_ae_cvt64f32_h">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_cvta32f24s_h: ClangBuiltin<"__builtin_xtensa_ae_cvta32f24s_h">, + Intrinsic<[llvm_i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_cvta32f24s_l: ClangBuiltin<"__builtin_xtensa_ae_cvta32f24s_l">, + Intrinsic<[llvm_i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_cvtq56a32s: ClangBuiltin<"__builtin_xtensa_ae_cvtq56a32s">, + Intrinsic<[llvm_v1i64_ty], [llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_cvtq56p32s_h: ClangBuiltin<"__builtin_xtensa_ae_cvtq56p32s_h">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_cvtq56p32s_l: 
ClangBuiltin<"__builtin_xtensa_ae_cvtq56p32s_l">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_db: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_db_ic: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_db_ip: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_dbi: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_dbi_ic: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_dbi_ip: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_div64d32_h: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_div64d32_l: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_eq16: + Intrinsic<[llvm_v4i1_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_eq32: + Intrinsic<[llvm_v2i1_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_eq64: + Intrinsic<[llvm_v1i1_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_l16_i: ClangBuiltin<"__builtin_xtensa_ae_l16_i">, + Intrinsic<[llvm_v4i16_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l16_ip: + Intrinsic<[llvm_v4i16_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l16_x: ClangBuiltin<"__builtin_xtensa_ae_l16_x">, + Intrinsic<[llvm_v4i16_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16_xc: + Intrinsic<[llvm_v4i16_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16_xp: + Intrinsic<[llvm_v4i16_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16m_i: ClangBuiltin<"__builtin_xtensa_ae_l16m_i">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l16m_iu: 
+ Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l16m_x: ClangBuiltin<"__builtin_xtensa_ae_l16m_x">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16m_xc: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16m_xu: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16x2m_i: ClangBuiltin<"__builtin_xtensa_ae_l16x2m_i">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l16x2m_iu: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l16x2m_x: ClangBuiltin<"__builtin_xtensa_ae_l16x2m_x">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16x2m_xc: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16x2m_xu: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16x4_i: ClangBuiltin<"__builtin_xtensa_ae_l16x4_i">, + Intrinsic<[llvm_v4i16_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l16x4_ip: + Intrinsic<[llvm_v4i16_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l16x4_ric: + Intrinsic<[llvm_v4i16_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_l16x4_rip: + Intrinsic<[llvm_v4i16_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_l16x4_x: ClangBuiltin<"__builtin_xtensa_ae_l16x4_x">, + Intrinsic<[llvm_v4i16_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16x4_xc: + Intrinsic<[llvm_v4i16_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l16x4_xp: + Intrinsic<[llvm_v4i16_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32_i: ClangBuiltin<"__builtin_xtensa_ae_l32_i">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def 
int_xtensa_ae_l32_ip: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l32_x: ClangBuiltin<"__builtin_xtensa_ae_l32_x">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32_xc: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32_xp: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32f24_i: ClangBuiltin<"__builtin_xtensa_ae_l32f24_i">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l32f24_ip: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l32f24_x: ClangBuiltin<"__builtin_xtensa_ae_l32f24_x">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32f24_xc: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32f24_xp: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32m_i: ClangBuiltin<"__builtin_xtensa_ae_l32m_i">, + Intrinsic<[llvm_v1i64_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l32m_iu: + Intrinsic<[llvm_v1i64_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l32m_x: ClangBuiltin<"__builtin_xtensa_ae_l32m_x">, + Intrinsic<[llvm_v1i64_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32m_xc: + Intrinsic<[llvm_v1i64_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32m_xu: + Intrinsic<[llvm_v1i64_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32x2_i: ClangBuiltin<"__builtin_xtensa_ae_l32x2_i">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l32x2_ip: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l32x2_ric: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], 
[llvm_ptr_ty], []>; + +def int_xtensa_ae_l32x2_rip: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_l32x2_x: ClangBuiltin<"__builtin_xtensa_ae_l32x2_x">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32x2_xc: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32x2_xp: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32x2f24_i: ClangBuiltin<"__builtin_xtensa_ae_l32x2f24_i">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l32x2f24_ip: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l32x2f24_ric: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_l32x2f24_rip: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_l32x2f24_x: ClangBuiltin<"__builtin_xtensa_ae_l32x2f24_x">, + Intrinsic<[llvm_v2i32_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32x2f24_xc: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l32x2f24_xp: + Intrinsic<[llvm_v2i32_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l64_i: ClangBuiltin<"__builtin_xtensa_ae_l64_i">, + Intrinsic<[llvm_v1i64_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l64_ip: + Intrinsic<[llvm_v1i64_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_l64_x: ClangBuiltin<"__builtin_xtensa_ae_l64_x">, + Intrinsic<[llvm_v1i64_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l64_xc: + Intrinsic<[llvm_v1i64_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_l64_xp: + Intrinsic<[llvm_v1i64_ty, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_la16x4_ic: + Intrinsic<[llvm_v4i16_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + 
+def int_xtensa_ae_la16x4_ip: + Intrinsic<[llvm_v4i16_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la16x4_ric: + Intrinsic<[llvm_v4i16_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la16x4_rip: + Intrinsic<[llvm_v4i16_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la16x4neg_pc: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_la16x4pos_pc: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_la24_ic: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la24_ip: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la24_ric: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la24_rip: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la24neg_pc: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_la24pos_pc: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_la24x2_ic: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la24x2_ip: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la24x2_ric: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la24x2_rip: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la24x2neg_pc: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_la24x2pos_pc: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_la32x2_ic: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], 
[llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la32x2_ip: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la32x2_ric: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la32x2_rip: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la32x2f24_ic: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la32x2f24_ip: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la32x2f24_ric: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la32x2f24_rip: + Intrinsic<[llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_la32x2neg_pc: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_la32x2pos_pc: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_la64_pp: ClangBuiltin<"__builtin_xtensa_ae_la64_pp">, + Intrinsic<[llvm_v8i8_ty], [llvm_ptr_ty], []>; + +def int_xtensa_ae_lalign64_i: ClangBuiltin<"__builtin_xtensa_ae_lalign64_i">, + Intrinsic<[llvm_v8i8_ty], [llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_lb: ClangBuiltin<"__builtin_xtensa_ae_lb">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_lbi: ClangBuiltin<"__builtin_xtensa_ae_lbi">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_lbk: ClangBuiltin<"__builtin_xtensa_ae_lbk">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_lbki: ClangBuiltin<"__builtin_xtensa_ae_lbki">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_lbs: ClangBuiltin<"__builtin_xtensa_ae_lbs">, + Intrinsic<[llvm_i32_ty], 
[llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_lbsi: ClangBuiltin<"__builtin_xtensa_ae_lbsi">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_le16: + Intrinsic<[llvm_v4i1_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_le32: + Intrinsic<[llvm_v2i1_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_le64: + Intrinsic<[llvm_v1i1_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_lt16: + Intrinsic<[llvm_v4i1_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_lt32: + Intrinsic<[llvm_v2i1_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_lt64: + Intrinsic<[llvm_v1i1_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_max32: ClangBuiltin<"__builtin_xtensa_ae_max32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_max64: ClangBuiltin<"__builtin_xtensa_ae_max64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_maxabs32s: ClangBuiltin<"__builtin_xtensa_ae_maxabs32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_maxabs64s: ClangBuiltin<"__builtin_xtensa_ae_maxabs64s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_min32: ClangBuiltin<"__builtin_xtensa_ae_min32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_min64: ClangBuiltin<"__builtin_xtensa_ae_min64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_minabs32s: ClangBuiltin<"__builtin_xtensa_ae_minabs32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_minabs64s: ClangBuiltin<"__builtin_xtensa_ae_minabs64s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_mov: 
ClangBuiltin<"__builtin_xtensa_ae_mov">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_movad16_0: ClangBuiltin<"__builtin_xtensa_ae_movad16_0">, + Intrinsic<[llvm_i32_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_movad16_1: ClangBuiltin<"__builtin_xtensa_ae_movad16_1">, + Intrinsic<[llvm_i32_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_movad16_2: ClangBuiltin<"__builtin_xtensa_ae_movad16_2">, + Intrinsic<[llvm_i32_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_movad16_3: ClangBuiltin<"__builtin_xtensa_ae_movad16_3">, + Intrinsic<[llvm_i32_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_movad32_h: ClangBuiltin<"__builtin_xtensa_ae_movad32_h">, + Intrinsic<[llvm_i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_movad32_l: ClangBuiltin<"__builtin_xtensa_ae_movad32_l">, + Intrinsic<[llvm_i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_movalign: ClangBuiltin<"__builtin_xtensa_ae_movalign">, + Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty], [IntrNoMem]>; + +def int_xtensa_ae_movda16: ClangBuiltin<"__builtin_xtensa_ae_movda16">, + Intrinsic<[llvm_v4i16_ty], [llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_movda16x2: ClangBuiltin<"__builtin_xtensa_ae_movda16x2">, + Intrinsic<[llvm_v4i16_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_movda32: ClangBuiltin<"__builtin_xtensa_ae_movda32">, + Intrinsic<[llvm_v1i32_ty], [llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_movda32x2: ClangBuiltin<"__builtin_xtensa_ae_movda32x2">, + Intrinsic<[llvm_v2i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_movf16x4: + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty, llvm_v4i1_ty], [IntrNoMem]>; + +def int_xtensa_ae_movf32x2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i1_ty], [IntrNoMem]>; + +def int_xtensa_ae_movf64: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v1i1_ty], [IntrNoMem]>; + +def 
int_xtensa_ae_movi: ClangBuiltin<"__builtin_xtensa_ae_movi">, + Intrinsic<[llvm_v2i32_ty], [llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_movt16x4: + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty, llvm_v4i1_ty], [IntrNoMem]>; + +def int_xtensa_ae_movt32x2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i1_ty], [IntrNoMem]>; + +def int_xtensa_ae_movt64: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v1i1_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul16x4: + Intrinsic<[llvm_v2i32_ty, llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32_hh: ClangBuiltin<"__builtin_xtensa_ae_mul32_hh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32_lh: ClangBuiltin<"__builtin_xtensa_ae_mul32_lh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32_ll: ClangBuiltin<"__builtin_xtensa_ae_mul32_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_mul32_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32u_ll: ClangBuiltin<"__builtin_xtensa_ae_mul32u_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_h0: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_h0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_h0_s2: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_h0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_h1: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_h1">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_h1_s2: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_h1_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, 
llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_h2: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_h2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_h2_s2: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_h2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_h3: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_h3">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_h3_s2: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_h3_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_l0: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_l0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_l0_s2: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_l0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_l1: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_l1">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_l1_s2: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_l1_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_l2: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_l2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_l2_s2: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_l2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_l3: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_l3">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mul32x16_l3_s2: ClangBuiltin<"__builtin_xtensa_ae_mul32x16_l3_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula16x4: + 
Intrinsic<[llvm_v2i32_ty, llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32_hh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32u_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_h0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_h0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_h1: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_h1_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_h2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_h2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_h3: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_h3_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + 
+def int_xtensa_ae_mula32x16_l1: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_l1_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_l3: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mula32x16_l3_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad24_hh_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad24_hh_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad24_hl_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad24_hl_lh_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad32x16_h0_l1: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad32x16_h0_l1_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad32x16_h1_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad32x16_h1_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad32x16_h2_l3: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad32x16_h2_l3_s2: + Intrinsic<[llvm_v1i64_ty], 
[llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad32x16_h3_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaad32x16_h3_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd16ss_11_00: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd16ss_11_00_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd16ss_13_02: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd16ss_13_02_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd16ss_33_22: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd16ss_33_22_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd24_hh_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd24_hh_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd24_hl_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd24_hl_lh_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd32x16_h0_l1: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd32x16_h0_l1_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd32x16_h1_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, 
llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd32x16_h1_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd32x16_h2_l3: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd32x16_h2_l3_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd32x16_h3_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaafd32x16_h3_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulac24: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulac32x16_h: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulac32x16_l: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_00: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_00_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_10: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_11: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_20: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_21: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_22: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_30: + 
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_31: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_32: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16ss_33: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf16x4ss: + Intrinsic<[llvm_v2i32_ty, llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32r_hh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32r_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32r_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32r_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32s_hh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32s_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32s_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32s_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_h0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_h0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_h1: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + 
+def int_xtensa_ae_mulaf32x16_h1_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_h2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_h2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_h3: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_h3_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_l1: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_l1_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_l3: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf32x16_l3_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf48q32sp16s_l: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf48q32sp16s_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf48q32sp16u_l: + Intrinsic<[llvm_v1i64_ty], 
[llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaf48q32sp16u_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafc24ra: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafc32x16ras_h: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafc32x16ras_l: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafd24x2_fir_h: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafd24x2_fir_l: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafd32x16x2_fir_hh: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafd32x16x2_fir_hl: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafd32x16x2_fir_lh: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafd32x16x2_fir_ll: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp24x2r: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp24x2r_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp24x2ra: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], 
[IntrNoMem]>; + +def int_xtensa_ae_mulafp24x2ra_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp32x16x2ras_h: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp32x16x2ras_h_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp32x16x2ras_l: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp32x16x2ras_l_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp32x16x2rs_h: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp32x16x2rs_h_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp32x16x2rs_l: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp32x16x2rs_l_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp32x2ras: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafp32x2rs: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafq32sp24s_h_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulafq32sp24s_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulap24x2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulap24x2_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulap32x16x2_h: + 
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulap32x16x2_l: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulap32x2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaq32sp16s_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulaq32sp16u_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mularfq32sp24s_h_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mularfq32sp24s_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulas32f48p16s_hh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulas32f48p16s_hh_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulas32f48p16s_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulas32f48p16s_lh_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulas32f48p16s_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulas32f48p16s_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasd24_hh_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasd24_hh_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasd24_hl_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, 
llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasd24_hl_lh_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasd32x16_h1_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasd32x16_h1_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasd32x16_h3_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasd32x16_h3_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasfd24_hh_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasfd24_hh_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasfd24_hl_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasfd24_hl_lh_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasfd32x16_h1_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasfd32x16_h1_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasfd32x16_h3_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulasfd32x16_h3_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulc24: ClangBuiltin<"__builtin_xtensa_ae_mulc24">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulc32x16_h: ClangBuiltin<"__builtin_xtensa_ae_mulc32x16_h">, + 
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulc32x16_l: ClangBuiltin<"__builtin_xtensa_ae_mulc32x16_l">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_00: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_00">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_00_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_00_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_10: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_10">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_11: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_11">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_20: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_20">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_21: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_21">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_22: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_22">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_30: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_30">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_31: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_31">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_32: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf16ss_33: ClangBuiltin<"__builtin_xtensa_ae_mulf16ss_33">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def 
int_xtensa_ae_mulf16x4ss: + Intrinsic<[llvm_v2i32_ty, llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32r_hh: ClangBuiltin<"__builtin_xtensa_ae_mulf32r_hh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32r_lh: ClangBuiltin<"__builtin_xtensa_ae_mulf32r_lh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32r_ll: ClangBuiltin<"__builtin_xtensa_ae_mulf32r_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32r_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf32r_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32s_hh: ClangBuiltin<"__builtin_xtensa_ae_mulf32s_hh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32s_lh: ClangBuiltin<"__builtin_xtensa_ae_mulf32s_lh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32s_ll: ClangBuiltin<"__builtin_xtensa_ae_mulf32s_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32s_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf32s_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_h0: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_h0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_h0_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_h0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_h1: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_h1">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_h1_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_h1_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, 
llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_h2: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_h2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_h2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_h2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_h3: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_h3">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_h3_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_h3_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_l0: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_l0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_l0_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_l0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_l1: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_l1">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_l1_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_l1_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_l2: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_l2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_l2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_l2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_l3: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_l3">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf32x16_l3_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf32x16_l3_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def 
int_xtensa_ae_mulf48q32sp16s_l: ClangBuiltin<"__builtin_xtensa_ae_mulf48q32sp16s_l">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf48q32sp16s_l_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf48q32sp16s_l_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf48q32sp16u_l: ClangBuiltin<"__builtin_xtensa_ae_mulf48q32sp16u_l">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulf48q32sp16u_l_s2: ClangBuiltin<"__builtin_xtensa_ae_mulf48q32sp16u_l_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfc24ra: ClangBuiltin<"__builtin_xtensa_ae_mulfc24ra">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfc32x16ras_h: ClangBuiltin<"__builtin_xtensa_ae_mulfc32x16ras_h">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfc32x16ras_l: ClangBuiltin<"__builtin_xtensa_ae_mulfc32x16ras_l">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfd24x2_fir_h: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfd24x2_fir_l: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfd32x16x2_fir_hh: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfd32x16x2_fir_hl: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfd32x16x2_fir_lh: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfd32x16x2_fir_ll: + Intrinsic<[llvm_v1i64_ty, llvm_v1i64_ty], [llvm_v2i32_ty, 
llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp16x4ras: ClangBuiltin<"__builtin_xtensa_ae_mulfp16x4ras">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp16x4s: ClangBuiltin<"__builtin_xtensa_ae_mulfp16x4s">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp24x2r: ClangBuiltin<"__builtin_xtensa_ae_mulfp24x2r">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp24x2r_s2: ClangBuiltin<"__builtin_xtensa_ae_mulfp24x2r_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp24x2ra: ClangBuiltin<"__builtin_xtensa_ae_mulfp24x2ra">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp24x2ra_s2: ClangBuiltin<"__builtin_xtensa_ae_mulfp24x2ra_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp32x16x2ras_h: ClangBuiltin<"__builtin_xtensa_ae_mulfp32x16x2ras_h">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp32x16x2ras_h_s2: ClangBuiltin<"__builtin_xtensa_ae_mulfp32x16x2ras_h_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp32x16x2ras_l: ClangBuiltin<"__builtin_xtensa_ae_mulfp32x16x2ras_l">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp32x16x2ras_l_s2: ClangBuiltin<"__builtin_xtensa_ae_mulfp32x16x2ras_l_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp32x16x2rs_h: ClangBuiltin<"__builtin_xtensa_ae_mulfp32x16x2rs_h">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp32x16x2rs_h_s2: ClangBuiltin<"__builtin_xtensa_ae_mulfp32x16x2rs_h_s2">, + Intrinsic<[llvm_v2i32_ty], 
[llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp32x16x2rs_l: ClangBuiltin<"__builtin_xtensa_ae_mulfp32x16x2rs_l">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp32x16x2rs_l_s2: ClangBuiltin<"__builtin_xtensa_ae_mulfp32x16x2rs_l_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp32x2ras: ClangBuiltin<"__builtin_xtensa_ae_mulfp32x2ras">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfp32x2rs: ClangBuiltin<"__builtin_xtensa_ae_mulfp32x2rs">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfq32sp24s_h_s2: ClangBuiltin<"__builtin_xtensa_ae_mulfq32sp24s_h_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulfq32sp24s_l_s2: ClangBuiltin<"__builtin_xtensa_ae_mulfq32sp24s_l_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulp24x2: ClangBuiltin<"__builtin_xtensa_ae_mulp24x2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulp24x2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulp24x2_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulp32x16x2_h: ClangBuiltin<"__builtin_xtensa_ae_mulp32x16x2_h">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulp32x16x2_l: ClangBuiltin<"__builtin_xtensa_ae_mulp32x16x2_l">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulp32x2: ClangBuiltin<"__builtin_xtensa_ae_mulp32x2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulq32sp16s_l_s2: ClangBuiltin<"__builtin_xtensa_ae_mulq32sp16s_l_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; 
+ +def int_xtensa_ae_mulq32sp16u_l_s2: ClangBuiltin<"__builtin_xtensa_ae_mulq32sp16u_l_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulrfq32sp24s_h_s2: ClangBuiltin<"__builtin_xtensa_ae_mulrfq32sp24s_h_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulrfq32sp24s_l_s2: ClangBuiltin<"__builtin_xtensa_ae_mulrfq32sp24s_l_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls16x4: + Intrinsic<[llvm_v2i32_ty, llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32_hh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32f48p16s_hh: ClangBuiltin<"__builtin_xtensa_ae_muls32f48p16s_hh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32f48p16s_hh_s2: ClangBuiltin<"__builtin_xtensa_ae_muls32f48p16s_hh_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32f48p16s_lh: ClangBuiltin<"__builtin_xtensa_ae_muls32f48p16s_lh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32f48p16s_lh_s2: ClangBuiltin<"__builtin_xtensa_ae_muls32f48p16s_lh_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32f48p16s_ll: ClangBuiltin<"__builtin_xtensa_ae_muls32f48p16s_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32f48p16s_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_muls32f48p16s_ll_s2">, + 
Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32u_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_h0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_h0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_h1: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_h1_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_h2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_h2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_h3: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_h3_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_l1: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_l1_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def 
int_xtensa_ae_muls32x16_l3: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_muls32x16_l3_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsad24_hh_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsad24_hh_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsad32x16_h1_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsad32x16_h1_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsad32x16_h3_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsad32x16_h3_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsafd24_hh_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsafd24_hh_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsafd32x16_h1_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsafd32x16_h1_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsafd32x16_h3_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsafd32x16_h3_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_00: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_00_s2: + 
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_10: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_11: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_20: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_21: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_22: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_30: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_31: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_32: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16ss_33: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf16x4ss: + Intrinsic<[llvm_v2i32_ty, llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32r_hh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32r_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32r_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32r_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32s_hh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def 
int_xtensa_ae_mulsf32s_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32s_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_h0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_h0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_h1: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_h1_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_h2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_h2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_h3: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_h3_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_l1: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_l1_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, 
llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_l3: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf32x16_l3_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf48q32sp16s_l: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf48q32sp16s_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf48q32sp16u_l: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsf48q32sp16u_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp24x2r: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp24x2r_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp24x2ra: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp24x2ra_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp32x16x2ras_h: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp32x16x2ras_h_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp32x16x2ras_l: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp32x16x2ras_l_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp32x16x2rs_h: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def 
int_xtensa_ae_mulsfp32x16x2rs_h_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp32x16x2rs_l: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp32x16x2rs_l_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp32x2ras: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfp32x2rs: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfq32sp24s_h_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsfq32sp24s_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsp24x2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsp24x2_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsp32x16x2_h: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsp32x16x2_l: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsp32x2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsq32sp16s_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsq32sp16u_l_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsrfq32sp24s_h_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulsrfq32sp24s_l_s2: + Intrinsic<[llvm_v1i64_ty], 
[llvm_v1i64_ty, llvm_v1i64_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulss32f48p16s_hh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulss32f48p16s_hh_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulss32f48p16s_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulss32f48p16s_lh_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulss32f48p16s_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulss32f48p16s_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssd24_hh_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssd24_hh_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssd24_hl_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssd24_hl_lh_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssd32x16_h1_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssd32x16_h1_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssd32x16_h3_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssd32x16_h3_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd16ss_11_00: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, 
llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd16ss_11_00_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd16ss_13_02: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd16ss_13_02_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd16ss_33_22: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd16ss_33_22_s2: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd24_hh_ll: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd24_hh_ll_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd24_hl_lh: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd24_hl_lh_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd32x16_h1_l0: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd32x16_h1_l0_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd32x16_h3_l2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulssfd32x16_h3_l2_s2: + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad24_hh_ll: ClangBuiltin<"__builtin_xtensa_ae_mulzaad24_hh_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad24_hh_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaad24_hh_ll_s2">, 
+ Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad24_hl_lh: ClangBuiltin<"__builtin_xtensa_ae_mulzaad24_hl_lh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad24_hl_lh_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaad24_hl_lh_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad32x16_h0_l1: ClangBuiltin<"__builtin_xtensa_ae_mulzaad32x16_h0_l1">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad32x16_h0_l1_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaad32x16_h0_l1_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad32x16_h1_l0: ClangBuiltin<"__builtin_xtensa_ae_mulzaad32x16_h1_l0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad32x16_h1_l0_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaad32x16_h1_l0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad32x16_h2_l3: ClangBuiltin<"__builtin_xtensa_ae_mulzaad32x16_h2_l3">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad32x16_h2_l3_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaad32x16_h2_l3_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad32x16_h3_l2: ClangBuiltin<"__builtin_xtensa_ae_mulzaad32x16_h3_l2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaad32x16_h3_l2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaad32x16_h3_l2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd16ss_11_00: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd16ss_11_00">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def 
int_xtensa_ae_mulzaafd16ss_11_00_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd16ss_11_00_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd16ss_13_02: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd16ss_13_02">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd16ss_13_02_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd16ss_13_02_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd16ss_33_22: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd16ss_33_22">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd16ss_33_22_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd16ss_33_22_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd24_hh_ll: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd24_hh_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd24_hh_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd24_hh_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd24_hl_lh: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd24_hl_lh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd24_hl_lh_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd24_hl_lh_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd32x16_h0_l1: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd32x16_h0_l1">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd32x16_h0_l1_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd32x16_h0_l1_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd32x16_h1_l0: 
ClangBuiltin<"__builtin_xtensa_ae_mulzaafd32x16_h1_l0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd32x16_h1_l0_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd32x16_h1_l0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd32x16_h2_l3: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd32x16_h2_l3">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd32x16_h2_l3_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd32x16_h2_l3_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd32x16_h3_l2: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd32x16_h3_l2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzaafd32x16_h3_l2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzaafd32x16_h3_l2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasd24_hh_ll: ClangBuiltin<"__builtin_xtensa_ae_mulzasd24_hh_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasd24_hh_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzasd24_hh_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasd24_hl_lh: ClangBuiltin<"__builtin_xtensa_ae_mulzasd24_hl_lh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasd24_hl_lh_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzasd24_hl_lh_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasd32x16_h1_l0: ClangBuiltin<"__builtin_xtensa_ae_mulzasd32x16_h1_l0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasd32x16_h1_l0_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzasd32x16_h1_l0_s2">, + Intrinsic<[llvm_v1i64_ty], 
[llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasd32x16_h3_l2: ClangBuiltin<"__builtin_xtensa_ae_mulzasd32x16_h3_l2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasd32x16_h3_l2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzasd32x16_h3_l2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasfd24_hh_ll: ClangBuiltin<"__builtin_xtensa_ae_mulzasfd24_hh_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasfd24_hh_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzasfd24_hh_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasfd24_hl_lh: ClangBuiltin<"__builtin_xtensa_ae_mulzasfd24_hl_lh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasfd24_hl_lh_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzasfd24_hl_lh_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasfd32x16_h1_l0: ClangBuiltin<"__builtin_xtensa_ae_mulzasfd32x16_h1_l0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasfd32x16_h1_l0_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzasfd32x16_h1_l0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasfd32x16_h3_l2: ClangBuiltin<"__builtin_xtensa_ae_mulzasfd32x16_h3_l2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzasfd32x16_h3_l2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzasfd32x16_h3_l2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsad24_hh_ll: ClangBuiltin<"__builtin_xtensa_ae_mulzsad24_hh_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsad24_hh_ll_s2: 
ClangBuiltin<"__builtin_xtensa_ae_mulzsad24_hh_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsad32x16_h1_l0: ClangBuiltin<"__builtin_xtensa_ae_mulzsad32x16_h1_l0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsad32x16_h1_l0_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzsad32x16_h1_l0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsad32x16_h3_l2: ClangBuiltin<"__builtin_xtensa_ae_mulzsad32x16_h3_l2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsad32x16_h3_l2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzsad32x16_h3_l2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsafd24_hh_ll: ClangBuiltin<"__builtin_xtensa_ae_mulzsafd24_hh_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsafd24_hh_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzsafd24_hh_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsafd32x16_h1_l0: ClangBuiltin<"__builtin_xtensa_ae_mulzsafd32x16_h1_l0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsafd32x16_h1_l0_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzsafd32x16_h1_l0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsafd32x16_h3_l2: ClangBuiltin<"__builtin_xtensa_ae_mulzsafd32x16_h3_l2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzsafd32x16_h3_l2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzsafd32x16_h3_l2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssd24_hh_ll: ClangBuiltin<"__builtin_xtensa_ae_mulzssd24_hh_ll">, + 
Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssd24_hh_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssd24_hh_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssd24_hl_lh: ClangBuiltin<"__builtin_xtensa_ae_mulzssd24_hl_lh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssd24_hl_lh_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssd24_hl_lh_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssd32x16_h1_l0: ClangBuiltin<"__builtin_xtensa_ae_mulzssd32x16_h1_l0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssd32x16_h1_l0_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssd32x16_h1_l0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssd32x16_h3_l2: ClangBuiltin<"__builtin_xtensa_ae_mulzssd32x16_h3_l2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssd32x16_h3_l2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssd32x16_h3_l2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd16ss_11_00: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd16ss_11_00">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd16ss_11_00_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd16ss_11_00_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd16ss_13_02: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd16ss_13_02">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd16ss_13_02_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd16ss_13_02_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def 
int_xtensa_ae_mulzssfd16ss_33_22: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd16ss_33_22">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd16ss_33_22_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd16ss_33_22_s2">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd24_hh_ll: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd24_hh_ll">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd24_hh_ll_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd24_hh_ll_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd24_hl_lh: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd24_hl_lh">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd24_hl_lh_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd24_hl_lh_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd32x16_h1_l0: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd32x16_h1_l0">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd32x16_h1_l0_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd32x16_h1_l0_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd32x16_h3_l2: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd32x16_h3_l2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_mulzssfd32x16_h3_l2_s2: ClangBuiltin<"__builtin_xtensa_ae_mulzssfd32x16_h3_l2_s2">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_nand: ClangBuiltin<"__builtin_xtensa_ae_nand">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_neg16s: ClangBuiltin<"__builtin_xtensa_ae_neg16s">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty], 
[IntrNoMem]>; + +def int_xtensa_ae_neg24s: ClangBuiltin<"__builtin_xtensa_ae_neg24s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_neg32: ClangBuiltin<"__builtin_xtensa_ae_neg32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_neg32s: ClangBuiltin<"__builtin_xtensa_ae_neg32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_neg64: ClangBuiltin<"__builtin_xtensa_ae_neg64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_neg64s: ClangBuiltin<"__builtin_xtensa_ae_neg64s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_nsa64: ClangBuiltin<"__builtin_xtensa_ae_nsa64">, + Intrinsic<[llvm_i32_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_nsaz16_0: ClangBuiltin<"__builtin_xtensa_ae_nsaz16_0">, + Intrinsic<[llvm_i32_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_nsaz32_l: ClangBuiltin<"__builtin_xtensa_ae_nsaz32_l">, + Intrinsic<[llvm_i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_or: ClangBuiltin<"__builtin_xtensa_ae_or">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_pksr24: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v1i64_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_pksr32: + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v1i64_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_round16x4f32sasym: ClangBuiltin<"__builtin_xtensa_ae_round16x4f32sasym">, + Intrinsic<[llvm_v4i16_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_round16x4f32ssym: ClangBuiltin<"__builtin_xtensa_ae_round16x4f32ssym">, + Intrinsic<[llvm_v4i16_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_round24x2f48sasym: ClangBuiltin<"__builtin_xtensa_ae_round24x2f48sasym">, + Intrinsic<[llvm_v2i32_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def 
int_xtensa_ae_round24x2f48ssym: ClangBuiltin<"__builtin_xtensa_ae_round24x2f48ssym">, + Intrinsic<[llvm_v2i32_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_round32x2f48sasym: ClangBuiltin<"__builtin_xtensa_ae_round32x2f48sasym">, + Intrinsic<[llvm_v2i32_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_round32x2f48ssym: ClangBuiltin<"__builtin_xtensa_ae_round32x2f48ssym">, + Intrinsic<[llvm_v2i32_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_round32x2f64sasym: ClangBuiltin<"__builtin_xtensa_ae_round32x2f64sasym">, + Intrinsic<[llvm_v2i32_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_round32x2f64ssym: ClangBuiltin<"__builtin_xtensa_ae_round32x2f64ssym">, + Intrinsic<[llvm_v2i32_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_roundsp16f24asym: ClangBuiltin<"__builtin_xtensa_ae_roundsp16f24asym">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_roundsp16f24sym: ClangBuiltin<"__builtin_xtensa_ae_roundsp16f24sym">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_roundsp16q48x2asym: ClangBuiltin<"__builtin_xtensa_ae_roundsp16q48x2asym">, + Intrinsic<[llvm_v2i32_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_roundsp16q48x2sym: ClangBuiltin<"__builtin_xtensa_ae_roundsp16q48x2sym">, + Intrinsic<[llvm_v2i32_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_roundsq32f48asym: ClangBuiltin<"__builtin_xtensa_ae_roundsq32f48asym">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_roundsq32f48sym: ClangBuiltin<"__builtin_xtensa_ae_roundsq32f48sym">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_s16_0_i: ClangBuiltin<"__builtin_xtensa_ae_s16_0_i">, + Intrinsic<[], [llvm_v4i16_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s16_0_ip: + Intrinsic<[llvm_ptr_ty], 
[llvm_v4i16_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s16_0_x: ClangBuiltin<"__builtin_xtensa_ae_s16_0_x">, + Intrinsic<[], [llvm_v4i16_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16_0_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v4i16_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16_0_xp: + Intrinsic<[llvm_ptr_ty], [llvm_v4i16_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16m_l_i: ClangBuiltin<"__builtin_xtensa_ae_s16m_l_i">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s16m_l_iu: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s16m_l_x: ClangBuiltin<"__builtin_xtensa_ae_s16m_l_x">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16m_l_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16m_l_xu: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16x2m_i: ClangBuiltin<"__builtin_xtensa_ae_s16x2m_i">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s16x2m_iu: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s16x2m_x: ClangBuiltin<"__builtin_xtensa_ae_s16x2m_x">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16x2m_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16x2m_xu: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16x4_i: ClangBuiltin<"__builtin_xtensa_ae_s16x4_i">, + Intrinsic<[], [llvm_v4i16_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s16x4_ip: + Intrinsic<[llvm_ptr_ty], [llvm_v4i16_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s16x4_ric: + Intrinsic<[llvm_ptr_ty], [llvm_v4i16_ty, llvm_ptr_ty], []>; + +def 
int_xtensa_ae_s16x4_rip: + Intrinsic<[llvm_ptr_ty], [llvm_v4i16_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_s16x4_x: ClangBuiltin<"__builtin_xtensa_ae_s16x4_x">, + Intrinsic<[], [llvm_v4i16_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16x4_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v4i16_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s16x4_xp: + Intrinsic<[llvm_ptr_ty], [llvm_v4i16_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s24ra64s_i: ClangBuiltin<"__builtin_xtensa_ae_s24ra64s_i">, + Intrinsic<[], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s24ra64s_ip: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s24ra64s_x: ClangBuiltin<"__builtin_xtensa_ae_s24ra64s_x">, + Intrinsic<[], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s24ra64s_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s24ra64s_xp: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s24x2ra64s_ip: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_s32_l_i: ClangBuiltin<"__builtin_xtensa_ae_s32_l_i">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32_l_ip: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32_l_x: ClangBuiltin<"__builtin_xtensa_ae_s32_l_x">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32_l_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32_l_xp: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32f24_l_i: ClangBuiltin<"__builtin_xtensa_ae_s32f24_l_i">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32f24_l_ip: + 
Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32f24_l_x: ClangBuiltin<"__builtin_xtensa_ae_s32f24_l_x">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32f24_l_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32f24_l_xp: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32m_i: ClangBuiltin<"__builtin_xtensa_ae_s32m_i">, + Intrinsic<[], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32m_iu: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32m_x: ClangBuiltin<"__builtin_xtensa_ae_s32m_x">, + Intrinsic<[], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32m_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32m_xu: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32ra64s_i: ClangBuiltin<"__builtin_xtensa_ae_s32ra64s_i">, + Intrinsic<[], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32ra64s_ip: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32ra64s_x: ClangBuiltin<"__builtin_xtensa_ae_s32ra64s_x">, + Intrinsic<[], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32ra64s_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32ra64s_xp: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32x2_i: ClangBuiltin<"__builtin_xtensa_ae_s32x2_i">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32x2_ip: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32x2_ric: + Intrinsic<[llvm_ptr_ty], 
[llvm_v2i32_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_s32x2_rip: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_s32x2_x: ClangBuiltin<"__builtin_xtensa_ae_s32x2_x">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32x2_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32x2_xp: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32x2f24_i: ClangBuiltin<"__builtin_xtensa_ae_s32x2f24_i">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32x2f24_ip: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s32x2f24_ric: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_s32x2f24_rip: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_s32x2f24_x: ClangBuiltin<"__builtin_xtensa_ae_s32x2f24_x">, + Intrinsic<[], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32x2f24_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32x2f24_xp: + Intrinsic<[llvm_ptr_ty], [llvm_v2i32_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s32x2ra64s_ip: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_s64_i: ClangBuiltin<"__builtin_xtensa_ae_s64_i">, + Intrinsic<[], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s64_ip: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_s64_x: ClangBuiltin<"__builtin_xtensa_ae_s64_x">, + Intrinsic<[], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s64_xc: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, llvm_i32_ty], []>; + +def int_xtensa_ae_s64_xp: + Intrinsic<[llvm_ptr_ty], [llvm_v1i64_ty, llvm_ptr_ty, 
llvm_i32_ty], []>; + +def int_xtensa_ae_sa16x4_ic: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v4i16_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa16x4_ip: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v4i16_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa16x4_ric: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v4i16_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa16x4_rip: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v4i16_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa24_l_ic: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa24_l_ip: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa24_l_ric: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa24_l_rip: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa24x2_ic: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa24x2_ip: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa24x2_ric: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa24x2_rip: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa32x2_ic: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa32x2_ip: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa32x2_ric: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa32x2_rip: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def 
int_xtensa_ae_sa32x2f24_ic: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa32x2f24_ip: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa32x2f24_ric: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa32x2f24_rip: + Intrinsic<[llvm_v8i8_ty, llvm_ptr_ty], [llvm_v2i32_ty, llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa64neg_fp: + Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_sa64pos_fp: + Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, llvm_ptr_ty], []>; + +def int_xtensa_ae_salign64_i: ClangBuiltin<"__builtin_xtensa_ae_salign64_i">, + Intrinsic<[], [llvm_v8i8_ty, llvm_ptr_ty, llvm_i32_ty], [ImmArg>]>; + +def int_xtensa_ae_sat16x4: ClangBuiltin<"__builtin_xtensa_ae_sat16x4">, + Intrinsic<[llvm_v4i16_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sat24s: ClangBuiltin<"__builtin_xtensa_ae_sat24s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sat48s: ClangBuiltin<"__builtin_xtensa_ae_sat48s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_satq56s: ClangBuiltin<"__builtin_xtensa_ae_satq56s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_sb: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sb_ic: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sb_ip: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sbf: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_sbf_ic: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_sbf_ip: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_sbi: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, 
llvm_i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_sbi_ic: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_sbi_ip: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_sel16i: ClangBuiltin<"__builtin_xtensa_ae_sel16i">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_sel16i_n: ClangBuiltin<"__builtin_xtensa_ae_sel16i_n">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_sext32: ClangBuiltin<"__builtin_xtensa_ae_sext32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_sext32x2d16_10: ClangBuiltin<"__builtin_xtensa_ae_sext32x2d16_10">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_sext32x2d16_32: ClangBuiltin<"__builtin_xtensa_ae_sext32x2d16_32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_sha32: ClangBuiltin<"__builtin_xtensa_ae_sha32">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_shortswap: ClangBuiltin<"__builtin_xtensa_ae_shortswap">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_slaa16s: ClangBuiltin<"__builtin_xtensa_ae_slaa16s">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_slaa32: ClangBuiltin<"__builtin_xtensa_ae_slaa32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_slaa32s: ClangBuiltin<"__builtin_xtensa_ae_slaa32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_slaa64: ClangBuiltin<"__builtin_xtensa_ae_slaa64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_slaa64s: 
ClangBuiltin<"__builtin_xtensa_ae_slaa64s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_slaaq56: ClangBuiltin<"__builtin_xtensa_ae_slaaq56">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_slai16s: ClangBuiltin<"__builtin_xtensa_ae_slai16s">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_slai24: ClangBuiltin<"__builtin_xtensa_ae_slai24">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_slai24s: ClangBuiltin<"__builtin_xtensa_ae_slai24s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_slai32: ClangBuiltin<"__builtin_xtensa_ae_slai32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_slai32s: ClangBuiltin<"__builtin_xtensa_ae_slai32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_slai64: ClangBuiltin<"__builtin_xtensa_ae_slai64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_slai64s: ClangBuiltin<"__builtin_xtensa_ae_slai64s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_slaisq56s: ClangBuiltin<"__builtin_xtensa_ae_slaisq56s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_slas24: ClangBuiltin<"__builtin_xtensa_ae_slas24">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_slas24s: ClangBuiltin<"__builtin_xtensa_ae_slas24s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_slas32: ClangBuiltin<"__builtin_xtensa_ae_slas32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_slas32s: ClangBuiltin<"__builtin_xtensa_ae_slas32s">, + 
Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_slas64: ClangBuiltin<"__builtin_xtensa_ae_slas64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_slas64s: ClangBuiltin<"__builtin_xtensa_ae_slas64s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_slasq56: ClangBuiltin<"__builtin_xtensa_ae_slasq56">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_slassq56s: ClangBuiltin<"__builtin_xtensa_ae_slassq56s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_sra64_32: ClangBuiltin<"__builtin_xtensa_ae_sra64_32">, + Intrinsic<[llvm_v1i64_ty], [llvm_v2i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sraa16rs: ClangBuiltin<"__builtin_xtensa_ae_sraa16rs">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sraa16s: ClangBuiltin<"__builtin_xtensa_ae_sraa16s">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sraa32: ClangBuiltin<"__builtin_xtensa_ae_sraa32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sraa32rs: ClangBuiltin<"__builtin_xtensa_ae_sraa32rs">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sraa32s: ClangBuiltin<"__builtin_xtensa_ae_sraa32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sraa64: ClangBuiltin<"__builtin_xtensa_ae_sraa64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_srai16: ClangBuiltin<"__builtin_xtensa_ae_srai16">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_srai16r: ClangBuiltin<"__builtin_xtensa_ae_srai16r">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_srai24: 
ClangBuiltin<"__builtin_xtensa_ae_srai24">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_srai32: ClangBuiltin<"__builtin_xtensa_ae_srai32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_srai32r: ClangBuiltin<"__builtin_xtensa_ae_srai32r">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_srai64: ClangBuiltin<"__builtin_xtensa_ae_srai64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_sras24: ClangBuiltin<"__builtin_xtensa_ae_sras24">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sras32: ClangBuiltin<"__builtin_xtensa_ae_sras32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sras64: ClangBuiltin<"__builtin_xtensa_ae_sras64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_srla32: ClangBuiltin<"__builtin_xtensa_ae_srla32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_srla64: ClangBuiltin<"__builtin_xtensa_ae_srla64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_srli24: ClangBuiltin<"__builtin_xtensa_ae_srli24">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_srli32: ClangBuiltin<"__builtin_xtensa_ae_srli32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_srli64: ClangBuiltin<"__builtin_xtensa_ae_srli64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_srls24: ClangBuiltin<"__builtin_xtensa_ae_srls24">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_srls32: ClangBuiltin<"__builtin_xtensa_ae_srls32">, + Intrinsic<[llvm_v2i32_ty], 
[llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_srls64: ClangBuiltin<"__builtin_xtensa_ae_srls64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_sub16: ClangBuiltin<"__builtin_xtensa_ae_sub16">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_sub16s: ClangBuiltin<"__builtin_xtensa_ae_sub16s">, + Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty], [IntrNoMem]>; + +def int_xtensa_ae_sub24s: ClangBuiltin<"__builtin_xtensa_ae_sub24s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sub32: ClangBuiltin<"__builtin_xtensa_ae_sub32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sub32s: ClangBuiltin<"__builtin_xtensa_ae_sub32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_sub64: ClangBuiltin<"__builtin_xtensa_ae_sub64">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_sub64s: ClangBuiltin<"__builtin_xtensa_ae_sub64s">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_subadd32: ClangBuiltin<"__builtin_xtensa_ae_subadd32">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_subadd32s: ClangBuiltin<"__builtin_xtensa_ae_subadd32s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_trunca32f64s_l: ClangBuiltin<"__builtin_xtensa_ae_trunca32f64s_l">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_trunca32x2f64s: ClangBuiltin<"__builtin_xtensa_ae_trunca32x2f64s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_xtensa_ae_trunci32f64s_l: ClangBuiltin<"__builtin_xtensa_ae_trunci32f64s_l">, + Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, 
llvm_v1i64_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_trunci32x2f64s: ClangBuiltin<"__builtin_xtensa_ae_trunci32x2f64s">, + Intrinsic<[llvm_v2i32_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; + +def int_xtensa_ae_vldl16c: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_vldl16c_ic: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_vldl16c_ip: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_vldl16t: + Intrinsic<[llvm_v1i1_ty, llvm_i32_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_vldl32t: + Intrinsic<[llvm_v1i1_ty, llvm_i32_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_vldsht: ClangBuiltin<"__builtin_xtensa_ae_vldsht">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_ae_vlel16t: + Intrinsic<[llvm_v1i1_ty, llvm_i32_ty], [llvm_i32_ty, llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_vlel32t: + Intrinsic<[llvm_v1i1_ty, llvm_i32_ty], [llvm_i32_ty, llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_vles16c: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_vles16c_ic: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_vles16c_ip: + Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>; + +def int_xtensa_ae_xor: ClangBuiltin<"__builtin_xtensa_ae_xor">, + Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>; + +def int_xtensa_ae_zalign64: ClangBuiltin<"__builtin_xtensa_ae_zalign64">, + Intrinsic<[llvm_v8i8_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_bithead: ClangBuiltin<"__builtin_xtensa_rur_ae_bithead">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_bitptr: ClangBuiltin<"__builtin_xtensa_rur_ae_bitptr">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_bitsused: ClangBuiltin<"__builtin_xtensa_rur_ae_bitsused">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_cbegin0: 
ClangBuiltin<"__builtin_xtensa_rur_ae_cbegin0">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_cend0: ClangBuiltin<"__builtin_xtensa_rur_ae_cend0">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_cw_sd_no: ClangBuiltin<"__builtin_xtensa_rur_ae_cw_sd_no">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_cwrap: ClangBuiltin<"__builtin_xtensa_rur_ae_cwrap">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_first_ts: ClangBuiltin<"__builtin_xtensa_rur_ae_first_ts">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_nextoffset: ClangBuiltin<"__builtin_xtensa_rur_ae_nextoffset">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_overflow: ClangBuiltin<"__builtin_xtensa_rur_ae_overflow">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_ovf_sar: ClangBuiltin<"__builtin_xtensa_rur_ae_ovf_sar">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_sar: ClangBuiltin<"__builtin_xtensa_rur_ae_sar">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_searchdone: ClangBuiltin<"__builtin_xtensa_rur_ae_searchdone">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_tablesize: ClangBuiltin<"__builtin_xtensa_rur_ae_tablesize">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_rur_ae_ts_fts_bu_bp: ClangBuiltin<"__builtin_xtensa_rur_ae_ts_fts_bu_bp">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; + +def int_xtensa_wur_ae_bithead: ClangBuiltin<"__builtin_xtensa_wur_ae_bithead">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_bitptr: ClangBuiltin<"__builtin_xtensa_wur_ae_bitptr">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_bitsused: ClangBuiltin<"__builtin_xtensa_wur_ae_bitsused">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_cbegin0: ClangBuiltin<"__builtin_xtensa_wur_ae_cbegin0">, + Intrinsic<[], [llvm_i32_ty], []>; + 
+def int_xtensa_wur_ae_cend0: ClangBuiltin<"__builtin_xtensa_wur_ae_cend0">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_cw_sd_no: ClangBuiltin<"__builtin_xtensa_wur_ae_cw_sd_no">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_cwrap: ClangBuiltin<"__builtin_xtensa_wur_ae_cwrap">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_first_ts: ClangBuiltin<"__builtin_xtensa_wur_ae_first_ts">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_nextoffset: ClangBuiltin<"__builtin_xtensa_wur_ae_nextoffset">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_overflow: ClangBuiltin<"__builtin_xtensa_wur_ae_overflow">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_ovf_sar: ClangBuiltin<"__builtin_xtensa_wur_ae_ovf_sar">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_sar: ClangBuiltin<"__builtin_xtensa_wur_ae_sar">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_searchdone: ClangBuiltin<"__builtin_xtensa_wur_ae_searchdone">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_tablesize: ClangBuiltin<"__builtin_xtensa_wur_ae_tablesize">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_xtensa_wur_ae_ts_fts_bu_bp: ClangBuiltin<"__builtin_xtensa_wur_ae_ts_fts_bu_bp">, + Intrinsic<[], [llvm_i32_ty], []>; + From a33a0195f1b3d1c4d0081b8ef3f700b54b979c29 Mon Sep 17 00:00:00 2001 From: Maciej Czekaj Date: Thu, 29 Jun 2023 10:33:40 +0000 Subject: [PATCH 196/289] [Xtensa] Add HIFI3 register classes --- .../Disassembler/XtensaDisassembler.cpp | 66 +++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaRegisterInfo.td | 61 +++++++++++++++++ 2 files changed, 127 insertions(+) diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index 5d76b2c88fead..2265dd99b609d 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ 
-68,6 +68,38 @@ static const unsigned ARDecoderTable[] = { Xtensa::A6, Xtensa::A7, Xtensa::A8, Xtensa::A9, Xtensa::A10, Xtensa::A11, Xtensa::A12, Xtensa::A13, Xtensa::A14, Xtensa::A15}; +static const unsigned AE_DRDecoderTable[] = { + Xtensa::AED0, Xtensa::AED1, Xtensa::AED2, Xtensa::AED3, + Xtensa::AED4, Xtensa::AED5, Xtensa::AED6, Xtensa::AED7, + Xtensa::AED8, Xtensa::AED9, Xtensa::AED10, Xtensa::AED11, + Xtensa::AED12, Xtensa::AED13, Xtensa::AED14, Xtensa::AED15}; + +static const unsigned AE_VALIGNDecoderTable[] = {Xtensa::U0, Xtensa::U1, + Xtensa::U2, Xtensa::U3}; + + +static DecodeStatus DecodeAE_DRRegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo >= std::size(AE_DRDecoderTable)) + return MCDisassembler::Fail; + + unsigned Reg = AE_DRDecoderTable[RegNo]; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeAE_VALIGNRegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo >= std::size(AE_VALIGNDecoderTable)) + return MCDisassembler::Fail; + + unsigned Reg = AE_VALIGNDecoderTable[RegNo]; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + static DecodeStatus DecodeARRegisterClass(MCInst &Inst, uint64_t RegNo, uint64_t Address, const void *Decoder) { @@ -115,6 +147,40 @@ static const unsigned BRDecoderTable[] = { Xtensa::B6, Xtensa::B7, Xtensa::B8, Xtensa::B9, Xtensa::B10, Xtensa::B11, Xtensa::B12, Xtensa::B13, Xtensa::B14, Xtensa::B15}; +static const unsigned BR2DecoderTable[] = { + Xtensa::B0_B1, Xtensa::B2_B3, Xtensa::B4_B5, Xtensa::B6_B7, + Xtensa::B8_B9, Xtensa::B10_B11, Xtensa::B12_B13, Xtensa::B14_B15}; + + +static const unsigned BR4DecoderTable[] = { + Xtensa::B0_B1_B2_B3, Xtensa::B4_B5_B6_B7, + Xtensa::B8_B9_B10_B11, Xtensa::B12_B13_B14_B15}; + + +static DecodeStatus DecodeXtensaRegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder, + 
ArrayRef DecoderTable) { + if (RegNo >= DecoderTable.size()) + return MCDisassembler::Fail; + + unsigned Reg = DecoderTable[RegNo]; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeBR2RegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + return DecodeXtensaRegisterClass(Inst,RegNo,Address,Decoder,ArrayRef(BR2DecoderTable)); +} + +static DecodeStatus DecodeBR4RegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + return DecodeXtensaRegisterClass(Inst,RegNo,Address,Decoder,ArrayRef(BR4DecoderTable)); +} + static DecodeStatus DecodeBRRegisterClass(MCInst &Inst, uint64_t RegNo, uint64_t Address, const void *Decoder) { diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td index 7f2de9814da90..45a3dfddf53be 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.td @@ -313,5 +313,66 @@ B2, B3, B4, B5, B6, B7, B8, B9, B10, B11, B12, B13, B14, B15)> { let Size = 8; } +let Namespace = "Xtensa" in { + def bsub0 : SubRegIndex<1>; + def bsub1 : SubRegIndex<1, 1>; + def bsub2 : SubRegIndex<1, 2>; + def bsub3 : SubRegIndex<1, 3>; + + +let SubRegIndices = [bsub0, bsub1] in { + def B0_B1 : RegisterWithSubRegs<"", [B0, B1]>; + def B2_B3 : RegisterWithSubRegs<"", [B2, B3]>; + def B4_B5 : RegisterWithSubRegs<"", [B4, B5]>; + def B6_B7 : RegisterWithSubRegs<"", [B6, B7]>; + def B8_B9 : RegisterWithSubRegs<"", [B8, B9]>; + def B10_B11 : RegisterWithSubRegs<"", [B10, B11]>; + def B12_B13 : RegisterWithSubRegs<"", [B12, B13]>; + def B14_B15 : RegisterWithSubRegs<"", [B14, B15]>; +} + +let SubRegIndices = [bsub0, bsub1, bsub2, bsub3] in { + def B0_B1_B2_B3 : RegisterWithSubRegs<"", [B0, B1, B2, B3]>; + def B4_B5_B6_B7 : RegisterWithSubRegs<"", [B4, B5, B6, B7]>; + def B8_B9_B10_B11 : RegisterWithSubRegs<"", [B8, B9, B10, B11]>; + def B12_B13_B14_B15 : 
RegisterWithSubRegs<"", [B12, B13, B14, B15]>; +} + +} + +def BR2 : RegisterClass<"Xtensa", [v2i1], 8, (add B0_B1, B2_B3, B4_B5, + B6_B7, B8_B9, B10_B11, + B12_B13, B14_B15)> { + let Size = 8; +} + +def BR4 : RegisterClass<"Xtensa", [v4i1], 8, (add B0_B1_B2_B3, B4_B5_B6_B7, + B8_B9_B10_B11, B12_B13_B14_B15)> { + let Size = 8; +} +//===----------------------------------------------------------------------===// +// HIFI3 vector registers AE_DR +//===----------------------------------------------------------------------===// +class AEDReg num, string n> : XtensaReg { + let HWEncoding{3-0} = num; +} + +foreach i = 0-15 in { + def AED#i : AEDReg, DwarfRegNum<[-1]>; +} +def AE_DR : RegisterClass<"Xtensa",[v4i16,v2i32,v1i64,v1i32],64, + (sequence "AED%u", 0, 15)>; + +//===----------------------------------------------------------------------===// +// HIFI3 vector alignment registers AE_VALIGN +//===----------------------------------------------------------------------===// +class AEVALIGNReg num, string n> : XtensaReg { + let HWEncoding{1-0} = num; +} + +foreach i = 0-3 in { + def U#i : AEVALIGNReg, DwarfRegNum<[-1]>; +} +def AE_VALIGN : RegisterClass<"Xtensa",[v8i8], 64, (sequence "U%u", 0, 3)>; \ No newline at end of file From 10891bff8238245ba167ba0ea643abce394edb74 Mon Sep 17 00:00:00 2001 From: Maciej Czekaj Date: Thu, 29 Jun 2023 10:36:54 +0000 Subject: [PATCH 197/289] [Xtensa] Add HIFI3 target feature --- clang/lib/Basic/Targets/Xtensa.cpp | 3 +++ clang/lib/Basic/Targets/Xtensa.h | 1 + llvm/include/llvm/TargetParser/XtensaTargetParser.def | 4 +++- llvm/include/llvm/TargetParser/XtensaTargetParser.h | 3 ++- llvm/lib/Target/Xtensa/Xtensa.td | 7 ++++++- llvm/lib/Target/Xtensa/XtensaSubtarget.cpp | 1 + llvm/lib/Target/Xtensa/XtensaSubtarget.h | 5 +++++ 7 files changed, 21 insertions(+), 3 deletions(-) diff --git a/clang/lib/Basic/Targets/Xtensa.cpp b/clang/lib/Basic/Targets/Xtensa.cpp index 3c00be659c092..cded885966c89 100644 --- a/clang/lib/Basic/Targets/Xtensa.cpp 
+++ b/clang/lib/Basic/Targets/Xtensa.cpp @@ -76,6 +76,7 @@ bool XtensaTargetInfo::hasFeature(StringRef Feature) const { .Case("fp", HasFP) .Case("windowed", HasWindowed) .Case("bool", HasBoolean) + .Case("hifi3", HasHIFI3) .Default(false); } @@ -89,6 +90,8 @@ bool XtensaTargetInfo::handleTargetFeatures(std::vector &Features, HasBoolean = true; else if (Feature == "+windowed") HasWindowed = true; + else if (Feature == "+hifi3") + HasHIFI3 = true; } return true; diff --git a/clang/lib/Basic/Targets/Xtensa.h b/clang/lib/Basic/Targets/Xtensa.h index c969f182c63d6..b2c923b2cd24a 100644 --- a/clang/lib/Basic/Targets/Xtensa.h +++ b/clang/lib/Basic/Targets/Xtensa.h @@ -34,6 +34,7 @@ class LLVM_LIBRARY_VISIBILITY XtensaTargetInfo : public TargetInfo { bool HasFP = false; bool HasWindowed = false; bool HasBoolean = false; + bool HasHIFI3 = false; public: XtensaTargetInfo(const llvm::Triple &Triple, const TargetOptions &) diff --git a/llvm/include/llvm/TargetParser/XtensaTargetParser.def b/llvm/include/llvm/TargetParser/XtensaTargetParser.def index 3fe9b2760b773..edc178ac559f5 100644 --- a/llvm/include/llvm/TargetParser/XtensaTargetParser.def +++ b/llvm/include/llvm/TargetParser/XtensaTargetParser.def @@ -45,6 +45,7 @@ XTENSA_FEATURE(FK_REGPROTECT, "regprotect") XTENSA_FEATURE(FK_MISCSR, "miscsr") XTENSA_FEATURE(FK_ESP32S2OPS, "esp32s2") XTENSA_FEATURE(FK_ESP32S3OPS, "esp32s3") +XTENSA_FEATURE(FK_HIFI3, "hifi3") #undef XTENSA_FEATURE @@ -77,7 +78,8 @@ XTENSA_CPU(CNL, {"cnl"}, (FK_DENSITY | FK_FP | FK_LOOP | FK_MAC16 | FK_WINDOWED FK_SEXT | FK_NSA | FK_MUL32 | FK_MUL32HIGH | FK_S32C1I | FK_THREADPTR | FK_DIV32 | FK_ATOMCTL | FK_MEMCTL | FK_DEBUG | FK_EXCEPTION | FK_HIGHPRIINTERRUPTS | FK_COPROCESSOR | - FK_INTERRUPT | FK_RVECTOR | FK_TIMERINT | FK_PRID | FK_REGPROTECT | FK_MISCSR)) + FK_INTERRUPT | FK_RVECTOR | FK_TIMERINT | FK_PRID | FK_REGPROTECT | FK_MISCSR | + FK_HIFI3)) #undef XTENSA_CPU diff --git a/llvm/include/llvm/TargetParser/XtensaTargetParser.h 
b/llvm/include/llvm/TargetParser/XtensaTargetParser.h index d4e639005a5a2..8e1c55c6f9e21 100644 --- a/llvm/include/llvm/TargetParser/XtensaTargetParser.h +++ b/llvm/include/llvm/TargetParser/XtensaTargetParser.h @@ -59,7 +59,8 @@ enum FeatureKind : uint64_t { FK_REGPROTECT = 1 << 28, FK_MISCSR = 1 << 29, FK_ESP32S2OPS = 1 << 30, - FK_ESP32S3OPS = 1ULL << 31 + FK_ESP32S3OPS = 1ULL << 31, + FK_HIFI3 = 1ULL << 32 }; CPUKind parseCPUKind(StringRef CPU); diff --git a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td index 2171433b0fba0..2204fff9a7e61 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -178,6 +178,11 @@ def FeatureESP32S3Ops : SubtargetFeature<"esp32s3", "HasESP32S3Ops", "tru def HasESP32S3Ops : Predicate<"Subtarget->hasESP32S3Ops()">, AssemblerPredicate<(all_of FeatureESP32S3Ops)>; +def FeatureHIFI3 : SubtargetFeature<"hifi3", "HasHIFI3", "true", + "Enable Xtensa HIFI3 instructions">; +def HasHIFI3 : Predicate<"Subtarget->hasHIFI3()">, + AssemblerPredicate<(all_of FeatureHIFI3)>; + //===----------------------------------------------------------------------===// // Xtensa supported processors. 
//===----------------------------------------------------------------------===// @@ -211,7 +216,7 @@ def : Proc<"cnl", [FeatureDensity, FeatureSingleFloat, FeatureLoop, FeatureWind FeatureTHREADPTR, FeatureDiv32, FeatureATOMCTL, FeatureMEMCTL, FeatureDebug, FeatureException, FeatureHighPriInterrupts, FeatureCoprocessor, FeatureInterrupt, FeatureRelocatableVector, FeatureTimerInt, FeaturePRID, - FeatureRegionProtection, FeatureMiscSR]>; + FeatureRegionProtection, FeatureMiscSR, FeatureHIFI3]>; //===----------------------------------------------------------------------===// // Register File Description diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp index 64d61b7bbd83d..8544ee0352d03 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp @@ -75,6 +75,7 @@ XtensaSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) { HasMiscSR = false; HasESP32S2Ops = false; HasESP32S3Ops = false; + HasHIFI3 = false; // Parse features string. ParseSubtargetFeatures(CPUName, CPUName, FS); diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.h b/llvm/lib/Target/Xtensa/XtensaSubtarget.h index 32773eb6a23ae..c6e054399be26 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.h +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.h @@ -132,6 +132,9 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { // Enable Xtensa esp32-s3 ISA extension bool HasESP32S3Ops; + // Enable Xtensa HIFI3 Extension + bool HasHIFI3; + XtensaSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS); public: @@ -223,6 +226,8 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { bool useTextSectionLiterals() const; + bool hasHIFI3() const { return HasHIFI3; } + // Automatically generated by tblgen. 
void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS); }; From 064fbbced29fef3858b21ad9294db8aed1285670 Mon Sep 17 00:00:00 2001 From: Maciej Czekaj Date: Mon, 13 Nov 2023 12:29:24 +0000 Subject: [PATCH 198/289] [Xtensa] Add HIFI3 instructions --- .../Xtensa/AsmParser/XtensaAsmParser.cpp | 4 + .../Xtensa/MCTargetDesc/XtensaInstPrinter.h | 14 + .../Target/Xtensa/XtensaHIFIInstrFormats.td | 48 + llvm/lib/Target/Xtensa/XtensaHIFIInstrInfo.td | 34720 ++++++++++++++++ llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 5 + llvm/lib/Target/Xtensa/XtensaOperands.td | 28 + 6 files changed, 34819 insertions(+) create mode 100644 llvm/lib/Target/Xtensa/XtensaHIFIInstrFormats.td create mode 100644 llvm/lib/Target/Xtensa/XtensaHIFIInstrInfo.td diff --git a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp index 9c79b18965ee6..11a7c8576fc8f 100644 --- a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp +++ b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp @@ -252,6 +252,10 @@ struct XtensaOperand : public MCParsedAsmOperand { bool isImm() const override { return Kind == Immediate; } bool isMem() const override { return false; } + template bool isImmInRange() const { + return Kind == Immediate && inRange(getImm(), Lo, Hi); + } + bool isImm(int64_t MinValue, int64_t MaxValue) const { return Kind == Immediate && inRange(getImm(), MinValue, MaxValue); } diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h index 9224d0a98c14b..756554bcf09b9 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h @@ -16,7 +16,9 @@ #define LLVM_LIB_TARGET_XTENSA_MCTARGETDESC_XTENSAINSTPRINTER_H #include "llvm/MC/MCInstPrinter.h" +#include "llvm/MC/MCInst.h" #include "llvm/Support/Compiler.h" +#include "llvm/Support/raw_ostream.h" namespace llvm { class MCOperand; @@ -82,6 
+84,18 @@ class XtensaInstPrinter : public MCInstPrinter { void printOffset_128_2_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printOffset_128_1_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printOffset_64_16_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + + template + void printImmOperand(const MCInst *MI, int OpNum, raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= lo && Value <= hi && ((Value % step) == 0)) && + "Invalid argument"); + O << Value; + } else { + printOperand(MI, OpNum, O); + } + } }; } // end namespace llvm diff --git a/llvm/lib/Target/Xtensa/XtensaHIFIInstrFormats.td b/llvm/lib/Target/Xtensa/XtensaHIFIInstrFormats.td new file mode 100644 index 0000000000000..f06847a0da7dd --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaHIFIInstrFormats.td @@ -0,0 +1,48 @@ +//===- XtensaHIFIInstrFormats.td - Instruction formats for Xtensa HIFI -*- tablegen -*--===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains Tablegen instruction formats for Xtensa HIFI extension +// +//===----------------------------------------------------------------------===// + +// Base class for Xtensa 64 bit Format +class XtensaInst64 pattern, + InstrItinClass itin = NoItinerary> + : XtensaInst<8, outs, ins, asmstr, pattern, itin> +{ + bits<64> Inst = 0; + bits<64> SoftFail = 0; + let DecoderNamespace = "HIFI3"; +} + +class XtensaInst48 pattern, + InstrItinClass itin = NoItinerary> + : XtensaInst<6, outs, ins, asmstr, pattern, itin> +{ + bits<48> Inst = 0; + bits<48> SoftFail = 0; + let DecoderNamespace = "HIFI3"; +} + +class XtensaInst88 pattern, + InstrItinClass itin = NoItinerary> + : XtensaInst<11, outs, ins, asmstr, pattern, itin> +{ + bits<88> Inst = 0; + bits<88> SoftFail = 0; + let DecoderNamespace = "HIFI3"; +} + +class XtensaAEInst24 pattern, + InstrItinClass itin = NoItinerary> + : XtensaInst24 { + let DecoderNamespace = "HIFI3"; + let Inst = 0; +} \ No newline at end of file diff --git a/llvm/lib/Target/Xtensa/XtensaHIFIInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaHIFIInstrInfo.td new file mode 100644 index 0000000000000..de373ede0efe1 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaHIFIInstrInfo.td @@ -0,0 +1,34720 @@ +//===- XtensaHIFIInstrInfo.td - Instruction definitions for Xtensa HIFI -*- tablegen -*--===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains Tablegen instruction definitiona for Xtensa HIFI extension +// +//===----------------------------------------------------------------------===// + +class AE_ABS16S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_abs16s $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ABS16S : AE_ABS16S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_abs16s AE_DR:$ae_arth_v1))]>; + +class AE_ABS24S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_abs24s $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ABS24S : AE_ABS24S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_abs24s AE_DR:$ae_arth_v1))]>; + +class AE_ABS32_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_abs32 $ae_arth_v, $ae_arth_v1", pattern>, + 
Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{25} = 1; +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_ABS32 : AE_ABS32_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_abs32 AE_DR:$ae_arth_v1))]>; + +class AE_ABS32S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_abs32s $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ABS32S : AE_ABS32S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_abs32s AE_DR:$ae_arth_v1))]>; + +class AE_ABS64_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_abs64 $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{11} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let 
Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ABS64 : AE_ABS64_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_abs64 AE_DR:$ae_arth_v1))]>; + +class AE_ABS64S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_abs64s $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{24} = 1; +let Inst{26} = 1; +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_ABS64S : AE_ABS64S_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_abs64s AE_DR:$ae_arth_v1))]>; + +class AE_ADD16_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_add16 $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{50} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{24} = ae_arth_v0{0}; +let Inst{25} = ae_arth_v0{1}; +let Inst{26} = ae_arth_v0{2}; +let Inst{27} = ae_arth_v0{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; 
+let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_ADD16 : AE_ADD16_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_add16 AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ADD16S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_add16s $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ADD16S : AE_ADD16S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_add16s AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ADD24S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_add24s $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ADD24S : AE_ADD24S_X24<[(set AE_DR:$ae_arth_v, 
(int_xtensa_ae_add24s AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ADD32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_add32 $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ADD32 : AE_ADD32_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_add32 AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ADD32_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_add32_hl_lh $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{51} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{24} = ae_arth_v0{0}; +let Inst{25} = ae_arth_v0{1}; +let Inst{26} = ae_arth_v0{2}; +let Inst{27} = ae_arth_v0{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_ADD32_HL_LH : AE_ADD32_HL_LH_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_add32_hl_lh AE_DR:$ae_arth_v0, 
AE_DR:$ae_arth_v1))]>; + +class AE_ADD32S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_add32s $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ADD32S : AE_ADD32S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_add32s AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ADD64_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_add64 $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ADD64 : AE_ADD64_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_add64 AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ADD64S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_add64s $ae_arth_v, $ae_arth_v0, $ae_arth_v1", 
pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{24} = ae_arth_v0{0}; +let Inst{25} = ae_arth_v0{1}; +let Inst{26} = ae_arth_v0{2}; +let Inst{27} = ae_arth_v0{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_ADD64S : AE_ADD64S_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_add64s AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ADDBRBA32_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$arr), (ins AR:$art, AR:$ars), "ae_addbrba32 $arr, $art, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> art; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{20} = arr{0}; +let Inst{21} = arr{1}; +let Inst{22} = arr{2}; +let Inst{23} = arr{3}; +let Inst{24} = art{0}; +let Inst{25} = art{1}; +let Inst{26} = art{2}; +let Inst{27} = art{3}; +let Inst{16} = ars{0}; +let Inst{17} = ars{1}; +let Inst{18} = ars{2}; +let Inst{19} = ars{3}; +} + + + +def AE_ADDBRBA32 : AE_ADDBRBA32_AE_FORMAT48<[(set AR:$arr, (int_xtensa_ae_addbrba32 AR:$art, AR:$ars))]>; + +class AE_ADDSUB32_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_addsub32 $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; 
+bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{24} = ae_arth_v0{0}; +let Inst{25} = ae_arth_v0{1}; +let Inst{26} = ae_arth_v0{2}; +let Inst{27} = ae_arth_v0{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_ADDSUB32 : AE_ADDSUB32_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_addsub32 AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ADDSUB32S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_addsub32s $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{24} = ae_arth_v0{0}; +let Inst{25} = ae_arth_v0{1}; +let Inst{26} = ae_arth_v0{2}; +let Inst{27} = ae_arth_v0{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_ADDSUB32S : AE_ADDSUB32S_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_addsub32s AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_AND_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_dr_to_dr_v), (ins AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1), "ae_and 
$ae_dr_to_dr_v, $ae_dr_to_dr_v0, $ae_dr_to_dr_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_dr_to_dr_v; +bits<4> ae_dr_to_dr_v0; +bits<4> ae_dr_to_dr_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_dr_to_dr_v{0}; +let Inst{13} = ae_dr_to_dr_v{1}; +let Inst{14} = ae_dr_to_dr_v{2}; +let Inst{15} = ae_dr_to_dr_v{3}; +let Inst{8} = ae_dr_to_dr_v0{0}; +let Inst{9} = ae_dr_to_dr_v0{1}; +let Inst{10} = ae_dr_to_dr_v0{2}; +let Inst{11} = ae_dr_to_dr_v0{3}; +let Inst{4} = ae_dr_to_dr_v1{0}; +let Inst{5} = ae_dr_to_dr_v1{1}; +let Inst{6} = ae_dr_to_dr_v1{2}; +let Inst{7} = ae_dr_to_dr_v1{3}; +} + + + +def AE_AND : AE_AND_X24<[(set AE_DR:$ae_dr_to_dr_v, (int_xtensa_ae_and AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1))]>; + +class AE_CVT32X2F16_10_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_to_dr_v), (ins AE_DR:$ae_to_dr_v0), "ae_cvt32x2f16.10 $ae_to_dr_v, $ae_to_dr_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_to_dr_v; +bits<4> ae_to_dr_v0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//operands +let Inst{12} = ae_to_dr_v{0}; +let Inst{13} = ae_to_dr_v{1}; +let Inst{14} = ae_to_dr_v{2}; +let Inst{15} = ae_to_dr_v{3}; +let Inst{8} = ae_to_dr_v0{0}; +let Inst{9} = ae_to_dr_v0{1}; +let Inst{10} = ae_to_dr_v0{2}; +let Inst{11} = ae_to_dr_v0{3}; +} + + + +def AE_CVT32X2F16_10 : AE_CVT32X2F16_10_AE_FORMAT<[(set AE_DR:$ae_to_dr_v, (int_xtensa_ae_cvt32x2f16_10 AE_DR:$ae_to_dr_v0))]>; + +class AE_CVT32X2F16_32_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_to_dr_v), (ins AE_DR:$ae_to_dr_v0), "ae_cvt32x2f16.32 $ae_to_dr_v, $ae_to_dr_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_to_dr_v; +bits<4> ae_to_dr_v0; 
+ + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//operands +let Inst{12} = ae_to_dr_v{0}; +let Inst{13} = ae_to_dr_v{1}; +let Inst{14} = ae_to_dr_v{2}; +let Inst{15} = ae_to_dr_v{3}; +let Inst{8} = ae_to_dr_v0{0}; +let Inst{9} = ae_to_dr_v0{1}; +let Inst{10} = ae_to_dr_v0{2}; +let Inst{11} = ae_to_dr_v0{3}; +} + + + +def AE_CVT32X2F16_32 : AE_CVT32X2F16_32_AE_FORMAT<[(set AE_DR:$ae_to_dr_v, (int_xtensa_ae_cvt32x2f16_32 AE_DR:$ae_to_dr_v0))]>; + +class AE_CVT48A32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ar_to_dr_v), (ins AR:$ars), "ae_cvt48a32 $ae_ar_to_dr_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ar_to_dr_v; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ar_to_dr_v{0}; +let Inst{13} = ae_ar_to_dr_v{1}; +let Inst{14} = ae_ar_to_dr_v{2}; +let Inst{15} = ae_ar_to_dr_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_CVT48A32 : AE_CVT48A32_X24<[(set AE_DR:$ae_ar_to_dr_v, (int_xtensa_ae_cvt48a32 AR:$ars))]>; + +class AE_CVT64A32_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ar_to_dr_v), (ins AR:$ars), "ae_cvt64a32 $ae_ar_to_dr_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ar_to_dr_v; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{25} = 1; +let Inst{27} = 1; +let Inst{38} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{20} = ae_ar_to_dr_v{0}; +let Inst{21} = 
ae_ar_to_dr_v{1}; +let Inst{22} = ae_ar_to_dr_v{2}; +let Inst{23} = ae_ar_to_dr_v{3}; +let Inst{16} = ars{0}; +let Inst{17} = ars{1}; +let Inst{18} = ars{2}; +let Inst{19} = ars{3}; +} + + + +def AE_CVT64A32 : AE_CVT64A32_AE_FORMAT48<[(set AE_DR:$ae_ar_to_dr_v, (int_xtensa_ae_cvt64a32 AR:$ars))]>; + +class AE_CVT64F32_H_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_dr_to_dr_v), (ins AE_DR:$ae_dr_to_dr_v0), "ae_cvt64f32.h $ae_dr_to_dr_v, $ae_dr_to_dr_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_dr_to_dr_v; +bits<4> ae_dr_to_dr_v0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{28} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_dr_to_dr_v{0}; +let Inst{21} = ae_dr_to_dr_v{1}; +let Inst{22} = ae_dr_to_dr_v{2}; +let Inst{23} = ae_dr_to_dr_v{3}; +let Inst{36} = ae_dr_to_dr_v0{0}; +let Inst{37} = ae_dr_to_dr_v0{1}; +let Inst{38} = ae_dr_to_dr_v0{2}; +let Inst{39} = ae_dr_to_dr_v0{3}; +} + + + +def AE_CVT64F32_H : AE_CVT64F32_H_AE_FORMAT1<[(set AE_DR:$ae_dr_to_dr_v, (int_xtensa_ae_cvt64f32_h AE_DR:$ae_dr_to_dr_v0))]>; + +class AE_CVTA32F24S_H_AE_FORMAT pattern> + : XtensaInst64<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_cvta32f24s.h $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{5} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ae_dr_to_ar_v0{0}; +let Inst{9} = ae_dr_to_ar_v0{1}; +let Inst{10} = ae_dr_to_ar_v0{2}; +let Inst{11} = ae_dr_to_ar_v0{3}; +} + + + +def AE_CVTA32F24S_H : AE_CVTA32F24S_H_AE_FORMAT<[(set AR:$arr, (int_xtensa_ae_cvta32f24s_h AE_DR:$ae_dr_to_ar_v0))]>; + 
+class AE_CVTA32F24S_L_AE_FORMAT pattern> + : XtensaInst64<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_cvta32f24s.l $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ae_dr_to_ar_v0{0}; +let Inst{9} = ae_dr_to_ar_v0{1}; +let Inst{10} = ae_dr_to_ar_v0{2}; +let Inst{11} = ae_dr_to_ar_v0{3}; +} + + + +def AE_CVTA32F24S_L : AE_CVTA32F24S_L_AE_FORMAT<[(set AR:$arr, (int_xtensa_ae_cvta32f24s_l AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_CVTQ56A32S_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ar_to_dr_v), (ins AR:$ars), "ae_cvtq56a32s $ae_ar_to_dr_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ar_to_dr_v; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{24} = 1; +let Inst{25} = 1; +let Inst{27} = 1; +let Inst{38} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{20} = ae_ar_to_dr_v{0}; +let Inst{21} = ae_ar_to_dr_v{1}; +let Inst{22} = ae_ar_to_dr_v{2}; +let Inst{23} = ae_ar_to_dr_v{3}; +let Inst{16} = ars{0}; +let Inst{17} = ars{1}; +let Inst{18} = ars{2}; +let Inst{19} = ars{3}; +} + + + +def AE_CVTQ56A32S : AE_CVTQ56A32S_AE_FORMAT48<[(set AE_DR:$ae_ar_to_dr_v, (int_xtensa_ae_cvtq56a32s AR:$ars))]>; + +class AE_CVTQ56P32S_H_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_dr_to_dr_v), (ins AE_DR:$ae_dr_to_dr_v0), "ae_cvtq56p32s.h $ae_dr_to_dr_v, $ae_dr_to_dr_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_dr_to_dr_v; +bits<4> 
ae_dr_to_dr_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{5} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_dr_to_dr_v{0}; +let Inst{13} = ae_dr_to_dr_v{1}; +let Inst{14} = ae_dr_to_dr_v{2}; +let Inst{15} = ae_dr_to_dr_v{3}; +let Inst{8} = ae_dr_to_dr_v0{0}; +let Inst{9} = ae_dr_to_dr_v0{1}; +let Inst{10} = ae_dr_to_dr_v0{2}; +let Inst{11} = ae_dr_to_dr_v0{3}; +} + + + +def AE_CVTQ56P32S_H : AE_CVTQ56P32S_H_X24<[(set AE_DR:$ae_dr_to_dr_v, (int_xtensa_ae_cvtq56p32s_h AE_DR:$ae_dr_to_dr_v0))]>; + +class AE_CVTQ56P32S_L_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_dr_to_dr_v), (ins AE_DR:$ae_dr_to_dr_v0), "ae_cvtq56p32s.l $ae_dr_to_dr_v, $ae_dr_to_dr_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_dr_to_dr_v; +bits<4> ae_dr_to_dr_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_dr_to_dr_v{0}; +let Inst{13} = ae_dr_to_dr_v{1}; +let Inst{14} = ae_dr_to_dr_v{2}; +let Inst{15} = ae_dr_to_dr_v{3}; +let Inst{8} = ae_dr_to_dr_v0{0}; +let Inst{9} = ae_dr_to_dr_v0{1}; +let Inst{10} = ae_dr_to_dr_v0{2}; +let Inst{11} = ae_dr_to_dr_v0{3}; +} + + + +def AE_CVTQ56P32S_L : AE_CVTQ56P32S_L_X24<[(set AE_DR:$ae_dr_to_dr_v, (int_xtensa_ae_cvtq56p32s_l AE_DR:$ae_dr_to_dr_v0))]>; + +class AE_DB_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, AR:$art), "ae_db $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{14} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; 
+//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_DB : AE_DB_X24<[(set AR:$ars_out, (int_xtensa_ae_db AR:$ars, AR:$art))]>; + +class AE_DB_IC_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, AR:$art), "ae_db.ic $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_DB_IC : AE_DB_IC_X24<[(set AR:$ars_out, (int_xtensa_ae_db_ic AR:$ars, AR:$art))]>; + +class AE_DB_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, AR:$art), "ae_db.ip $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_DB_IP : AE_DB_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_db_ip AR:$ars, AR:$art))]>; + +class AE_DBI_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, imm1_16:$ae_ohba), "ae_dbi $ars, $ae_ohba", pattern>, + Requires<[HasHIFI3]> 
+{ +//operand decl +bits<4> ars; +bits<4> ae_ohba; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{12} = 1; +let Inst{14} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_ohba{0}; +let Inst{5} = ae_ohba{1}; +let Inst{6} = ae_ohba{2}; +let Inst{7} = ae_ohba{3}; +} + + + +def AE_DBI : AE_DBI_X24<[(set AR:$ars_out, (int_xtensa_ae_dbi AR:$ars, timm:$ae_ohba))]>; + +class AE_DBI_IC_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, imm1_16:$ae_ohba), "ae_dbi.ic $ars, $ae_ohba", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> ae_ohba; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_ohba{0}; +let Inst{5} = ae_ohba{1}; +let Inst{6} = ae_ohba{2}; +let Inst{7} = ae_ohba{3}; +} + + + +def AE_DBI_IC : AE_DBI_IC_X24<[(set AR:$ars_out, (int_xtensa_ae_dbi_ic AR:$ars, timm:$ae_ohba))]>; + +class AE_DBI_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, imm1_16:$ae_ohba), "ae_dbi.ip $ars, $ae_ohba", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> ae_ohba; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{12} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let 
Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_ohba{0}; +let Inst{5} = ae_ohba{1}; +let Inst{6} = ae_ohba{2}; +let Inst{7} = ae_ohba{3}; +} + + + +def AE_DBI_IP : AE_DBI_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_dbi_ip AR:$ars, timm:$ae_ohba))]>; + +class AE_DIV64D32_H_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v_out), (ins AE_DR:$ae_arth_v, AE_DR:$ae_arth_v1), "ae_div64d32.h $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; +let Constraints = "$ae_arth_v = $ae_arth_v_out,@earlyclobber $ae_arth_v_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_arth_v{0}; +let Inst{21} = ae_arth_v{1}; +let Inst{22} = ae_arth_v{2}; +let Inst{23} = ae_arth_v{3}; +let Inst{36} = ae_arth_v1{0}; +let Inst{37} = ae_arth_v1{1}; +let Inst{38} = ae_arth_v1{2}; +let Inst{39} = ae_arth_v1{3}; +} + + + +def AE_DIV64D32_H : AE_DIV64D32_H_AE_FORMAT1<[(set AE_DR:$ae_arth_v_out, (int_xtensa_ae_div64d32_h AE_DR:$ae_arth_v, AE_DR:$ae_arth_v1))]>; + +class AE_DIV64D32_L_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v_out), (ins AE_DR:$ae_arth_v, AE_DR:$ae_arth_v1), "ae_div64d32.l $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; +let Constraints = "$ae_arth_v = $ae_arth_v_out,@earlyclobber $ae_arth_v_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{10} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = 
ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_DIV64D32_L : AE_DIV64D32_L_X24<[(set AE_DR:$ae_arth_v_out, (int_xtensa_ae_div64d32_l AE_DR:$ae_arth_v, AE_DR:$ae_arth_v1))]>; + +class AE_EQ16_AE_FORMAT1 pattern> + : XtensaInst64<(outs BR4:$br4), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_eq16 $br4, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> br4; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{20} = 1; +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{62} = 1; +//operands +let Inst{22} = br4{0}; +let Inst{23} = br4{1}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_EQ16 : AE_EQ16_AE_FORMAT1<[(set BR4:$br4, (int_xtensa_ae_eq16 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_EQ32_X24 pattern> + : XtensaAEInst24<(outs BR2:$br2), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_eq32 $br2, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<3> br2; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +//operands +let Inst{13} = br2{0}; +let Inst{14} = br2{1}; +let Inst{15} = br2{2}; +let Inst{8} = ae_cmpp_v0{0}; +let Inst{9} = ae_cmpp_v0{1}; +let Inst{10} = ae_cmpp_v0{2}; +let Inst{11} = ae_cmpp_v0{3}; +let Inst{4} = ae_cmpp_v1{0}; +let Inst{5} = ae_cmpp_v1{1}; +let Inst{6} = ae_cmpp_v1{2}; +let Inst{7} = ae_cmpp_v1{3}; +} + + + +def AE_EQ32 : AE_EQ32_X24<[(set BR2:$br2, (int_xtensa_ae_eq32 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class 
AE_EQ64_AE_FORMAT1 pattern> + : XtensaInst64<(outs BR:$br), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_eq64 $br, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> br; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = br{0}; +let Inst{21} = br{1}; +let Inst{22} = br{2}; +let Inst{23} = br{3}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_EQ64 : AE_EQ64_AE_FORMAT1<[(set BR:$br, (int_xtensa_ae_eq64 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_L16_I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, imm16n_14:$ae_immls16), "ae_l16.i $ae_ls_v, $ars, $ae_immls16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls16; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls16{0}; +let Inst{5} = ae_immls16{1}; +let Inst{6} = ae_immls16{2}; +let Inst{7} = ae_immls16{3}; +} + + + +def AE_L16_I : AE_L16_I_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l16_i AR:$ars, timm:$ae_immls16))]>; + +class AE_L16_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, imm16n_14:$ae_immls16), "ae_l16.ip $ae_ls_v, $ars, $ae_immls16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> 
ars; +bits<4> ae_immls16; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls16{0}; +let Inst{5} = ae_immls16{1}; +let Inst{6} = ae_immls16{2}; +let Inst{7} = ae_immls16{3}; +} + + + +def AE_L16_IP : AE_L16_IP_X24<[]>; + +class AE_L16_X_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v), (ins AR:$ars, AR:$art), "ae_l16.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{30} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L16_X : AE_L16_X_AE_FORMAT48<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l16_x AR:$ars, AR:$art))]>; + +class AE_L16_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l16.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 
1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{30} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L16_XC : AE_L16_XC_AE_FORMAT48<[]>; + +class AE_L16_XP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l16.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L16_XP : AE_L16_XP_AE_FORMAT48<[]>; + +class AE_L16M_I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, imm16n_14:$ae_immls16), "ae_l16m.i $ae_ls_v, $ars, $ae_immls16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls16; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} 
= ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls16{0}; +let Inst{5} = ae_immls16{1}; +let Inst{6} = ae_immls16{2}; +let Inst{7} = ae_immls16{3}; +} + + + +def AE_L16M_I : AE_L16M_I_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l16m_i AR:$ars, timm:$ae_immls16))]>; + +class AE_L16M_IU_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, imm16n_14:$ae_immls16), "ae_l16m.iu $ae_ls_v, $ars, $ae_immls16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls16; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls16{0}; +let Inst{5} = ae_immls16{1}; +let Inst{6} = ae_immls16{2}; +let Inst{7} = ae_immls16{3}; +} + + + +def AE_L16M_IU : AE_L16M_IU_X24<[]>; + +class AE_L16M_X_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v), (ins AR:$ars, AR:$art), "ae_l16m.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = 
art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L16M_X : AE_L16M_X_AE_FORMAT48<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l16m_x AR:$ars, AR:$art))]>; + +class AE_L16M_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l16m.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L16M_XC : AE_L16M_XC_AE_FORMAT48<[]>; + +class AE_L16M_XU_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l16m.xu $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L16M_XU : AE_L16M_XU_X24<[]>; + 
+class AE_L16X2M_I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, imm32n_28:$ae_immls32), "ae_l16x2m.i $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_L16X2M_I : AE_L16X2M_I_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l16x2m_i AR:$ars, timm:$ae_immls32))]>; + +class AE_L16X2M_IU_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, imm32n_28:$ae_immls32), "ae_l16x2m.iu $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_L16X2M_IU : AE_L16X2M_IU_X24<[]>; + +class AE_L16X2M_X_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, AR:$art), "ae_l16x2m.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl 
+bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L16X2M_X : AE_L16X2M_X_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l16x2m_x AR:$ars, AR:$art))]>; + +class AE_L16X2M_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l16x2m.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L16X2M_XC : AE_L16X2M_XC_AE_FORMAT48<[]>; + +class AE_L16X2M_XU_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l16x2m.xu $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let 
DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L16X2M_XU : AE_L16X2M_XU_X24<[]>; + +class AE_L16X4_I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, imm64n_56:$ae_immls64), "ae_l16x4.i $ae_ls_v, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls64; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{6} = ae_immls64{2}; +let Inst{7} = ae_immls64{3}; +} + + + +def AE_L16X4_I : AE_L16X4_I_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l16x4_i AR:$ars, timm:$ae_immls64))]>; + +class AE_L16X4_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, imm0_56:$ae_immls64pos), "ae_l16x4.ip $ae_ls_v, $ars, $ae_immls64pos", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<3> ae_immls64pos; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{20} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let 
Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64pos{0}; +let Inst{5} = ae_immls64pos{1}; +let Inst{6} = ae_immls64pos{2}; +} + + + +def AE_L16X4_IP : AE_L16X4_IP_X24<[]>; + +class AE_L16X4_RIC_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars), "ae_l16x4.ric $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{5} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_L16X4_RIC : AE_L16X4_RIC_AE_FORMAT<[]>; + +class AE_L16X4_RIP_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars), "ae_l16x4.rip $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_L16X4_RIP : AE_L16X4_RIP_AE_FORMAT<[]>; + +class AE_L16X4_X_AE_FORMAT48 pattern> + : 
XtensaInst48<(outs AE_DR:$ae_ls_v), (ins AR:$ars, AR:$art), "ae_l16x4.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L16X4_X : AE_L16X4_X_AE_FORMAT48<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l16x4_x AR:$ars, AR:$art))]>; + +class AE_L16X4_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l16x4.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L16X4_XC : AE_L16X4_XC_AE_FORMAT48<[]>; + +class AE_L16X4_XP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l16x4.xp $ae_ls_v, $ars, $art", pattern>, + 
Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L16X4_XP : AE_L16X4_XP_X24<[]>; + +class AE_L32_I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, imm32n_28:$ae_immls32), "ae_l32.i $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_L32_I : AE_L32_I_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l32_i AR:$ars, timm:$ae_immls32))]>; + +class AE_L32_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, imm32n_28:$ae_immls32), "ae_l32.ip $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + 
+//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_L32_IP : AE_L32_IP_X24<[]>; + +class AE_L32_X_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, AR:$art), "ae_l32.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32_X : AE_L32_X_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l32_x AR:$ars, AR:$art))]>; + +class AE_L32_XC_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l32.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let 
Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32_XC : AE_L32_XC_X24<[]>; + +class AE_L32_XP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l32.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32_XP : AE_L32_XP_AE_FORMAT48<[]>; + +class AE_L32F24_I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, imm32n_28:$ae_immls32), "ae_l32f24.i $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = 
ae_immls32{3}; +} + + + +def AE_L32F24_I : AE_L32F24_I_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l32f24_i AR:$ars, timm:$ae_immls32))]>; + +class AE_L32F24_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, imm32n_28:$ae_immls32), "ae_l32f24.ip $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_L32F24_IP : AE_L32F24_IP_X24<[]>; + +class AE_L32F24_X_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v), (ins AR:$ars, AR:$art), "ae_l32f24.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32F24_X : AE_L32F24_X_AE_FORMAT48<[(set AE_DR:$ae_ls_v, 
(int_xtensa_ae_l32f24_x AR:$ars, AR:$art))]>; + +class AE_L32F24_XC_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l32f24.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32F24_XC : AE_L32F24_XC_X24<[]>; + +class AE_L32F24_XP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l32f24.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{31} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32F24_XP : AE_L32F24_XP_AE_FORMAT48<[]>; + +class AE_L32M_I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, imm32n_28:$ae_immls32), "ae_l32m.i 
$ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_L32M_I : AE_L32M_I_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l32m_i AR:$ars, timm:$ae_immls32))]>; + +class AE_L32M_IU_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, imm32n_28:$ae_immls32), "ae_l32m.iu $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_L32M_IU : AE_L32M_IU_X24<[]>; + +class AE_L32M_X_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, AR:$art), "ae_l32m.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let 
Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32M_X : AE_L32M_X_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l32m_x AR:$ars, AR:$art))]>; + +class AE_L32M_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l32m.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{31} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32M_XC : AE_L32M_XC_AE_FORMAT48<[]>; + +class AE_L32M_XU_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l32m.xu $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} 
= ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32M_XU : AE_L32M_XU_X24<[]>; + +class AE_L32X2_I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, imm64n_56:$ae_immls64), "ae_l32x2.i $ae_ls_v, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls64; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{6} = ae_immls64{2}; +let Inst{7} = ae_immls64{3}; +} + + + +def AE_L32X2_I : AE_L32X2_I_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l32x2_i AR:$ars, timm:$ae_immls64))]>; + +class AE_L32X2_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, imm0_56:$ae_immls64pos), "ae_l32x2.ip $ae_ls_v, $ars, $ae_immls64pos", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<3> ae_immls64pos; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64pos{0}; +let Inst{5} = ae_immls64pos{1}; +let Inst{6} = 
ae_immls64pos{2}; +} + + + +def AE_L32X2_IP : AE_L32X2_IP_X24<[]>; + +class AE_L32X2_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars), "ae_l32x2.ric $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_L32X2_RIC : AE_L32X2_RIC_AE_FORMAT48<[]>; + +class AE_L32X2_RIP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars), "ae_l32x2.rip $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_L32X2_RIP : AE_L32X2_RIP_X24<[]>; + +class AE_L32X2_X_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, AR:$art), "ae_l32x2.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let 
Inst{2} = 1; +let Inst{17} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32X2_X : AE_L32X2_X_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l32x2_x AR:$ars, AR:$art))]>; + +class AE_L32X2_XC_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l32x2.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32X2_XC : AE_L32X2_XC_X24<[]>; + +class AE_L32X2_XP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l32x2.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = 
art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32X2_XP : AE_L32X2_XP_X24<[]>; + +class AE_L32X2F24_I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, imm64n_56:$ae_immls64), "ae_l32x2f24.i $ae_ls_v, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls64; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{6} = ae_immls64{2}; +let Inst{7} = ae_immls64{3}; +} + + + +def AE_L32X2F24_I : AE_L32X2F24_I_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l32x2f24_i AR:$ars, timm:$ae_immls64))]>; + +class AE_L32X2F24_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, imm0_56:$ae_immls64pos), "ae_l32x2f24.ip $ae_ls_v, $ars, $ae_immls64pos", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<3> ae_immls64pos; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64pos{0}; +let Inst{5} = ae_immls64pos{1}; +let Inst{6} = ae_immls64pos{2}; +} + + + +def AE_L32X2F24_IP : AE_L32X2F24_IP_X24<[]>; + +class AE_L32X2F24_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs 
AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars), "ae_l32x2f24.ric $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_L32X2F24_RIC : AE_L32X2F24_RIC_AE_FORMAT48<[]>; + +class AE_L32X2F24_RIP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars), "ae_l32x2f24.rip $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{5} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_L32X2F24_RIP : AE_L32X2F24_RIP_X24<[]>; + +class AE_L32X2F24_X_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, AR:$art), "ae_l32x2f24.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{22} = 1; +let Inst{23} = 1; 
+//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32X2F24_X : AE_L32X2F24_X_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l32x2f24_x AR:$ars, AR:$art))]>; + +class AE_L32X2F24_XC_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l32x2f24.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32X2F24_XC : AE_L32X2F24_XC_X24<[]>; + +class AE_L32X2F24_XP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l32x2f24.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; 
+let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L32X2F24_XP : AE_L32X2F24_XP_X24<[]>; + +class AE_L64_I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v), (ins AR:$ars, imm64n_56:$ae_immls64), "ae_l64.i $ae_ls_v, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls64; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{6} = ae_immls64{2}; +let Inst{7} = ae_immls64{3}; +} + + + +def AE_L64_I : AE_L64_I_X24<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l64_i AR:$ars, timm:$ae_immls64))]>; + +class AE_L64_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, imm64n_56:$ae_immls64), "ae_l64.ip $ae_ls_v, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls64; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{6} = ae_immls64{2}; +let Inst{7} = ae_immls64{3}; +} + + + +def AE_L64_IP : AE_L64_IP_X24<[]>; + +class AE_L64_X_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v), (ins AR:$ars, 
AR:$art), "ae_l64.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L64_X : AE_L64_X_AE_FORMAT48<[(set AE_DR:$ae_ls_v, (int_xtensa_ae_l64_x AR:$ars, AR:$art))]>; + +class AE_L64_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l64.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L64_XC : AE_L64_XC_AE_FORMAT48<[]>; + +class AE_L64_XP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_v, AR:$ars_out), (ins AR:$ars, AR:$art), "ae_l64.xp $ae_ls_v, $ars, 
$art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_L64_XP : AE_L64_XP_AE_FORMAT48<[]>; + +class AE_LA16X4_IC_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la16x4.ic $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA16X4_IC : AE_LA16X4_IC_X24<[]>; + +class AE_LA16X4_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la16x4.ip $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand 
decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA16X4_IP : AE_LA16X4_IP_X24<[]>; + +class AE_LA16X4_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la16x4.ric $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA16X4_RIC : AE_LA16X4_RIC_AE_FORMAT48<[]>; + +class AE_LA16X4_RIP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la16x4.rip $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ 
+//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA16X4_RIP : AE_LA16X4_RIP_X24<[]>; + +class AE_LA16X4NEG_PC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_uu, AR:$ars_out), (ins AR:$ars), "ae_la16x4neg.pc $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA16X4NEG_PC : AE_LA16X4NEG_PC_AE_FORMAT48<[]>; + +class AE_LA16X4POS_PC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_uu, AR:$ars_out), (ins AR:$ars), "ae_la16x4pos.pc $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{12} = 1; +let 
Inst{13} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA16X4POS_PC : AE_LA16X4POS_PC_AE_FORMAT48<[]>; + +class AE_LA24_IC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la24.ic $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24_IC : AE_LA24_IC_AE_FORMAT48<[]>; + +class AE_LA24_IP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la24.ip $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{33} = 1; +let Inst{34} = 
1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24_IP : AE_LA24_IP_AE_FORMAT48<[]>; + +class AE_LA24_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la24.ric $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24_RIC : AE_LA24_RIC_AE_FORMAT48<[]>; + +class AE_LA24_RIP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la24.rip $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let 
Inst{5} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24_RIP : AE_LA24_RIP_AE_FORMAT48<[]>; + +class AE_LA24NEG_PC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_uu, AR:$ars_out), (ins AR:$ars), "ae_la24neg.pc $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24NEG_PC : AE_LA24NEG_PC_AE_FORMAT48<[]>; + +class AE_LA24POS_PC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_uu, AR:$ars_out), (ins AR:$ars), "ae_la24pos.pc $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def 
AE_LA24POS_PC : AE_LA24POS_PC_AE_FORMAT48<[]>; + +class AE_LA24X2_IC_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la24x2.ic $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24X2_IC : AE_LA24X2_IC_X24<[]>; + +class AE_LA24X2_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la24x2.ip $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24X2_IP : AE_LA24X2_IP_X24<[]>; + +class AE_LA24X2_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins 
AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la24x2.ric $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{29} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24X2_RIC : AE_LA24X2_RIC_AE_FORMAT48<[]>; + +class AE_LA24X2_RIP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la24x2.rip $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24X2_RIP : AE_LA24X2_RIP_AE_FORMAT48<[]>; + +class AE_LA24X2NEG_PC_AE_FORMAT48 pattern> + : 
XtensaInst48<(outs AE_VALIGN:$ae_ls_uu, AR:$ars_out), (ins AR:$ars), "ae_la24x2neg.pc $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24X2NEG_PC : AE_LA24X2NEG_PC_AE_FORMAT48<[]>; + +class AE_LA24X2POS_PC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_uu, AR:$ars_out), (ins AR:$ars), "ae_la24x2pos.pc $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA24X2POS_PC : AE_LA24X2POS_PC_AE_FORMAT48<[]>; + +class AE_LA32X2_IC_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la32x2.ic $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let 
mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA32X2_IC : AE_LA32X2_IC_X24<[]>; + +class AE_LA32X2_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la32x2.ip $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA32X2_IP : AE_LA32X2_IP_X24<[]>; + +class AE_LA32X2_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la32x2.ric $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; 
+//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA32X2_RIC : AE_LA32X2_RIC_AE_FORMAT48<[]>; + +class AE_LA32X2_RIP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la32x2.rip $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA32X2_RIP : AE_LA32X2_RIP_AE_FORMAT48<[]>; + +class AE_LA32X2F24_IC_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la32x2f24.ic $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + 
+//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA32X2F24_IC : AE_LA32X2F24_IC_X24<[]>; + +class AE_LA32X2F24_IP_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la32x2f24.ip $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA32X2F24_IP : AE_LA32X2F24_IP_X24<[]>; + +class AE_LA32X2F24_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la32x2f24.ric $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{30} = 1; +let Inst{32} 
= 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA32X2F24_RIC : AE_LA32X2F24_RIC_AE_FORMAT48<[]>; + +class AE_LA32X2F24_RIP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_DR:$ae_ls_av, AE_VALIGN:$ae_ls_uu_out, AR:$ars_out), (ins AE_VALIGN:$ae_ls_uu, AR:$ars), "ae_la32x2f24.rip $ae_ls_av, $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_av; +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ae_ls_uu = $ae_ls_uu_out,@earlyclobber $ae_ls_uu_out, $ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_av{0}; +let Inst{13} = ae_ls_av{1}; +let Inst{14} = ae_ls_av{2}; +let Inst{15} = ae_ls_av{3}; +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA32X2F24_RIP : AE_LA32X2F24_RIP_AE_FORMAT48<[]>; + +class AE_LA32X2NEG_PC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_uu, AR:$ars_out), (ins AR:$ars), "ae_la32x2neg.pc $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{33} = 1; 
+let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA32X2NEG_PC : AE_LA32X2NEG_PC_AE_FORMAT48<[]>; + +class AE_LA32X2POS_PC_X24 pattern> + : XtensaAEInst24<(outs AE_VALIGN:$ae_ls_uu, AR:$ars_out), (ins AR:$ars), "ae_la32x2pos.pc $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_uu; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{12} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA32X2POS_PC : AE_LA32X2POS_PC_X24<[]>; + +class AE_LA64_PP_X24 pattern> + : XtensaAEInst24<(outs AE_VALIGN:$ae_ls_uu), (ins AR:$ars), "ae_la64.pp $ae_ls_uu, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_uu; +bits<4> ars; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{13} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = ae_ls_uu{0}; +let Inst{5} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_LA64_PP : AE_LA64_PP_X24<[(set AE_VALIGN:$ae_ls_uu, (int_xtensa_ae_la64_pp AR:$ars))]>; + +class AE_LALIGN64_I_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_uu), (ins AR:$ars, imm64n_56:$ae_immls64), "ae_lalign64.i $ae_ls_uu, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ 
+//operand decl +bits<2> ae_ls_uu; +bits<4> ars; +bits<4> ae_immls64; + +let mayLoad = 1; +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{30} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{6} = ae_ls_uu{0}; +let Inst{7} = ae_ls_uu{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{28} = ae_immls64{2}; +let Inst{29} = ae_immls64{3}; +} + + + +def AE_LALIGN64_I : AE_LALIGN64_I_AE_FORMAT48<[(set AE_VALIGN:$ae_ls_uu, (int_xtensa_ae_lalign64_i AR:$ars, timm:$ae_immls64))]>; + +class AE_LB_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AR:$art), "ae_lb $arr, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{16} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_LB : AE_LB_X24<[(set AR:$arr, (int_xtensa_ae_lb AR:$art))]>; + +class AE_LBI_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins imm1_16:$ae_ohba), "ae_lbi $arr, $ae_ohba", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_ohba; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{16} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{4} = ae_ohba{0}; +let Inst{5} = ae_ohba{1}; +let Inst{6} = ae_ohba{2}; +let Inst{7} = ae_ohba{3}; +} + + + +def AE_LBI : 
AE_LBI_X24<[(set AR:$arr, (int_xtensa_ae_lbi timm:$ae_ohba))]>; + +class AE_LBK_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AR:$ars, AR:$art), "ae_lbk $arr, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ars; +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_LBK : AE_LBK_X24<[(set AR:$arr, (int_xtensa_ae_lbk AR:$ars, AR:$art))]>; + +class AE_LBKI_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AR:$ars, imm1_16:$ae_ohba), "ae_lbki $arr, $ars, $ae_ohba", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ars; +bits<4> ae_ohba; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_ohba{0}; +let Inst{5} = ae_ohba{1}; +let Inst{6} = ae_ohba{2}; +let Inst{7} = ae_ohba{3}; +} + + + +def AE_LBKI : AE_LBKI_X24<[(set AR:$arr, (int_xtensa_ae_lbki AR:$ars, timm:$ae_ohba))]>; + +class AE_LBS_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AR:$art), "ae_lbs $arr, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{16} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let 
Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_LBS : AE_LBS_X24<[(set AR:$arr, (int_xtensa_ae_lbs AR:$art))]>; + +class AE_LBSI_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins imm1_16:$ae_ohba), "ae_lbsi $arr, $ae_ohba", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_ohba; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{16} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{4} = ae_ohba{0}; +let Inst{5} = ae_ohba{1}; +let Inst{6} = ae_ohba{2}; +let Inst{7} = ae_ohba{3}; +} + + + +def AE_LBSI : AE_LBSI_X24<[(set AR:$arr, (int_xtensa_ae_lbsi timm:$ae_ohba))]>; + +class AE_LE16_AE_FORMAT1 pattern> + : XtensaInst64<(outs BR4:$br4), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_le16 $br4, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> br4; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{62} = 1; +//operands +let Inst{22} = br4{0}; +let Inst{23} = br4{1}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_LE16 : AE_LE16_AE_FORMAT1<[(set BR4:$br4, (int_xtensa_ae_le16 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_LE32_AE_FORMAT1 pattern> + : XtensaInst64<(outs BR2:$br2), (ins AE_DR:$ae_cmpp_v0, 
AE_DR:$ae_cmpp_v1), "ae_le32 $br2, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<3> br2; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{20} = 1; +let Inst{57} = 1; +let Inst{62} = 1; +//operands +let Inst{21} = br2{0}; +let Inst{22} = br2{1}; +let Inst{23} = br2{2}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_LE32 : AE_LE32_AE_FORMAT1<[(set BR2:$br2, (int_xtensa_ae_le32 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_LE64_X24 pattern> + : XtensaAEInst24<(outs BR:$br), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_le64 $br, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> br; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = br{0}; +let Inst{13} = br{1}; +let Inst{14} = br{2}; +let Inst{15} = br{3}; +let Inst{8} = ae_cmpp_v0{0}; +let Inst{9} = ae_cmpp_v0{1}; +let Inst{10} = ae_cmpp_v0{2}; +let Inst{11} = ae_cmpp_v0{3}; +let Inst{4} = ae_cmpp_v1{0}; +let Inst{5} = ae_cmpp_v1{1}; +let Inst{6} = ae_cmpp_v1{2}; +let Inst{7} = ae_cmpp_v1{3}; +} + + + +def AE_LE64 : AE_LE64_X24<[(set BR:$br, (int_xtensa_ae_le64 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_LT16_AE_FORMAT1 pattern> + : XtensaInst64<(outs BR4:$br4), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_lt16 $br4, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> br4; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let 
Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{62} = 1; +//operands +let Inst{22} = br4{0}; +let Inst{23} = br4{1}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_LT16 : AE_LT16_AE_FORMAT1<[(set BR4:$br4, (int_xtensa_ae_lt16 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_LT32_X24 pattern> + : XtensaAEInst24<(outs BR2:$br2), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_lt32 $br2, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<3> br2; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{12} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +//operands +let Inst{13} = br2{0}; +let Inst{14} = br2{1}; +let Inst{15} = br2{2}; +let Inst{8} = ae_cmpp_v0{0}; +let Inst{9} = ae_cmpp_v0{1}; +let Inst{10} = ae_cmpp_v0{2}; +let Inst{11} = ae_cmpp_v0{3}; +let Inst{4} = ae_cmpp_v1{0}; +let Inst{5} = ae_cmpp_v1{1}; +let Inst{6} = ae_cmpp_v1{2}; +let Inst{7} = ae_cmpp_v1{3}; +} + + + +def AE_LT32 : AE_LT32_X24<[(set BR2:$br2, (int_xtensa_ae_lt32 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_LT64_X24 pattern> + : XtensaAEInst24<(outs BR:$br), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_lt64 $br, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> br; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = br{0}; +let Inst{13} = br{1}; +let Inst{14} = br{2}; +let Inst{15} = br{3}; +let Inst{8} = ae_cmpp_v0{0}; +let Inst{9} = ae_cmpp_v0{1}; +let Inst{10} = ae_cmpp_v0{2}; +let Inst{11} = 
ae_cmpp_v0{3}; +let Inst{4} = ae_cmpp_v1{0}; +let Inst{5} = ae_cmpp_v1{1}; +let Inst{6} = ae_cmpp_v1{2}; +let Inst{7} = ae_cmpp_v1{3}; +} + + + +def AE_LT64 : AE_LT64_X24<[(set BR:$br, (int_xtensa_ae_lt64 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_MAX32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_cmpp_v), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_max32 $ae_cmpp_v, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmpp_v; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_cmpp_v{0}; +let Inst{13} = ae_cmpp_v{1}; +let Inst{14} = ae_cmpp_v{2}; +let Inst{15} = ae_cmpp_v{3}; +let Inst{8} = ae_cmpp_v0{0}; +let Inst{9} = ae_cmpp_v0{1}; +let Inst{10} = ae_cmpp_v0{2}; +let Inst{11} = ae_cmpp_v0{3}; +let Inst{4} = ae_cmpp_v1{0}; +let Inst{5} = ae_cmpp_v1{1}; +let Inst{6} = ae_cmpp_v1{2}; +let Inst{7} = ae_cmpp_v1{3}; +} + + + +def AE_MAX32 : AE_MAX32_X24<[(set AE_DR:$ae_cmpp_v, (int_xtensa_ae_max32 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_MAX64_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_cmpp_v), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_max64 $ae_cmpp_v, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmpp_v; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_cmpp_v{0}; +let Inst{21} = ae_cmpp_v{1}; +let Inst{22} = ae_cmpp_v{2}; +let Inst{23} = ae_cmpp_v{3}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let 
Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_MAX64 : AE_MAX64_AE_FORMAT1<[(set AE_DR:$ae_cmpp_v, (int_xtensa_ae_max64 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_MAXABS32S_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_cmpp_v), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_maxabs32s $ae_cmpp_v, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmpp_v; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_cmpp_v{0}; +let Inst{21} = ae_cmpp_v{1}; +let Inst{22} = ae_cmpp_v{2}; +let Inst{23} = ae_cmpp_v{3}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_MAXABS32S : AE_MAXABS32S_AE_FORMAT1<[(set AE_DR:$ae_cmpp_v, (int_xtensa_ae_maxabs32s AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_MAXABS64S_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_cmpp_v), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_maxabs64s $ae_cmpp_v, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmpp_v; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_cmpp_v{0}; +let Inst{21} = ae_cmpp_v{1}; +let Inst{22} = ae_cmpp_v{2}; +let Inst{23} = ae_cmpp_v{3}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = 
ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_MAXABS64S : AE_MAXABS64S_AE_FORMAT1<[(set AE_DR:$ae_cmpp_v, (int_xtensa_ae_maxabs64s AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_MIN32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_cmpp_v), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_min32 $ae_cmpp_v, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmpp_v; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_cmpp_v{0}; +let Inst{13} = ae_cmpp_v{1}; +let Inst{14} = ae_cmpp_v{2}; +let Inst{15} = ae_cmpp_v{3}; +let Inst{8} = ae_cmpp_v0{0}; +let Inst{9} = ae_cmpp_v0{1}; +let Inst{10} = ae_cmpp_v0{2}; +let Inst{11} = ae_cmpp_v0{3}; +let Inst{4} = ae_cmpp_v1{0}; +let Inst{5} = ae_cmpp_v1{1}; +let Inst{6} = ae_cmpp_v1{2}; +let Inst{7} = ae_cmpp_v1{3}; +} + + + +def AE_MIN32 : AE_MIN32_X24<[(set AE_DR:$ae_cmpp_v, (int_xtensa_ae_min32 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_MIN64_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_cmpp_v), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_min64 $ae_cmpp_v, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmpp_v; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_cmpp_v{0}; +let Inst{21} = ae_cmpp_v{1}; +let Inst{22} = ae_cmpp_v{2}; +let Inst{23} = ae_cmpp_v{3}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = 
ae_cmpp_v1{3}; +} + + + +def AE_MIN64 : AE_MIN64_AE_FORMAT1<[(set AE_DR:$ae_cmpp_v, (int_xtensa_ae_min64 AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_MINABS32S_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_cmpp_v), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_minabs32s $ae_cmpp_v, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmpp_v; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_cmpp_v{0}; +let Inst{21} = ae_cmpp_v{1}; +let Inst{22} = ae_cmpp_v{2}; +let Inst{23} = ae_cmpp_v{3}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_MINABS32S : AE_MINABS32S_AE_FORMAT1<[(set AE_DR:$ae_cmpp_v, (int_xtensa_ae_minabs32s AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_MINABS64S_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_cmpp_v), (ins AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1), "ae_minabs64s $ae_cmpp_v, $ae_cmpp_v0, $ae_cmpp_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmpp_v; +bits<4> ae_cmpp_v0; +bits<4> ae_cmpp_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_cmpp_v{0}; +let Inst{21} = ae_cmpp_v{1}; +let Inst{22} = ae_cmpp_v{2}; +let Inst{23} = ae_cmpp_v{3}; +let Inst{36} = ae_cmpp_v0{0}; +let Inst{37} = ae_cmpp_v0{1}; +let Inst{38} = ae_cmpp_v0{2}; +let Inst{39} = ae_cmpp_v0{3}; +let Inst{28} = ae_cmpp_v1{0}; +let Inst{29} = ae_cmpp_v1{1}; +let 
Inst{30} = ae_cmpp_v1{2}; +let Inst{31} = ae_cmpp_v1{3}; +} + + + +def AE_MINABS64S : AE_MINABS64S_AE_FORMAT1<[(set AE_DR:$ae_cmpp_v, (int_xtensa_ae_minabs64s AE_DR:$ae_cmpp_v0, AE_DR:$ae_cmpp_v1))]>; + +class AE_MOV_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_to_dr_v), (ins AE_DR:$ae_to_dr_v0), "ae_mov $ae_to_dr_v, $ae_to_dr_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_to_dr_v; +bits<4> ae_to_dr_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_to_dr_v{0}; +let Inst{13} = ae_to_dr_v{1}; +let Inst{14} = ae_to_dr_v{2}; +let Inst{15} = ae_to_dr_v{3}; +let Inst{8} = ae_to_dr_v0{0}; +let Inst{9} = ae_to_dr_v0{1}; +let Inst{10} = ae_to_dr_v0{2}; +let Inst{11} = ae_to_dr_v0{3}; +} + + + +def AE_MOV : AE_MOV_X24<[(set AE_DR:$ae_to_dr_v, (int_xtensa_ae_mov AE_DR:$ae_to_dr_v0))]>; + +class AE_MOVAD16_0_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_movad16.0 $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ae_dr_to_ar_v0{0}; +let Inst{9} = ae_dr_to_ar_v0{1}; +let Inst{10} = ae_dr_to_ar_v0{2}; +let Inst{11} = ae_dr_to_ar_v0{3}; +} + + + +def AE_MOVAD16_0 : AE_MOVAD16_0_X24<[(set AR:$arr, (int_xtensa_ae_movad16_0 AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_MOVAD16_1_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_movad16.1 $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; 
+ +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{11} = 1; +let Inst{31} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{4} = ae_dr_to_ar_v0{0}; +let Inst{5} = ae_dr_to_ar_v0{1}; +let Inst{6} = ae_dr_to_ar_v0{2}; +let Inst{7} = ae_dr_to_ar_v0{3}; +} + + + +def AE_MOVAD16_1 : AE_MOVAD16_1_AE_FORMAT48<[(set AR:$arr, (int_xtensa_ae_movad16_1 AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_MOVAD16_2_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_movad16.2 $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ae_dr_to_ar_v0{0}; +let Inst{9} = ae_dr_to_ar_v0{1}; +let Inst{10} = ae_dr_to_ar_v0{2}; +let Inst{11} = ae_dr_to_ar_v0{3}; +} + + + +def AE_MOVAD16_2 : AE_MOVAD16_2_X24<[(set AR:$arr, (int_xtensa_ae_movad16_2 AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_MOVAD16_3_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_movad16.3 $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{5} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ae_dr_to_ar_v0{0}; +let Inst{9} = ae_dr_to_ar_v0{1}; +let Inst{10} = ae_dr_to_ar_v0{2}; +let Inst{11} = ae_dr_to_ar_v0{3}; +} + + + +def 
AE_MOVAD16_3 : AE_MOVAD16_3_X24<[(set AR:$arr, (int_xtensa_ae_movad16_3 AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_MOVAD32_H_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_movad32.h $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ae_dr_to_ar_v0{0}; +let Inst{9} = ae_dr_to_ar_v0{1}; +let Inst{10} = ae_dr_to_ar_v0{2}; +let Inst{11} = ae_dr_to_ar_v0{3}; +} + + + +def AE_MOVAD32_H : AE_MOVAD32_H_X24<[(set AR:$arr, (int_xtensa_ae_movad32_h AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_MOVAD32_L_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_movad32.l $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ae_dr_to_ar_v0{0}; +let Inst{9} = ae_dr_to_ar_v0{1}; +let Inst{10} = ae_dr_to_ar_v0{2}; +let Inst{11} = ae_dr_to_ar_v0{3}; +} + + + +def AE_MOVAD32_L : AE_MOVAD32_L_X24<[(set AR:$arr, (int_xtensa_ae_movad32_l AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_MOVALIGN_X24 pattern> + : XtensaAEInst24<(outs AE_VALIGN:$ae_uu_uu), (ins AE_VALIGN:$ae_uu_v), "ae_movalign $ae_uu_uu, $ae_uu_v", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_uu_uu; +bits<2> ae_uu_v; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{19} = 1; 
+let Inst{21} = 1; +//operands +let Inst{6} = ae_uu_uu{0}; +let Inst{7} = ae_uu_uu{1}; +let Inst{4} = ae_uu_v{0}; +let Inst{5} = ae_uu_v{1}; +} + + + +def AE_MOVALIGN : AE_MOVALIGN_X24<[(set AE_VALIGN:$ae_uu_uu, (int_xtensa_ae_movalign AE_VALIGN:$ae_uu_v))]>; + +class AE_MOVDA16_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ar_to_dr_v), (ins AR:$ars), "ae_movda16 $ae_ar_to_dr_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ar_to_dr_v; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{5} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ar_to_dr_v{0}; +let Inst{13} = ae_ar_to_dr_v{1}; +let Inst{14} = ae_ar_to_dr_v{2}; +let Inst{15} = ae_ar_to_dr_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_MOVDA16 : AE_MOVDA16_X24<[(set AE_DR:$ae_ar_to_dr_v, (int_xtensa_ae_movda16 AR:$ars))]>; + +class AE_MOVDA16X2_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ar_to_dr_v), (ins AR:$ars, AR:$art), "ae_movda16x2 $ae_ar_to_dr_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ar_to_dr_v; +bits<4> ars; +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +//operands +let Inst{12} = ae_ar_to_dr_v{0}; +let Inst{13} = ae_ar_to_dr_v{1}; +let Inst{14} = ae_ar_to_dr_v{2}; +let Inst{15} = ae_ar_to_dr_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_MOVDA16X2 : AE_MOVDA16X2_X24<[(set AE_DR:$ae_ar_to_dr_v, (int_xtensa_ae_movda16x2 AR:$ars, AR:$art))]>; + +class AE_MOVDA32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ar_to_dr_v), (ins AR:$ars), "ae_movda32 $ae_ar_to_dr_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ 
+//operand decl +bits<4> ae_ar_to_dr_v; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ar_to_dr_v{0}; +let Inst{13} = ae_ar_to_dr_v{1}; +let Inst{14} = ae_ar_to_dr_v{2}; +let Inst{15} = ae_ar_to_dr_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_MOVDA32 : AE_MOVDA32_X24<[(set AE_DR:$ae_ar_to_dr_v, (int_xtensa_ae_movda32 AR:$ars))]>; + +class AE_MOVDA32X2_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ar_to_dr_v), (ins AR:$ars, AR:$art), "ae_movda32x2 $ae_ar_to_dr_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ar_to_dr_v; +bits<4> ars; +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +//operands +let Inst{12} = ae_ar_to_dr_v{0}; +let Inst{13} = ae_ar_to_dr_v{1}; +let Inst{14} = ae_ar_to_dr_v{2}; +let Inst{15} = ae_ar_to_dr_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_MOVDA32X2 : AE_MOVDA32X2_X24<[(set AE_DR:$ae_ar_to_dr_v, (int_xtensa_ae_movda32x2 AR:$ars, AR:$art))]>; + +class AE_MOVF16X4_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_cmov_v_out), (ins AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR4:$bt4), "ae_movf16x4 $ae_cmov_v, $ae_cmov_v0, $bt4", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmov_v; +bits<4> ae_cmov_v0; +bits<2> bt4; +let Constraints = "$ae_cmov_v = $ae_cmov_v_out,@earlyclobber $ae_cmov_v_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{62} = 1; 
+//operands +let Inst{20} = ae_cmov_v{0}; +let Inst{21} = ae_cmov_v{1}; +let Inst{22} = ae_cmov_v{2}; +let Inst{23} = ae_cmov_v{3}; +let Inst{36} = ae_cmov_v0{0}; +let Inst{37} = ae_cmov_v0{1}; +let Inst{38} = ae_cmov_v0{2}; +let Inst{39} = ae_cmov_v0{3}; +let Inst{30} = bt4{0}; +let Inst{31} = bt4{1}; +} + + + +def AE_MOVF16X4 : AE_MOVF16X4_AE_FORMAT1<[(set AE_DR:$ae_cmov_v_out, (int_xtensa_ae_movf16x4 AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR4:$bt4))]>; + +class AE_MOVF32X2_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_cmov_v_out), (ins AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR2:$bt2), "ae_movf32x2 $ae_cmov_v, $ae_cmov_v0, $bt2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmov_v; +bits<4> ae_cmov_v0; +bits<3> bt2; +let Constraints = "$ae_cmov_v = $ae_cmov_v_out,@earlyclobber $ae_cmov_v_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_cmov_v{0}; +let Inst{13} = ae_cmov_v{1}; +let Inst{14} = ae_cmov_v{2}; +let Inst{15} = ae_cmov_v{3}; +let Inst{8} = ae_cmov_v0{0}; +let Inst{9} = ae_cmov_v0{1}; +let Inst{10} = ae_cmov_v0{2}; +let Inst{11} = ae_cmov_v0{3}; +let Inst{5} = bt2{0}; +let Inst{6} = bt2{1}; +let Inst{7} = bt2{2}; +} + + + +def AE_MOVF32X2 : AE_MOVF32X2_X24<[(set AE_DR:$ae_cmov_v_out, (int_xtensa_ae_movf32x2 AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR2:$bt2))]>; + +class AE_MOVF64_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_cmov_v_out), (ins AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR:$bt), "ae_movf64 $ae_cmov_v, $ae_cmov_v0, $bt", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmov_v; +bits<4> ae_cmov_v0; +bits<4> bt; +let Constraints = "$ae_cmov_v = $ae_cmov_v_out,@earlyclobber $ae_cmov_v_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{62} = 1; +//operands +let 
Inst{20} = ae_cmov_v{0}; +let Inst{21} = ae_cmov_v{1}; +let Inst{22} = ae_cmov_v{2}; +let Inst{23} = ae_cmov_v{3}; +let Inst{36} = ae_cmov_v0{0}; +let Inst{37} = ae_cmov_v0{1}; +let Inst{38} = ae_cmov_v0{2}; +let Inst{39} = ae_cmov_v0{3}; +let Inst{28} = bt{0}; +let Inst{29} = bt{1}; +let Inst{30} = bt{2}; +let Inst{31} = bt{3}; +} + + + +def AE_MOVF64 : AE_MOVF64_AE_FORMAT1<[(set AE_DR:$ae_cmov_v_out, (int_xtensa_ae_movf64 AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR:$bt))]>; + +class AE_MOVI_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_ar_to_dr_v), (ins imm16n_47:$movi_imm), "ae_movi $ae_ar_to_dr_v, $movi_imm", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ar_to_dr_v; +bits<6> movi_imm; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ar_to_dr_v{0}; +let Inst{13} = ae_ar_to_dr_v{1}; +let Inst{14} = ae_ar_to_dr_v{2}; +let Inst{15} = ae_ar_to_dr_v{3}; +let Inst{4} = movi_imm{0}; +let Inst{5} = movi_imm{1}; +let Inst{8} = movi_imm{2}; +let Inst{9} = movi_imm{3}; +let Inst{10} = movi_imm{4}; +let Inst{11} = movi_imm{5}; +} + + + +def AE_MOVI : AE_MOVI_X24<[(set AE_DR:$ae_ar_to_dr_v, (int_xtensa_ae_movi timm:$movi_imm))]>; + +class AE_MOVT16X4_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_cmov_v_out), (ins AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR4:$bt4), "ae_movt16x4 $ae_cmov_v, $ae_cmov_v0, $bt4", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmov_v; +bits<4> ae_cmov_v0; +bits<2> bt4; +let Constraints = "$ae_cmov_v = $ae_cmov_v_out,@earlyclobber $ae_cmov_v_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{28} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_cmov_v{0}; +let Inst{21} = ae_cmov_v{1}; +let Inst{22} = 
ae_cmov_v{2}; +let Inst{23} = ae_cmov_v{3}; +let Inst{36} = ae_cmov_v0{0}; +let Inst{37} = ae_cmov_v0{1}; +let Inst{38} = ae_cmov_v0{2}; +let Inst{39} = ae_cmov_v0{3}; +let Inst{30} = bt4{0}; +let Inst{31} = bt4{1}; +} + + + +def AE_MOVT16X4 : AE_MOVT16X4_AE_FORMAT1<[(set AE_DR:$ae_cmov_v_out, (int_xtensa_ae_movt16x4 AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR4:$bt4))]>; + +class AE_MOVT32X2_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_cmov_v_out), (ins AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR2:$bt2), "ae_movt32x2 $ae_cmov_v, $ae_cmov_v0, $bt2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmov_v; +bits<4> ae_cmov_v0; +bits<3> bt2; +let Constraints = "$ae_cmov_v = $ae_cmov_v_out,@earlyclobber $ae_cmov_v_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_cmov_v{0}; +let Inst{13} = ae_cmov_v{1}; +let Inst{14} = ae_cmov_v{2}; +let Inst{15} = ae_cmov_v{3}; +let Inst{8} = ae_cmov_v0{0}; +let Inst{9} = ae_cmov_v0{1}; +let Inst{10} = ae_cmov_v0{2}; +let Inst{11} = ae_cmov_v0{3}; +let Inst{5} = bt2{0}; +let Inst{6} = bt2{1}; +let Inst{7} = bt2{2}; +} + + + +def AE_MOVT32X2 : AE_MOVT32X2_X24<[(set AE_DR:$ae_cmov_v_out, (int_xtensa_ae_movt32x2 AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR2:$bt2))]>; + +class AE_MOVT64_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_cmov_v_out), (ins AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR:$bt), "ae_movt64 $ae_cmov_v, $ae_cmov_v0, $bt", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_cmov_v; +bits<4> ae_cmov_v0; +bits<4> bt; +let Constraints = "$ae_cmov_v = $ae_cmov_v_out,@earlyclobber $ae_cmov_v_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_cmov_v{0}; +let Inst{13} = ae_cmov_v{1}; +let Inst{14} = ae_cmov_v{2}; +let Inst{15} = 
ae_cmov_v{3}; +let Inst{8} = ae_cmov_v0{0}; +let Inst{9} = ae_cmov_v0{1}; +let Inst{10} = ae_cmov_v0{2}; +let Inst{11} = ae_cmov_v0{3}; +let Inst{4} = bt{0}; +let Inst{5} = bt{1}; +let Inst{6} = bt{2}; +let Inst{7} = bt{3}; +} + + + +def AE_MOVT64 : AE_MOVT64_X24<[(set AE_DR:$ae_cmov_v_out, (int_xtensa_ae_movt64 AE_DR:$ae_cmov_v, AE_DR:$ae_cmov_v0, BR:$bt))]>; + +class AE_MUL16X4_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q1, AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d0), "ae_mul16x4 $ae_mul_q1, $ae_mul_q0, $ae_mul_d1, $ae_mul_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q1; +bits<4> ae_mul_q0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +} + + + +def AE_MUL16X4 : AE_MUL16X4_AE_FORMAT2<[]>; + +class AE_MUL32_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{42} = 1; +//operands +let Inst{16} = 
opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32_HH : AE_MUL32_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32_hh AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32_LH : AE_MUL32_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32_lh 
AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32_LL : AE_MUL32_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32_ll AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mul32.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let 
Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MUL32_LL_S2 : AE_MUL32_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mul32_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MUL32U_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32u.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32U_LL : AE_MUL32U_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32u_ll AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32X16_H0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32x16.h0 
$opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32X16_H0 : AE_MUL32X16_H0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32x16_h0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32X16_H0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mul32x16.h0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = 
ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MUL32X16_H0_S2 : AE_MUL32X16_H0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mul32x16_h0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MUL32X16_H1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32x16.h1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32X16_H1 : AE_MUL32X16_H1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32x16_h1 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32X16_H1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mul32x16.h1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; 
+//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MUL32X16_H1_S2 : AE_MUL32X16_H1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mul32x16_h1_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MUL32X16_H2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32x16.h2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32X16_H2 : AE_MUL32X16_H2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32x16_h2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class 
AE_MUL32X16_H2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mul32x16.h2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MUL32X16_H2_S2 : AE_MUL32X16_H2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mul32x16_h2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MUL32X16_H3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32x16.h3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = 
opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32X16_H3 : AE_MUL32X16_H3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32x16_h3 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32X16_H3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mul32x16.h3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MUL32X16_H3_S2 : AE_MUL32X16_H3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mul32x16_h3_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MUL32X16_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32x16.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let 
Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32X16_L0 : AE_MUL32X16_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32x16_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32X16_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mul32x16.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MUL32X16_L0_S2 : AE_MUL32X16_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mul32x16_l0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MUL32X16_L1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32x16.l1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32X16_L1 : AE_MUL32X16_L1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32x16_l1 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32X16_L1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mul32x16.l1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let 
Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MUL32X16_L1_S2 : AE_MUL32X16_L1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mul32x16_l1_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MUL32X16_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32x16.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32X16_L2 : AE_MUL32X16_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32x16_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32X16_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mul32x16.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> 
ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MUL32X16_L2_S2 : AE_MUL32X16_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mul32x16_l2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MUL32X16_L3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mul32x16.l3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MUL32X16_L3 : AE_MUL32X16_L3_AE_FORMAT48_3<[(set 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mul32x16_l3 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MUL32X16_L3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mul32x16.l3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MUL32X16_L3_S2 : AE_MUL32X16_L3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mul32x16_l3_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULA16X4_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q1_out, AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q1, AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d0), "ae_mula16x4 $ae_mul_q1, $ae_mul_q0, $ae_mul_d1, $ae_mul_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q1; +bits<4> ae_mul_q0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d0; +let Constraints = "$ae_mul_q1 = $ae_mul_q1_out,@earlyclobber $ae_mul_q1_out, $ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let 
Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +} + + + +def AE_MULA16X4 : AE_MULA16X4_AE_FORMAT2<[]>; + +class AE_MULA32_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32_HH : AE_MULA32_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32_hh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, 
AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32_LH : AE_MULA32_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32_lh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> 
opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32_LL : AE_MULA32_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32_ll AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mula32.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; 
+let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULA32_LL_S2 : AE_MULA32_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mula32_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULA32U_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32u.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32U_LL : AE_MULA32U_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32u_ll AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32X16_H0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), 
(ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32x16.h0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32X16_H0 : AE_MULA32X16_H0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32x16_h0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32X16_H0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mula32x16.h0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 
1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULA32X16_H0_S2 : AE_MULA32X16_H0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mula32x16_h0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULA32X16_H1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32x16.h1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = 
opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32X16_H1 : AE_MULA32X16_H1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32x16_h1 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32X16_H1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mula32x16.h1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULA32X16_H1_S2 : AE_MULA32X16_H1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mula32x16_h1_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULA32X16_H2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32x16.h2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = 
$opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32X16_H2 : AE_MULA32X16_H2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32x16_h2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32X16_H2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mula32x16.h2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = 
ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULA32X16_H2_S2 : AE_MULA32X16_H2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mula32x16_h2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULA32X16_H3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32x16.h3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32X16_H3 : AE_MULA32X16_H3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32x16_h3 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32X16_H3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mula32x16.h3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, 
$ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULA32X16_H3_S2 : AE_MULA32X16_H3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mula32x16_h3_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULA32X16_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32x16.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = 
opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32X16_L0 : AE_MULA32X16_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32x16_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32X16_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mula32x16.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULA32X16_L0_S2 : AE_MULA32X16_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mula32x16_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULA32X16_L1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, 
AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32x16.l1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32X16_L1 : AE_MULA32X16_L1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32x16_l1 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32X16_L1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mula32x16.l1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{60} = 1; +let 
Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULA32X16_L1_S2 : AE_MULA32X16_L1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mula32x16_l1_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULA32X16_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32x16.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32X16_L2 : AE_MULA32X16_L2_AE_FORMAT48_3<[(set 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32x16_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32X16_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mula32x16.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULA32X16_L2_S2 : AE_MULA32X16_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mula32x16_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULA32X16_L3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mula32x16.l3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber 
$opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULA32X16_L3 : AE_MULA32X16_L3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mula32x16_l3 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULA32X16_L3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mula32x16.l3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = 
ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULA32X16_L3_S2 : AE_MULA32X16_L3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mula32x16_l3_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaad24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAAD24_HH_LL : AE_MULAAD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaad24_hh_ll AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAAD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaad24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{60} = 1; +let 
Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAD24_HH_LL_S2 : AE_MULAAD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaad24_hh_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaad24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAAD24_HL_LH : AE_MULAAD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaad24_hl_lh AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAAD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaad24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, 
$ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAD24_HL_LH_S2 : AE_MULAAD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaad24_hl_lh_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAD32X16_H0_L1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaad32x16.h0.l1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = 
opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAD32X16_H0_L1 : AE_MULAAD32X16_H0_L1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaad32x16_h0_l1 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAD32X16_H0_L1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaad32x16.h0.l1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAD32X16_H0_L1_S2 : AE_MULAAD32X16_H0_L1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaad32x16_h0_l1_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaad32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAD32X16_H1_L0 : AE_MULAAD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaad32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaad32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format 
+let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAD32X16_H1_L0_S2 : AE_MULAAD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaad32x16_h1_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAD32X16_H2_L3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaad32x16.h2.l3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = 
opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAD32X16_H2_L3 : AE_MULAAD32X16_H2_L3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaad32x16_h2_l3 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAD32X16_H2_L3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaad32x16.h2.l3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAD32X16_H2_L3_S2 : AE_MULAAD32X16_H2_L3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaad32x16_h2_l3_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaad32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, 
$opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAD32X16_H3_L2 : AE_MULAAD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaad32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaad32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} 
= ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAD32X16_H3_L2_S2 : AE_MULAAD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaad32x16_h3_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAFD16SS_11_00_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaafd16ss.11_00 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAFD16SS_11_00 : AE_MULAAFD16SS_11_00_AE_FORMAT48_3<[(set 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaafd16ss_11_00 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAFD16SS_11_00_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaafd16ss.11_00_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAFD16SS_11_00_S2 : AE_MULAAFD16SS_11_00_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaafd16ss_11_00_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAFD16SS_13_02_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaafd16ss.13_02 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = 
$opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAFD16SS_13_02 : AE_MULAAFD16SS_13_02_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaafd16ss_13_02 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAFD16SS_13_02_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaafd16ss.13_02_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; 
+let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAFD16SS_13_02_S2 : AE_MULAAFD16SS_13_02_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaafd16ss_13_02_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAFD16SS_33_22_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaafd16ss.33_22 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAFD16SS_33_22 : AE_MULAAFD16SS_33_22_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaafd16ss_33_22 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAFD16SS_33_22_S2_AE_FORMAT pattern> + : XtensaInst64<(outs 
AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaafd16ss.33_22_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAFD16SS_33_22_S2 : AE_MULAAFD16SS_33_22_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaafd16ss_33_22_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAFD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaafd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{50} = 1; +let Inst{53} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = 
ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAAFD24_HH_LL : AE_MULAAFD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaafd24_hh_ll AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAAFD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaafd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAFD24_HH_LL_S2 : AE_MULAAFD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaafd24_hh_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAFD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaafd24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let 
Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{53} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAAFD24_HL_LH : AE_MULAAFD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaafd24_hl_lh AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAAFD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaafd24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAFD24_HL_LH_S2 : AE_MULAAFD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaafd24_hl_lh_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAFD32X16_H0_L1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaafd32x16.h0.l1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAFD32X16_H0_L1 : AE_MULAAFD32X16_H0_L1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaafd32x16_h0_l1 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAFD32X16_H0_L1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaafd32x16.h0.l1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let 
Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAFD32X16_H0_L1_S2 : AE_MULAAFD32X16_H0_L1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaafd32x16_h0_l1_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAFD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaafd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = 
opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAFD32X16_H1_L0 : AE_MULAAFD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaafd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAFD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaafd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAFD32X16_H1_L0_S2 : AE_MULAAFD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaafd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAFD32X16_H2_L3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaafd32x16.h2.l3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> 
opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAFD32X16_H2_L3 : AE_MULAAFD32X16_H2_L3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaafd32x16_h2_l3 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAAFD32X16_H2_L3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaafd32x16.h2.l3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = 
ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAFD32X16_H2_L3_S2 : AE_MULAAFD32X16_H2_L3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaafd32x16_h2_l3_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAAFD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaafd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAAFD32X16_H3_L2 : AE_MULAAFD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaafd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + 
+class AE_MULAAFD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaafd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAAFD32X16_H3_L2_S2 : AE_MULAAFD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaafd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAC24_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulac24 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{53} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = 
ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAC24 : AE_MULAC24_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulac24 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAC32X16_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x4_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x4_q0, AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1), "ae_mulac32x16.h $opnd_ae_sem_mul_x4_q0, $opnd_ae_sem_mul_x4_d0, $opnd_ae_sem_mul_x4_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x4_q0; +bits<4> opnd_ae_sem_mul_x4_d0; +bits<4> opnd_ae_sem_mul_x4_d1; +let Constraints = "$opnd_ae_sem_mul_x4_q0 = $opnd_ae_sem_mul_x4_q0_out,@earlyclobber $opnd_ae_sem_mul_x4_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x4_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x4_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x4_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x4_q0{3}; +let Inst{24} = opnd_ae_sem_mul_x4_d0{0}; +let Inst{25} = opnd_ae_sem_mul_x4_d0{1}; +let Inst{26} = opnd_ae_sem_mul_x4_d0{2}; +let Inst{27} = opnd_ae_sem_mul_x4_d0{3}; +let Inst{20} = opnd_ae_sem_mul_x4_d1{0}; +let Inst{21} = opnd_ae_sem_mul_x4_d1{1}; +let Inst{22} = opnd_ae_sem_mul_x4_d1{2}; +let Inst{23} = opnd_ae_sem_mul_x4_d1{3}; +} + + + +def AE_MULAC32X16_H : AE_MULAC32X16_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x4_q0_out, (int_xtensa_ae_mulac32x16_h AE_DR:$opnd_ae_sem_mul_x4_q0, AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1))]>; + +class AE_MULAC32X16_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x4_q0_out), (ins 
AE_DR:$opnd_ae_sem_mul_x4_q0, AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1), "ae_mulac32x16.l $opnd_ae_sem_mul_x4_q0, $opnd_ae_sem_mul_x4_d0, $opnd_ae_sem_mul_x4_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x4_q0; +bits<4> opnd_ae_sem_mul_x4_d0; +bits<4> opnd_ae_sem_mul_x4_d1; +let Constraints = "$opnd_ae_sem_mul_x4_q0 = $opnd_ae_sem_mul_x4_q0_out,@earlyclobber $opnd_ae_sem_mul_x4_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x4_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x4_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x4_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x4_q0{3}; +let Inst{24} = opnd_ae_sem_mul_x4_d0{0}; +let Inst{25} = opnd_ae_sem_mul_x4_d0{1}; +let Inst{26} = opnd_ae_sem_mul_x4_d0{2}; +let Inst{27} = opnd_ae_sem_mul_x4_d0{3}; +let Inst{20} = opnd_ae_sem_mul_x4_d1{0}; +let Inst{21} = opnd_ae_sem_mul_x4_d1{1}; +let Inst{22} = opnd_ae_sem_mul_x4_d1{2}; +let Inst{23} = opnd_ae_sem_mul_x4_d1{3}; +} + + + +def AE_MULAC32X16_L : AE_MULAC32X16_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x4_q0_out, (int_xtensa_ae_mulac32x16_l AE_DR:$opnd_ae_sem_mul_x4_q0, AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1))]>; + +class AE_MULAF16SS_00_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf16ss.00 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber 
$opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF16SS_00 : AE_MULAF16SS_00_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf16ss_00 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF16SS_00_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf16ss.00_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = 
ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF16SS_00_S2 : AE_MULAF16SS_00_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf16ss_00_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF16SS_10_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaf16ss.10 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAF16SS_10 : AE_MULAF16SS_10_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaf16ss_10 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAF16SS_11_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaf16ss.11 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let 
Inst{38} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAF16SS_11 : AE_MULAF16SS_11_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaf16ss_11 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAF16SS_20_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaf16ss.20 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAF16SS_20 : AE_MULAF16SS_20_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaf16ss_20 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAF16SS_21_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaf16ss.21 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + 
Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{38} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAF16SS_21 : AE_MULAF16SS_21_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaf16ss_21 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAF16SS_22_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaf16ss.22 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; 
+} + + + +def AE_MULAF16SS_22 : AE_MULAF16SS_22_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaf16ss_22 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAF16SS_30_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaf16ss.30 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{38} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAF16SS_30 : AE_MULAF16SS_30_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaf16ss_30 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAF16SS_31_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaf16ss.31 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = 
ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAF16SS_31 : AE_MULAF16SS_31_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaf16ss_31 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAF16SS_32_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaf16ss.32 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{36} = 1; +let Inst{38} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAF16SS_32 : AE_MULAF16SS_32_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaf16ss_32 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAF16SS_33_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulaf16ss.33 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; 
+ +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{36} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAF16SS_33 : AE_MULAF16SS_33_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulaf16ss_33 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAF16X4SS_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q1_out, AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q1, AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d0), "ae_mulaf16x4ss $ae_mul_q1, $ae_mul_q0, $ae_mul_d1, $ae_mul_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q1; +bits<4> ae_mul_q0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d0; +let Constraints = "$ae_mul_q1 = $ae_mul_q1_out,@earlyclobber $ae_mul_q1_out, $ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +} + + + +def AE_MULAF16X4SS : 
AE_MULAF16X4SS_AE_FORMAT2<[]>; + +class AE_MULAF32R_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32r.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32R_HH : AE_MULAF32R_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32r_hh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32R_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32r.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; 
+bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32R_LH : AE_MULAF32R_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32r_lh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32R_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32r.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = 
opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32R_LL : AE_MULAF32R_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32r_ll AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32R_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf32r.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF32R_LL_S2 : AE_MULAF32R_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf32r_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF32S_HH_AE_FORMAT48_3 
pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32s.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32S_HH : AE_MULAF32S_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32s_hh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32S_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32s.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = 
"$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32S_LH : AE_MULAF32S_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32s_lh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32S_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32s.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = 
opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32S_LL : AE_MULAF32S_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32s_ll AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32S_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf32s.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF32S_LL_S2 : AE_MULAF32S_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf32s_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF32X16_H0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, 
AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32x16.h0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32X16_H0 : AE_MULAF32X16_H0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32x16_h0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32X16_H0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf32x16.h0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode 
+let Inst{57} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF32X16_H0_S2 : AE_MULAF32X16_H0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf32x16_h0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF32X16_H1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32x16.h1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def 
AE_MULAF32X16_H1 : AE_MULAF32X16_H1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32x16_h1 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32X16_H1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf32x16.h1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF32X16_H1_S2 : AE_MULAF32X16_H1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf32x16_h1_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF32X16_H2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32x16.h2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = 
$opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32X16_H2 : AE_MULAF32X16_H2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32x16_h2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32X16_H2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf32x16.h2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let 
Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF32X16_H2_S2 : AE_MULAF32X16_H2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf32x16_h2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF32X16_H3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32x16.h3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32X16_H3 : AE_MULAF32X16_H3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32x16_h3 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32X16_H3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, 
AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf32x16.h3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF32X16_H3_S2 : AE_MULAF32X16_H3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf32x16_h3_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF32X16_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32x16.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let 
Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32X16_L0 : AE_MULAF32X16_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32x16_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32X16_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf32x16.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF32X16_L0_S2 : AE_MULAF32X16_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf32x16_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class 
AE_MULAF32X16_L1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32x16.l1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32X16_L1 : AE_MULAF32X16_L1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32x16_l1 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32X16_L1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf32x16.l1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let 
DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF32X16_L1_S2 : AE_MULAF32X16_L1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf32x16_l1_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF32X16_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32x16.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = 
opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32X16_L2 : AE_MULAF32X16_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32x16_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32X16_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf32x16.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF32X16_L2_S2 : AE_MULAF32X16_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf32x16_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF32X16_L3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf32x16.l3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> 
opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF32X16_L3 : AE_MULAF32X16_L3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf32x16_l3 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF32X16_L3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf32x16.l3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = 
ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF32X16_L3_S2 : AE_MULAF32X16_L3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf32x16_l3_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF48Q32SP16S_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf48q32sp16s.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF48Q32SP16S_L : AE_MULAF48Q32SP16S_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf48q32sp16s_l AE_DR:$opnd_ae_sem_mul_x2_S1_q0, 
AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF48Q32SP16S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf48q32sp16s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAF48Q32SP16S_L_S2 : AE_MULAF48Q32SP16S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf48q32sp16s_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAF48Q32SP16U_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulaf48q32sp16u.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let 
Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAF48Q32SP16U_L : AE_MULAF48Q32SP16U_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulaf48q32sp16u_l AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAF48Q32SP16U_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaf48q32sp16u.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def 
AE_MULAF48Q32SP16U_L_S2 : AE_MULAF48Q32SP16U_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaf48q32sp16u_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAFC24RA_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x4_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x4_q0, AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1), "ae_mulafc24ra $opnd_ae_sem_mul_x4_q0, $opnd_ae_sem_mul_x4_d0, $opnd_ae_sem_mul_x4_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x4_q0; +bits<4> opnd_ae_sem_mul_x4_d0; +bits<4> opnd_ae_sem_mul_x4_d1; +let Constraints = "$opnd_ae_sem_mul_x4_q0 = $opnd_ae_sem_mul_x4_q0_out,@earlyclobber $opnd_ae_sem_mul_x4_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x4_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x4_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x4_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x4_q0{3}; +let Inst{24} = opnd_ae_sem_mul_x4_d0{0}; +let Inst{25} = opnd_ae_sem_mul_x4_d0{1}; +let Inst{26} = opnd_ae_sem_mul_x4_d0{2}; +let Inst{27} = opnd_ae_sem_mul_x4_d0{3}; +let Inst{20} = opnd_ae_sem_mul_x4_d1{0}; +let Inst{21} = opnd_ae_sem_mul_x4_d1{1}; +let Inst{22} = opnd_ae_sem_mul_x4_d1{2}; +let Inst{23} = opnd_ae_sem_mul_x4_d1{3}; +} + + + +def AE_MULAFC24RA : AE_MULAFC24RA_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x4_q0_out, (int_xtensa_ae_mulafc24ra AE_DR:$opnd_ae_sem_mul_x4_q0, AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1))]>; + +class AE_MULAFC32X16RAS_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x4_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x4_q0, AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1), "ae_mulafc32x16ras.h $opnd_ae_sem_mul_x4_q0, $opnd_ae_sem_mul_x4_d0, 
$opnd_ae_sem_mul_x4_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x4_q0; +bits<4> opnd_ae_sem_mul_x4_d0; +bits<4> opnd_ae_sem_mul_x4_d1; +let Constraints = "$opnd_ae_sem_mul_x4_q0 = $opnd_ae_sem_mul_x4_q0_out,@earlyclobber $opnd_ae_sem_mul_x4_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x4_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x4_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x4_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x4_q0{3}; +let Inst{24} = opnd_ae_sem_mul_x4_d0{0}; +let Inst{25} = opnd_ae_sem_mul_x4_d0{1}; +let Inst{26} = opnd_ae_sem_mul_x4_d0{2}; +let Inst{27} = opnd_ae_sem_mul_x4_d0{3}; +let Inst{20} = opnd_ae_sem_mul_x4_d1{0}; +let Inst{21} = opnd_ae_sem_mul_x4_d1{1}; +let Inst{22} = opnd_ae_sem_mul_x4_d1{2}; +let Inst{23} = opnd_ae_sem_mul_x4_d1{3}; +} + + + +def AE_MULAFC32X16RAS_H : AE_MULAFC32X16RAS_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x4_q0_out, (int_xtensa_ae_mulafc32x16ras_h AE_DR:$opnd_ae_sem_mul_x4_q0, AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1))]>; + +class AE_MULAFC32X16RAS_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x4_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x4_q0, AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1), "ae_mulafc32x16ras.l $opnd_ae_sem_mul_x4_q0, $opnd_ae_sem_mul_x4_d0, $opnd_ae_sem_mul_x4_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x4_q0; +bits<4> opnd_ae_sem_mul_x4_d0; +bits<4> opnd_ae_sem_mul_x4_d1; +let Constraints = "$opnd_ae_sem_mul_x4_q0 = $opnd_ae_sem_mul_x4_q0_out,@earlyclobber $opnd_ae_sem_mul_x4_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; 
+//operands +let Inst{16} = opnd_ae_sem_mul_x4_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x4_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x4_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x4_q0{3}; +let Inst{24} = opnd_ae_sem_mul_x4_d0{0}; +let Inst{25} = opnd_ae_sem_mul_x4_d0{1}; +let Inst{26} = opnd_ae_sem_mul_x4_d0{2}; +let Inst{27} = opnd_ae_sem_mul_x4_d0{3}; +let Inst{20} = opnd_ae_sem_mul_x4_d1{0}; +let Inst{21} = opnd_ae_sem_mul_x4_d1{1}; +let Inst{22} = opnd_ae_sem_mul_x4_d1{2}; +let Inst{23} = opnd_ae_sem_mul_x4_d1{3}; +} + + + +def AE_MULAFC32X16RAS_L : AE_MULAFC32X16RAS_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x4_q0_out, (int_xtensa_ae_mulafc32x16ras_l AE_DR:$opnd_ae_sem_mul_x4_q0, AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1))]>; + +class AE_MULAFD24X2_FIR_H_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out, AE_DR:$ae_mul_q1_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulafd24x2.fir.h $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out, $ae_mul_q1 = $ae_mul_q1_out,@earlyclobber $ae_mul_q1_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode + +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = 
ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULAFD24X2_FIR_H : AE_MULAFD24X2_FIR_H_AE_FORMAT2<[]>; + +class AE_MULAFD24X2_FIR_L_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out, AE_DR:$ae_mul_q1_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulafd24x2.fir.l $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out, $ae_mul_q1 = $ae_mul_q1_out,@earlyclobber $ae_mul_q1_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULAFD24X2_FIR_L : AE_MULAFD24X2_FIR_L_AE_FORMAT2<[]>; + +class AE_MULAFD32X16X2_FIR_HH_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out, AE_DR:$ae_mul_q1_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulafd32x16x2.fir.hh $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; 
+let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out, $ae_mul_q1 = $ae_mul_q1_out,@earlyclobber $ae_mul_q1_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULAFD32X16X2_FIR_HH : AE_MULAFD32X16X2_FIR_HH_AE_FORMAT2<[]>; + +class AE_MULAFD32X16X2_FIR_HL_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out, AE_DR:$ae_mul_q1_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulafd32x16x2.fir.hl $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out, $ae_mul_q1 = $ae_mul_q1_out,@earlyclobber $ae_mul_q1_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = 
ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULAFD32X16X2_FIR_HL : AE_MULAFD32X16X2_FIR_HL_AE_FORMAT2<[]>; + +class AE_MULAFD32X16X2_FIR_LH_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out, AE_DR:$ae_mul_q1_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulafd32x16x2.fir.lh $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out, $ae_mul_q1 = $ae_mul_q1_out,@earlyclobber $ae_mul_q1_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULAFD32X16X2_FIR_LH : AE_MULAFD32X16X2_FIR_LH_AE_FORMAT2<[]>; + +class AE_MULAFD32X16X2_FIR_LL_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out, AE_DR:$ae_mul_q1_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1, AE_DR:$ae_mul_d0, 
AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulafd32x16x2.fir.ll $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out, $ae_mul_q1 = $ae_mul_q1_out,@earlyclobber $ae_mul_q1_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULAFD32X16X2_FIR_LL : AE_MULAFD32X16X2_FIR_LL_AE_FORMAT2<[]>; + +class AE_MULAFP24X2R_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulafp24x2r $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let 
Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAFP24X2R : AE_MULAFP24X2R_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulafp24x2r AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAFP24X2R_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulafp24x2r_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAFP24X2R_S2 : AE_MULAFP24X2R_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, 
(int_xtensa_ae_mulafp24x2r_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAFP24X2RA_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulafp24x2ra $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAFP24X2RA : AE_MULAFP24X2RA_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulafp24x2ra AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAFP24X2RA_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulafp24x2ra_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> 
ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAFP24X2RA_S2 : AE_MULAFP24X2RA_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulafp24x2ra_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAFP32X16X2RAS_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulafp32x16x2ras.h $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = 
opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAFP32X16X2RAS_H : AE_MULAFP32X16X2RAS_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulafp32x16x2ras_h AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAFP32X16X2RAS_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulafp32x16x2ras.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAFP32X16X2RAS_H_S2 : AE_MULAFP32X16X2RAS_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulafp32x16x2ras_h_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAFP32X16X2RAS_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, 
AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulafp32x16x2ras.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAFP32X16X2RAS_L : AE_MULAFP32X16X2RAS_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulafp32x16x2ras_l AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAFP32X16X2RAS_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulafp32x16x2ras.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let 
Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAFP32X16X2RAS_L_S2 : AE_MULAFP32X16X2RAS_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulafp32x16x2ras_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAFP32X16X2RS_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulafp32x16x2rs.h $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + 
+ + +def AE_MULAFP32X16X2RS_H : AE_MULAFP32X16X2RS_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulafp32x16x2rs_h AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAFP32X16X2RS_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulafp32x16x2rs.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAFP32X16X2RS_H_S2 : AE_MULAFP32X16X2RS_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulafp32x16x2rs_h_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAFP32X16X2RS_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulafp32x16x2rs.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = 
"$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAFP32X16X2RS_L : AE_MULAFP32X16X2RS_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulafp32x16x2rs_l AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAFP32X16X2RS_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulafp32x16x2rs.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let 
Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAFP32X16X2RS_L_S2 : AE_MULAFP32X16X2RS_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulafp32x16x2rs_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAFP32X2RAS_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulafp32x2ras $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAFP32X2RAS : AE_MULAFP32X2RAS_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulafp32x2ras AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAFP32X2RS_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), 
(ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulafp32x2rs $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAFP32X2RS : AE_MULAFP32X2RS_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulafp32x2rs AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAFQ32SP24S_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulafq32sp24s.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let 
Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAFQ32SP24S_H_S2 : AE_MULAFQ32SP24S_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulafq32sp24s_h_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAFQ32SP24S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulafq32sp24s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAFQ32SP24S_L_S2 : AE_MULAFQ32SP24S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulafq32sp24s_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class 
AE_MULAP24X2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulap24x2 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{54} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULAP24X2 : AE_MULAP24X2_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulap24x2 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULAP24X2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulap24x2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = 
ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAP24X2_S2 : AE_MULAP24X2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulap24x2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAP32X16X2_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulap32x16x2.h $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAP32X16X2_H : AE_MULAP32X16X2_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulap32x16x2_h AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAP32X16X2_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulap32x16x2.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAP32X16X2_L : AE_MULAP32X16X2_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulap32x16x2_l AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAP32X2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulap32x2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = 
$opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAP32X2 : AE_MULAP32X2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulap32x2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAQ32SP16S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaq32sp16s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; 
+let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAQ32SP16S_L_S2 : AE_MULAQ32SP16S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaq32sp16s_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAQ32SP16U_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulaq32sp16u.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAQ32SP16U_L_S2 : AE_MULAQ32SP16U_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulaq32sp16u_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULARFQ32SP24S_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mularfq32sp24s.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = 
$ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULARFQ32SP24S_H_S2 : AE_MULARFQ32SP24S_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mularfq32sp24s_h_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULARFQ32SP24S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mularfq32sp24s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULARFQ32SP24S_L_S2 : 
AE_MULARFQ32SP24S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mularfq32sp24s_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAS32F48P16S_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulas32f48p16s.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAS32F48P16S_HH : AE_MULAS32F48P16S_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulas32f48p16s_hh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAS32F48P16S_HH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulas32f48p16s.hh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + 
Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAS32F48P16S_HH_S2 : AE_MULAS32F48P16S_HH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulas32f48p16s_hh_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAS32F48P16S_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulas32f48p16s.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = 
opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAS32F48P16S_LH : AE_MULAS32F48P16S_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulas32f48p16s_lh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAS32F48P16S_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulas32f48p16s.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAS32F48P16S_LH_S2 : AE_MULAS32F48P16S_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulas32f48p16s_lh_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULAS32F48P16S_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, 
AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulas32f48p16s.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULAS32F48P16S_LL : AE_MULAS32F48P16S_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulas32f48p16s_ll AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULAS32F48P16S_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulas32f48p16s.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let 
Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULAS32F48P16S_LL_S2 : AE_MULAS32F48P16S_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulas32f48p16s_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULASD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulasd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULASD24_HH_LL : AE_MULASD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulasd24_hh_ll AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULASD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, 
AE_DR:$ae_mul_S2_d1), "ae_mulasd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULASD24_HH_LL_S2 : AE_MULASD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulasd24_hh_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULASD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulasd24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let 
Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULASD24_HL_LH : AE_MULASD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulasd24_hl_lh AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULASD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulasd24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULASD24_HL_LH_S2 : AE_MULASD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulasd24_hl_lh_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULASD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulasd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = 
"$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULASD32X16_H1_L0 : AE_MULASD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulasd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULASD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulasd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = 
ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULASD32X16_H1_L0_S2 : AE_MULASD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulasd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULASD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulasd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULASD32X16_H3_L2 : AE_MULASD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulasd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULASD32X16_H3_L2_S2_AE_FORMAT pattern> + 
: XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulasd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULASD32X16_H3_L2_S2 : AE_MULASD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulasd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULASFD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulasfd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{50} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = 
ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULASFD24_HH_LL : AE_MULASFD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulasfd24_hh_ll AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULASFD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulasfd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULASFD24_HH_LL_S2 : AE_MULASFD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulasfd24_hh_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULASFD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulasfd24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 
= $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULASFD24_HL_LH : AE_MULASFD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulasfd24_hl_lh AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULASFD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulasfd24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULASFD24_HL_LH_S2 : AE_MULASFD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulasfd24_hl_lh_s2 
AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULASFD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulasfd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULASFD32X16_H1_L0 : AE_MULASFD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulasfd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULASFD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulasfd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> 
ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULASFD32X16_H1_L0_S2 : AE_MULASFD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulasfd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULASFD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulasfd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = 
opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULASFD32X16_H3_L2 : AE_MULASFD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulasfd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULASFD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulasfd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULASFD32X16_H3_L2_S2 : AE_MULASFD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulasfd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULC24_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulc24 $ae_mul_q0, 
$ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULC24 : AE_MULC24_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulc24 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULC32X16_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x4_q0), (ins AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1), "ae_mulc32x16.h $opnd_ae_sem_mul_x4_q0, $opnd_ae_sem_mul_x4_d0, $opnd_ae_sem_mul_x4_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x4_q0; +bits<4> opnd_ae_sem_mul_x4_d0; +bits<4> opnd_ae_sem_mul_x4_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x4_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x4_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x4_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x4_q0{3}; +let Inst{24} = opnd_ae_sem_mul_x4_d0{0}; +let Inst{25} = opnd_ae_sem_mul_x4_d0{1}; +let Inst{26} = opnd_ae_sem_mul_x4_d0{2}; +let Inst{27} = opnd_ae_sem_mul_x4_d0{3}; +let Inst{20} = opnd_ae_sem_mul_x4_d1{0}; +let Inst{21} = opnd_ae_sem_mul_x4_d1{1}; +let Inst{22} = opnd_ae_sem_mul_x4_d1{2}; +let Inst{23} = opnd_ae_sem_mul_x4_d1{3}; +} + 
+ + +def AE_MULC32X16_H : AE_MULC32X16_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x4_q0, (int_xtensa_ae_mulc32x16_h AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1))]>; + +class AE_MULC32X16_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x4_q0), (ins AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1), "ae_mulc32x16.l $opnd_ae_sem_mul_x4_q0, $opnd_ae_sem_mul_x4_d0, $opnd_ae_sem_mul_x4_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x4_q0; +bits<4> opnd_ae_sem_mul_x4_d0; +bits<4> opnd_ae_sem_mul_x4_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x4_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x4_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x4_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x4_q0{3}; +let Inst{24} = opnd_ae_sem_mul_x4_d0{0}; +let Inst{25} = opnd_ae_sem_mul_x4_d0{1}; +let Inst{26} = opnd_ae_sem_mul_x4_d0{2}; +let Inst{27} = opnd_ae_sem_mul_x4_d0{3}; +let Inst{20} = opnd_ae_sem_mul_x4_d1{0}; +let Inst{21} = opnd_ae_sem_mul_x4_d1{1}; +let Inst{22} = opnd_ae_sem_mul_x4_d1{2}; +let Inst{23} = opnd_ae_sem_mul_x4_d1{3}; +} + + + +def AE_MULC32X16_L : AE_MULC32X16_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x4_q0, (int_xtensa_ae_mulc32x16_l AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1))]>; + +class AE_MULF16SS_00_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf16ss.00 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let 
Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF16SS_00 : AE_MULF16SS_00_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf16ss_00 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF16SS_00_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf16ss.00_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF16SS_00_S2 : AE_MULF16SS_00_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf16ss_00_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class 
AE_MULF16SS_10_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulf16ss.10 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULF16SS_10 : AE_MULF16SS_10_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulf16ss_10 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULF16SS_11_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulf16ss.11 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULF16SS_11 : 
AE_MULF16SS_11_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulf16ss_11 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULF16SS_20_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulf16ss.20 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULF16SS_20 : AE_MULF16SS_20_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulf16ss_20 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULF16SS_21_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulf16ss.21 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} 
= ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULF16SS_21 : AE_MULF16SS_21_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulf16ss_21 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULF16SS_22_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulf16ss.22 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULF16SS_22 : AE_MULF16SS_22_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulf16ss_22 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULF16SS_30_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulf16ss.30 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = 
ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULF16SS_30 : AE_MULF16SS_30_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulf16ss_30 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULF16SS_31_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulf16ss.31 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULF16SS_31 : AE_MULF16SS_31_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulf16ss_31 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULF16SS_32_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulf16ss.32 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{37} = 1; +let Inst{38} = 1; 
+let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULF16SS_32 : AE_MULF16SS_32_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulf16ss_32 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULF16SS_33_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulf16ss.33 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULF16SS_33 : AE_MULF16SS_33_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulf16ss_33 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULF16X4SS_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q1, AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d0), "ae_mulf16x4ss $ae_mul_q1, $ae_mul_q0, $ae_mul_d1, $ae_mul_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q1; +bits<4> ae_mul_q0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let 
Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +} + + + +def AE_MULF16X4SS : AE_MULF16X4SS_AE_FORMAT2<[]>; + +class AE_MULF32R_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32r.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32R_HH : AE_MULF32R_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32r_hh 
AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32R_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32r.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32R_LH : AE_MULF32R_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32r_lh AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32R_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32r.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let 
Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32R_LL : AE_MULF32R_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32r_ll AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32R_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf32r.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF32R_LL_S2 : AE_MULF32R_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf32r_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF32S_HH_AE_FORMAT48_3 pattern> + : 
XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32s.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32S_HH : AE_MULF32S_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32s_hh AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32S_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32s.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = 
opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32S_LH : AE_MULF32S_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32s_lh AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32S_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32s.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32S_LL : AE_MULF32S_LL_AE_FORMAT48_3<[(set 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32s_ll AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32S_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf32s.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF32S_LL_S2 : AE_MULF32S_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf32s_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF32X16_H0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32x16.h0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = 
opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32X16_H0 : AE_MULF32X16_H0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32x16_h0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32X16_H0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf32x16.h0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF32X16_H0_S2 : AE_MULF32X16_H0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf32x16_h0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF32X16_H1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32x16.h1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, 
$opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32X16_H1 : AE_MULF32X16_H1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32x16_h1 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32X16_H1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf32x16.h1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; 
+let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF32X16_H1_S2 : AE_MULF32X16_H1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf32x16_h1_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF32X16_H2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32x16.h2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32X16_H2 : AE_MULF32X16_H2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32x16_h2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32X16_H2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf32x16.h2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 
1; +//opcode +let Inst{56} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF32X16_H2_S2 : AE_MULF32X16_H2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf32x16_h2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF32X16_H3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32x16.h3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32X16_H3 : AE_MULF32X16_H3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32x16_h3 
AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32X16_H3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf32x16.h3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF32X16_H3_S2 : AE_MULF32X16_H3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf32x16_h3_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF32X16_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32x16.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = 
opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32X16_L0 : AE_MULF32X16_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32x16_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32X16_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf32x16.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF32X16_L0_S2 : AE_MULF32X16_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf32x16_l0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF32X16_L1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32x16.l1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl 
+bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32X16_L1 : AE_MULF32X16_L1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32x16_l1 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32X16_L1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf32x16.l1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + 
+def AE_MULF32X16_L1_S2 : AE_MULF32X16_L1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf32x16_l1_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF32X16_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32x16.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32X16_L2 : AE_MULF32X16_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32x16_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32X16_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf32x16.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let 
Inst{56} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF32X16_L2_S2 : AE_MULF32X16_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf32x16_l2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF32X16_L3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf32x16.l3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF32X16_L3 : AE_MULF32X16_L3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf32x16_l3 
AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF32X16_L3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf32x16.l3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF32X16_L3_S2 : AE_MULF32X16_L3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf32x16_l3_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF48Q32SP16S_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf48q32sp16s.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = 
opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF48Q32SP16S_L : AE_MULF48Q32SP16S_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf48q32sp16s_l AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF48Q32SP16S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf48q32sp16s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF48Q32SP16S_L_S2 : AE_MULF48Q32SP16S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf48q32sp16s_l_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULF48Q32SP16U_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulf48q32sp16u.l $opnd_ae_sem_mul_x2_S1_q0, 
$opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULF48Q32SP16U_L : AE_MULF48Q32SP16U_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulf48q32sp16u_l AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULF48Q32SP16U_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulf48q32sp16u.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = 
ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULF48Q32SP16U_L_S2 : AE_MULF48Q32SP16U_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulf48q32sp16u_l_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULFC24RA_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x4_q0), (ins AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1), "ae_mulfc24ra $opnd_ae_sem_mul_x4_q0, $opnd_ae_sem_mul_x4_d0, $opnd_ae_sem_mul_x4_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x4_q0; +bits<4> opnd_ae_sem_mul_x4_d0; +bits<4> opnd_ae_sem_mul_x4_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x4_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x4_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x4_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x4_q0{3}; +let Inst{24} = opnd_ae_sem_mul_x4_d0{0}; +let Inst{25} = opnd_ae_sem_mul_x4_d0{1}; +let Inst{26} = opnd_ae_sem_mul_x4_d0{2}; +let Inst{27} = opnd_ae_sem_mul_x4_d0{3}; +let Inst{20} = opnd_ae_sem_mul_x4_d1{0}; +let Inst{21} = opnd_ae_sem_mul_x4_d1{1}; +let Inst{22} = opnd_ae_sem_mul_x4_d1{2}; +let Inst{23} = opnd_ae_sem_mul_x4_d1{3}; +} + + + +def AE_MULFC24RA : AE_MULFC24RA_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x4_q0, (int_xtensa_ae_mulfc24ra AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1))]>; + +class AE_MULFC32X16RAS_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x4_q0), (ins AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1), "ae_mulfc32x16ras.h $opnd_ae_sem_mul_x4_q0, $opnd_ae_sem_mul_x4_d0, $opnd_ae_sem_mul_x4_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x4_q0; +bits<4> opnd_ae_sem_mul_x4_d0; 
+bits<4> opnd_ae_sem_mul_x4_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x4_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x4_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x4_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x4_q0{3}; +let Inst{24} = opnd_ae_sem_mul_x4_d0{0}; +let Inst{25} = opnd_ae_sem_mul_x4_d0{1}; +let Inst{26} = opnd_ae_sem_mul_x4_d0{2}; +let Inst{27} = opnd_ae_sem_mul_x4_d0{3}; +let Inst{20} = opnd_ae_sem_mul_x4_d1{0}; +let Inst{21} = opnd_ae_sem_mul_x4_d1{1}; +let Inst{22} = opnd_ae_sem_mul_x4_d1{2}; +let Inst{23} = opnd_ae_sem_mul_x4_d1{3}; +} + + + +def AE_MULFC32X16RAS_H : AE_MULFC32X16RAS_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x4_q0, (int_xtensa_ae_mulfc32x16ras_h AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1))]>; + +class AE_MULFC32X16RAS_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x4_q0), (ins AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1), "ae_mulfc32x16ras.l $opnd_ae_sem_mul_x4_q0, $opnd_ae_sem_mul_x4_d0, $opnd_ae_sem_mul_x4_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x4_q0; +bits<4> opnd_ae_sem_mul_x4_d0; +bits<4> opnd_ae_sem_mul_x4_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x4_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x4_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x4_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x4_q0{3}; +let Inst{24} = opnd_ae_sem_mul_x4_d0{0}; +let Inst{25} = opnd_ae_sem_mul_x4_d0{1}; +let Inst{26} = opnd_ae_sem_mul_x4_d0{2}; +let Inst{27} = opnd_ae_sem_mul_x4_d0{3}; +let Inst{20} = 
opnd_ae_sem_mul_x4_d1{0}; +let Inst{21} = opnd_ae_sem_mul_x4_d1{1}; +let Inst{22} = opnd_ae_sem_mul_x4_d1{2}; +let Inst{23} = opnd_ae_sem_mul_x4_d1{3}; +} + + + +def AE_MULFC32X16RAS_L : AE_MULFC32X16RAS_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x4_q0, (int_xtensa_ae_mulfc32x16ras_l AE_DR:$opnd_ae_sem_mul_x4_d0, AE_DR:$opnd_ae_sem_mul_x4_d1))]>; + +class AE_MULFD24X2_FIR_H_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulfd24x2.fir.h $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULFD24X2_FIR_H : AE_MULFD24X2_FIR_H_AE_FORMAT2<[]>; + +class AE_MULFD24X2_FIR_L_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulfd24x2.fir.l $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; + + +let 
DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULFD24X2_FIR_L : AE_MULFD24X2_FIR_L_AE_FORMAT2<[]>; + +class AE_MULFD32X16X2_FIR_HH_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulfd32x16x2.fir.hh $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} 
+ + + +def AE_MULFD32X16X2_FIR_HH : AE_MULFD32X16X2_FIR_HH_AE_FORMAT2<[]>; + +class AE_MULFD32X16X2_FIR_HL_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulfd32x16x2.fir.hl $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULFD32X16X2_FIR_HL : AE_MULFD32X16X2_FIR_HL_AE_FORMAT2<[]>; + +class AE_MULFD32X16X2_FIR_LH_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulfd32x16x2.fir.lh $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let 
Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULFD32X16X2_FIR_LH : AE_MULFD32X16X2_FIR_LH_AE_FORMAT2<[]>; + +class AE_MULFD32X16X2_FIR_LL_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0, AE_DR:$ae_mul_q1), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d2), "ae_mulfd32x16x2.fir.ll $ae_mul_q0, $ae_mul_q1, $ae_mul_d0, $ae_mul_d1, $ae_mul_d2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_q1; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d2; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{36} = ae_mul_d2{0}; +let Inst{37} = ae_mul_d2{1}; +let Inst{38} = ae_mul_d2{2}; +let Inst{39} = ae_mul_d2{3}; +} + + + +def AE_MULFD32X16X2_FIR_LL : AE_MULFD32X16X2_FIR_LL_AE_FORMAT2<[]>; + +class AE_MULFP16X4RAS_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulfp16x4ras 
$ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{21} = 1; +let Inst{23} = 1; +let Inst{36} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULFP16X4RAS : AE_MULFP16X4RAS_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulfp16x4ras AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULFP16X4S_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulfp16x4s $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{21} = 1; +let Inst{23} = 1; +let Inst{36} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULFP16X4S : AE_MULFP16X4S_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulfp16x4s AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class 
AE_MULFP24X2R_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulfp24x2r $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULFP24X2R : AE_MULFP24X2R_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulfp24x2r AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULFP24X2R_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulfp24x2r_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = 
ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULFP24X2R_S2 : AE_MULFP24X2R_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulfp24x2r_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULFP24X2RA_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulfp24x2ra $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULFP24X2RA : AE_MULFP24X2RA_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulfp24x2ra AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULFP24X2RA_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, 
AE_DR:$ae_mul_S2_d1), "ae_mulfp24x2ra_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULFP24X2RA_S2 : AE_MULFP24X2RA_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulfp24x2ra_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULFP32X16X2RAS_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulfp32x16x2ras.h $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = 
opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULFP32X16X2RAS_H : AE_MULFP32X16X2RAS_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulfp32x16x2ras_h AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULFP32X16X2RAS_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulfp32x16x2ras.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULFP32X16X2RAS_H_S2 : AE_MULFP32X16X2RAS_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulfp32x16x2ras_h_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULFP32X16X2RAS_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulfp32x16x2ras.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> 
opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULFP32X16X2RAS_L : AE_MULFP32X16X2RAS_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulfp32x16x2ras_l AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULFP32X16X2RAS_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulfp32x16x2ras.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = 
ae_mul_S2_d1{3}; +} + + + +def AE_MULFP32X16X2RAS_L_S2 : AE_MULFP32X16X2RAS_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulfp32x16x2ras_l_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULFP32X16X2RS_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulfp32x16x2rs.h $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULFP32X16X2RS_H : AE_MULFP32X16X2RS_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulfp32x16x2rs_h AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULFP32X16X2RS_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulfp32x16x2rs.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format 
+let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULFP32X16X2RS_H_S2 : AE_MULFP32X16X2RS_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulfp32x16x2rs_h_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULFP32X16X2RS_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulfp32x16x2rs.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = 
opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULFP32X16X2RS_L : AE_MULFP32X16X2RS_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulfp32x16x2rs_l AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULFP32X16X2RS_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulfp32x16x2rs.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULFP32X16X2RS_L_S2 : AE_MULFP32X16X2RS_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulfp32x16x2rs_l_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULFP32X2RAS_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulfp32x2ras $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{43} = 1; +let 
Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULFP32X2RAS : AE_MULFP32X2RAS_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulfp32x2ras AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULFP32X2RS_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulfp32x2rs $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def 
AE_MULFP32X2RS : AE_MULFP32X2RS_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulfp32x2rs AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULFQ32SP24S_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulfq32sp24s.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULFQ32SP24S_H_S2 : AE_MULFQ32SP24S_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulfq32sp24s_h_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULFQ32SP24S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulfq32sp24s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = 
ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULFQ32SP24S_L_S2 : AE_MULFQ32SP24S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulfq32sp24s_l_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULP24X2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulp24x2 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULP24X2 : AE_MULP24X2_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulp24x2 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULP24X2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulp24x2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = 
ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULP24X2_S2 : AE_MULP24X2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulp24x2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULP32X16X2_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulp32x16x2.h $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULP32X16X2_H : AE_MULP32X16X2_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulp32x16x2_h AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULP32X16X2_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins 
AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulp32x16x2.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULP32X16X2_L : AE_MULP32X16X2_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulp32x16x2_l AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULP32X2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulp32x2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = 
opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULP32X2 : AE_MULP32X2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulp32x2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULQ32SP16S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulq32sp16s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULQ32SP16S_L_S2 : AE_MULQ32SP16S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulq32sp16s_l_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULQ32SP16U_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulq32sp16u.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand 
decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULQ32SP16U_L_S2 : AE_MULQ32SP16U_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulq32sp16u_l_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULRFQ32SP24S_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulrfq32sp24s.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULRFQ32SP24S_H_S2 : AE_MULRFQ32SP24S_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulrfq32sp24s_h_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULRFQ32SP24S_L_S2_AE_FORMAT pattern> + : 
XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulrfq32sp24s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULRFQ32SP24S_L_S2 : AE_MULRFQ32SP24S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulrfq32sp24s_l_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS16X4_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q1_out, AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q1, AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d0), "ae_muls16x4 $ae_mul_q1, $ae_mul_q0, $ae_mul_d1, $ae_mul_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q1; +bits<4> ae_mul_q0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d0; +let Constraints = "$ae_mul_q1 = $ae_mul_q1_out,@earlyclobber $ae_mul_q1_out, $ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{37} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let 
Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +} + + + +def AE_MULS16X4 : AE_MULS16X4_AE_FORMAT2<[]>; + +class AE_MULS32_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32_HH : AE_MULS32_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32_hh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, 
AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32_LH : AE_MULS32_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32_lh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let 
DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32_LL : AE_MULS32_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32_ll AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32F48P16S_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32f48p16s.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = 
opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32F48P16S_HH : AE_MULS32F48P16S_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_muls32f48p16s_hh AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32F48P16S_HH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32f48p16s.hh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32F48P16S_HH_S2 : AE_MULS32F48P16S_HH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_muls32f48p16s_hh_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS32F48P16S_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32f48p16s.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let 
Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32F48P16S_LH : AE_MULS32F48P16S_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_muls32f48p16s_lh AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32F48P16S_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32f48p16s.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32F48P16S_LH_S2 : AE_MULS32F48P16S_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_muls32f48p16s_lh_s2 
AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS32F48P16S_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32f48p16s.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32F48P16S_LL : AE_MULS32F48P16S_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_muls32f48p16s_ll AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32F48P16S_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32f48p16s.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{63} = 1; +//operands +let 
Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32F48P16S_LL_S2 : AE_MULS32F48P16S_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_muls32f48p16s_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS32U_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32u.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32U_LL : AE_MULS32U_LL_AE_FORMAT48_3<[(set 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32u_ll AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32X16_H0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32x16.h0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32X16_H0 : AE_MULS32X16_H0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32x16_h0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32X16_H0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32x16.h0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, 
$ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32X16_H0_S2 : AE_MULS32X16_H0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_muls32x16_h0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS32X16_H1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32x16.h1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = 
opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32X16_H1 : AE_MULS32X16_H1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32x16_h1 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32X16_H1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32x16.h1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32X16_H1_S2 : AE_MULS32X16_H1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_muls32x16_h1_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS32X16_H2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, 
AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32x16.h2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32X16_H2 : AE_MULS32X16_H2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32x16_h2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32X16_H2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32x16.h2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 
1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32X16_H2_S2 : AE_MULS32X16_H2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_muls32x16_h2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS32X16_H3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32x16.h3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = 
opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32X16_H3 : AE_MULS32X16_H3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32x16_h3 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32X16_H3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32x16.h3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32X16_H3_S2 : AE_MULS32X16_H3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_muls32x16_h3_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS32X16_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32x16.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = 
"$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32X16_L0 : AE_MULS32X16_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32x16_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32X16_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32x16.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; 
+let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32X16_L0_S2 : AE_MULS32X16_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_muls32x16_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS32X16_L1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32x16.l1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32X16_L1 : AE_MULS32X16_L1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32x16_l1 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32X16_L1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), 
"ae_muls32x16.l1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32X16_L1_S2 : AE_MULS32X16_L1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_muls32x16_l1_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS32X16_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32x16.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let 
Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32X16_L2 : AE_MULS32X16_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32x16_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32X16_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32x16.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32X16_L2_S2 : AE_MULS32X16_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_muls32x16_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULS32X16_L3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_muls32x16.l3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULS32X16_L3 : AE_MULS32X16_L3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_muls32x16_l3 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULS32X16_L3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_muls32x16.l3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 
1; +let Inst{3} = 1; +//opcode +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULS32X16_L3_S2 : AE_MULS32X16_L3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_muls32x16_l3_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSAD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsad24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSAD24_HH_LL : AE_MULSAD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsad24_hh_ll AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSAD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), 
"ae_mulsad24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSAD24_HH_LL_S2 : AE_MULSAD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsad24_hh_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSAD32X16_H1_L0_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsad32x16.h1.l0 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let 
Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSAD32X16_H1_L0 : AE_MULSAD32X16_H1_L0_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsad32x16_h1_l0 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSAD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsad32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSAD32X16_H1_L0_S2 : AE_MULSAD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsad32x16_h1_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSAD32X16_H3_L2_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsad32x16.h3.l2 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 
1; +//opcode +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{39} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSAD32X16_H3_L2 : AE_MULSAD32X16_H3_L2_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsad32x16_h3_l2 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSAD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsad32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSAD32X16_H3_L2_S2 : AE_MULSAD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsad32x16_h3_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSAFD24_HH_LL_AE_FORMAT 
pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsafd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSAFD24_HH_LL : AE_MULSAFD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsafd24_hh_ll AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSAFD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsafd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; 
+let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSAFD24_HH_LL_S2 : AE_MULSAFD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsafd24_hh_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSAFD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsafd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSAFD32X16_H1_L0 : AE_MULSAFD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsafd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class 
AE_MULSAFD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsafd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSAFD32X16_H1_L0_S2 : AE_MULSAFD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsafd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSAFD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsafd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 
1; +let Inst{42} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSAFD32X16_H3_L2 : AE_MULSAFD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsafd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSAFD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsafd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSAFD32X16_H3_L2_S2 : AE_MULSAFD32X16_H3_L2_S2_AE_FORMAT<[(set 
AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsafd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF16SS_00_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf16ss.00 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF16SS_00 : AE_MULSF16SS_00_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf16ss_00 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF16SS_00_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf16ss.00_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> 
ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF16SS_00_S2 : AE_MULSF16SS_00_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf16ss_00_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF16SS_10_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsf16ss.10 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def 
AE_MULSF16SS_10 : AE_MULSF16SS_10_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsf16ss_10 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSF16SS_11_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsf16ss.11 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +let Inst{39} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSF16SS_11 : AE_MULSF16SS_11_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsf16ss_11 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSF16SS_20_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsf16ss.20 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let 
Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSF16SS_20 : AE_MULSF16SS_20_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsf16ss_20 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSF16SS_21_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsf16ss.21 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{39} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSF16SS_21 : AE_MULSF16SS_21_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsf16ss_21 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSF16SS_22_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsf16ss.22 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = 
$ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{36} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSF16SS_22 : AE_MULSF16SS_22_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsf16ss_22 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSF16SS_30_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsf16ss.30 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{36} = 1; +let Inst{39} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSF16SS_30 : AE_MULSF16SS_30_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsf16ss_30 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSF16SS_31_AE_FORMAT2 
pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsf16ss.31 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{21} = 1; +let Inst{36} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSF16SS_31 : AE_MULSF16SS_31_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsf16ss_31 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSF16SS_32_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsf16ss.32 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{21} = 1; +let Inst{36} = 1; +let Inst{39} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = 
ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSF16SS_32 : AE_MULSF16SS_32_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsf16ss_32 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSF16SS_33_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsf16ss.33 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{36} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSF16SS_33 : AE_MULSF16SS_33_AE_FORMAT2<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsf16ss_33 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSF16X4SS_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q1_out, AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q1, AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d1, AE_DR:$ae_mul_d0), "ae_mulsf16x4ss $ae_mul_q1, $ae_mul_q0, $ae_mul_d1, $ae_mul_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q1; +bits<4> ae_mul_q0; +bits<4> ae_mul_d1; +bits<4> ae_mul_d0; +let Constraints = "$ae_mul_q1 = $ae_mul_q1_out,@earlyclobber $ae_mul_q1_out, $ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = 
"HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{37} = 1; +let Inst{39} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{20} = ae_mul_q1{0}; +let Inst{21} = ae_mul_q1{1}; +let Inst{22} = ae_mul_q1{2}; +let Inst{23} = ae_mul_q1{3}; +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +} + + + +def AE_MULSF16X4SS : AE_MULSF16X4SS_AE_FORMAT2<[]>; + +class AE_MULSF32R_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32r.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = 
opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32R_HH : AE_MULSF32R_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32r_hh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32R_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32r.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32R_LH : AE_MULSF32R_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32r_lh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32R_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), 
(ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32r.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32R_LL : AE_MULSF32R_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32r_ll AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32R_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf32r.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} 
= 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF32R_LL_S2 : AE_MULSF32R_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf32r_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF32S_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32s.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32S_HH : 
AE_MULSF32S_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32s_hh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32S_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32s.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32S_LH : AE_MULSF32S_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32s_lh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32S_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32s.ll 
$opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32S_LL : AE_MULSF32S_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32s_ll AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32X16_H0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32x16.h0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; 
+let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32X16_H0 : AE_MULSF32X16_H0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32x16_h0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32X16_H0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf32x16.h0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF32X16_H0_S2 : 
AE_MULSF32X16_H0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf32x16_h0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF32X16_H1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32x16.h1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32X16_H1 : AE_MULSF32X16_H1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32x16_h1 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32X16_H1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf32x16.h1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> 
+{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF32X16_H1_S2 : AE_MULSF32X16_H1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf32x16_h1_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF32X16_H2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32x16.h2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = 
opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32X16_H2 : AE_MULSF32X16_H2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32x16_h2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32X16_H2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf32x16.h2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF32X16_H2_S2 : AE_MULSF32X16_H2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf32x16_h2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF32X16_H3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32x16.h3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32X16_H3 : AE_MULSF32X16_H3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32x16_h3 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32X16_H3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf32x16.h3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 
1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF32X16_H3_S2 : AE_MULSF32X16_H3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf32x16_h3_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF32X16_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32x16.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; 
+let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32X16_L0 : AE_MULSF32X16_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32x16_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32X16_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf32x16.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF32X16_L0_S2 : AE_MULSF32X16_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf32x16_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF32X16_L1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32x16.l1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> 
opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32X16_L1 : AE_MULSF32X16_L1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32x16_l1 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32X16_L1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf32x16.l1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = 
ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF32X16_L1_S2 : AE_MULSF32X16_L1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf32x16_l1_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF32X16_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32x16.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32X16_L2 : AE_MULSF32X16_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32x16_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class 
AE_MULSF32X16_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf32x16.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF32X16_L2_S2 : AE_MULSF32X16_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf32x16_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF32X16_L3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf32x16.l3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let 
Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF32X16_L3 : AE_MULSF32X16_L3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf32x16_l3 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF32X16_L3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf32x16.l3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF32X16_L3_S2 : AE_MULSF32X16_L3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, 
(int_xtensa_ae_mulsf32x16_l3_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF48Q32SP16S_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf48q32sp16s.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF48Q32SP16S_L : AE_MULSF48Q32SP16S_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf48q32sp16s_l AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF48Q32SP16S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf48q32sp16s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> 
ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF48Q32SP16S_L_S2 : AE_MULSF48Q32SP16S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf48q32sp16s_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSF48Q32SP16U_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsf48q32sp16u.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; 
// ---------------------------------------------------------------------------
// NOTE(review): This region is residue of a git patch, not clean source: each
// physical line below fuses many unified-diff lines (the leading "+" markers
// are still embedded inline), so the text is not valid TableGen as-is.
// Semantically it carries auto-generated TableGen records for Xtensa HiFi3
// audio-engine multiply/multiply-subtract instructions (AE_MULS*):
//   * Each "class ..._AE_FORMAT*" record declares an instruction encoding
//     derived from XtensaInst48 or XtensaInst64 (presumably the 48-bit and
//     64-bit encoded forms -- confirm against the Xtensa instruction-format
//     definitions), with one accumulator operand ($..._q0, tied to
//     $..._q0_out and marked @earlyclobber in the Constraints string) and two
//     data operands ($..._d0, $..._d1), all in the AE_DR register class.
//   * The "let Inst{N} = ..." assignments are exact machine-encoding data:
//     fixed format/opcode bits plus the 4-bit operand register numbers
//     spliced into the encoding. Do not edit these bit positions by hand.
//   * Each "def AE_..." instantiates its class with a selection pattern that
//     wires the instruction to the matching int_xtensa_ae_* intrinsic.
//   * All records carry Requires<[HasHIFI3]> and decode in the "HIFI3"
//     DecoderNamespace.
// The first and last records in this region are truncated by the chunk
// boundaries; do not reconstruct them from this fragment alone.
// ---------------------------------------------------------------------------
+let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSF48Q32SP16U_L : AE_MULSF48Q32SP16U_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsf48q32sp16u_l AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSF48Q32SP16U_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsf48q32sp16u.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSF48Q32SP16U_L_S2 : AE_MULSF48Q32SP16U_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsf48q32sp16u_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSFP24X2R_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, 
AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsfp24x2r $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSFP24X2R : AE_MULSFP24X2R_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsfp24x2r AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSFP24X2R_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsfp24x2r_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let 
Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSFP24X2R_S2 : AE_MULSFP24X2R_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsfp24x2r_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSFP24X2RA_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsfp24x2ra $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = 
opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSFP24X2RA : AE_MULSFP24X2RA_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsfp24x2ra AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSFP24X2RA_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsfp24x2ra_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSFP24X2RA_S2 : AE_MULSFP24X2RA_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsfp24x2ra_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSFP32X16X2RAS_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsfp32x16x2ras.h $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let 
Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSFP32X16X2RAS_H : AE_MULSFP32X16X2RAS_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsfp32x16x2ras_h AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSFP32X16X2RAS_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsfp32x16x2ras.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = 
ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSFP32X16X2RAS_H_S2 : AE_MULSFP32X16X2RAS_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsfp32x16x2ras_h_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSFP32X16X2RAS_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsfp32x16x2ras.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSFP32X16X2RAS_L : AE_MULSFP32X16X2RAS_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsfp32x16x2ras_l AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSFP32X16X2RAS_L_S2_AE_FORMAT pattern> + : 
XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsfp32x16x2ras.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSFP32X16X2RAS_L_S2 : AE_MULSFP32X16X2RAS_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsfp32x16x2ras_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSFP32X16X2RS_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsfp32x16x2rs.h $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{44} = 1; +let Inst{46} = 1; 
+//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSFP32X16X2RS_H : AE_MULSFP32X16X2RS_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsfp32x16x2rs_h AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSFP32X16X2RS_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsfp32x16x2rs.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSFP32X16X2RS_H_S2 : AE_MULSFP32X16X2RS_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, 
(int_xtensa_ae_mulsfp32x16x2rs_h_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSFP32X16X2RS_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsfp32x16x2rs.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSFP32X16X2RS_L : AE_MULSFP32X16X2RS_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsfp32x16x2rs_l AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSFP32X16X2RS_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsfp32x16x2rs.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> 
ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSFP32X16X2RS_L_S2 : AE_MULSFP32X16X2RS_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsfp32x16x2rs_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSFP32X2RAS_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsfp32x2ras $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = 
opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSFP32X2RAS : AE_MULSFP32X2RAS_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsfp32x2ras AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSFP32X2RS_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsfp32x2rs $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSFP32X2RS : AE_MULSFP32X2RS_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, 
(int_xtensa_ae_mulsfp32x2rs AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSFQ32SP24S_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsfq32sp24s.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSFQ32SP24S_H_S2 : AE_MULSFQ32SP24S_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsfq32sp24s_h_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSFQ32SP24S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsfq32sp24s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} 
= 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSFQ32SP24S_L_S2 : AE_MULSFQ32SP24S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsfq32sp24s_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSP24X2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulsp24x2 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{53} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSP24X2 : AE_MULSP24X2_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulsp24x2 AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSP24X2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsp24x2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, 
$ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSP24X2_S2 : AE_MULSP24X2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsp24x2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSP32X16X2_H_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsp32x16x2.h $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; 
+let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSP32X16X2_H : AE_MULSP32X16X2_H_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsp32x16x2_h AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSP32X16X2_L_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsp32x16x2.l $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSP32X16X2_L : 
AE_MULSP32X16X2_L_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsp32x16x2_l AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSP32X2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulsp32x2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSP32X2 : AE_MULSP32X2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulsp32x2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSQ32SP16S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsq32sp16s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", 
pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSQ32SP16S_L_S2 : AE_MULSQ32SP16S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsq32sp16s_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSQ32SP16U_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsq32sp16u.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; 
+let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSQ32SP16U_L_S2 : AE_MULSQ32SP16U_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsq32sp16u_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSRFQ32SP24S_H_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsrfq32sp24s.h_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSRFQ32SP24S_H_S2 : AE_MULSRFQ32SP24S_H_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsrfq32sp24s_h_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSRFQ32SP24S_L_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulsrfq32sp24s.l_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = 
$ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSRFQ32SP24S_L_S2 : AE_MULSRFQ32SP24S_L_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulsrfq32sp24s_l_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSS32F48P16S_HH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulss32f48p16s.hh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; 
+let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSS32F48P16S_HH : AE_MULSS32F48P16S_HH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulss32f48p16s_hh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSS32F48P16S_HH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulss32f48p16s.hh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSS32F48P16S_HH_S2 : AE_MULSS32F48P16S_HH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulss32f48p16s_hh_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSS32F48P16S_LH_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), 
"ae_mulss32f48p16s.lh $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSS32F48P16S_LH : AE_MULSS32F48P16S_LH_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulss32f48p16s_lh AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSS32F48P16S_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulss32f48p16s.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let 
Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSS32F48P16S_LH_S2 : AE_MULSS32F48P16S_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulss32f48p16s_lh_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSS32F48P16S_LL_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulss32f48p16s.ll $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = 
opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSS32F48P16S_LL : AE_MULSS32F48P16S_LL_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulss32f48p16s_ll AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSS32F48P16S_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulss32f48p16s.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSS32F48P16S_LL_S2 : AE_MULSS32F48P16S_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulss32f48p16s_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulssd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + 
+let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSSD24_HH_LL : AE_MULSSD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulssd24_hh_ll AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSSD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSD24_HH_LL_S2 : AE_MULSSD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssd24_hh_ll_s2 
AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulssd24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{50} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSSD24_HL_LH : AE_MULSSD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulssd24_hl_lh AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSSD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssd24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = 
ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSD24_HL_LH_S2 : AE_MULSSD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssd24_hl_lh_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulssd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSSD32X16_H1_L0 : AE_MULSSD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulssd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, 
AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSSD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSD32X16_H1_L0_S2 : AE_MULSSD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulssd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; 
+ +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSSD32X16_H3_L2 : AE_MULSSD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulssd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSSD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = 
ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSD32X16_H3_L2_S2 : AE_MULSSD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSFD16SS_11_00_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulssfd16ss.11_00 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSSFD16SS_11_00 : AE_MULSSFD16SS_11_00_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulssfd16ss_11_00 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSSFD16SS_11_00_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, 
AE_DR:$ae_mul_S2_d1), "ae_mulssfd16ss.11_00_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSFD16SS_11_00_S2 : AE_MULSSFD16SS_11_00_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssfd16ss_11_00_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSFD16SS_13_02_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulssfd16ss.13_02 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = 
opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSSFD16SS_13_02 : AE_MULSSFD16SS_13_02_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulssfd16ss_13_02 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSSFD16SS_13_02_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssfd16ss.13_02_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSFD16SS_13_02_S2 : AE_MULSSFD16SS_13_02_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssfd16ss_13_02_s2 AE_DR:$ae_mul_S2_q0, 
AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSFD16SS_33_22_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulssfd16ss.33_22 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSSFD16SS_33_22 : AE_MULSSFD16SS_33_22_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulssfd16ss_33_22 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSSFD16SS_33_22_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssfd16ss.33_22_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; 
+bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSFD16SS_33_22_S2 : AE_MULSSFD16SS_33_22_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssfd16ss_33_22_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSFD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulssfd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSSFD24_HH_LL : 
AE_MULSSFD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulssfd24_hh_ll AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSSFD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssfd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSFD24_HH_LL_S2 : AE_MULSSFD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssfd24_hh_ll_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSFD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0_out), (ins AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulssfd24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; +let Constraints = "$ae_mul_q0 = $ae_mul_q0_out,@earlyclobber $ae_mul_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{49} = 1; +let 
Inst{51} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULSSFD24_HL_LH : AE_MULSSFD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0_out, (int_xtensa_ae_mulssfd24_hl_lh AE_DR:$ae_mul_q0, AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULSSFD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssfd24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSFD24_HL_LH_S2 : AE_MULSSFD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssfd24_hl_lh_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSFD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulssfd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSSFD32X16_H1_L0 : AE_MULSSFD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulssfd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSSFD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssfd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber 
$ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSFD32X16_H1_L0_S2 : AE_MULSSFD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssfd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULSSFD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulssfd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; +let Constraints = "$opnd_ae_sem_mul_x2_S1_q0 = $opnd_ae_sem_mul_x2_S1_q0_out,@earlyclobber $opnd_ae_sem_mul_x2_S1_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = 
opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULSSFD32X16_H3_L2 : AE_MULSSFD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0_out, (int_xtensa_ae_mulssfd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_q0, AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULSSFD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0_out), (ins AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulssfd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; +let Constraints = "$ae_mul_S2_q0 = $ae_mul_S2_q0_out,@earlyclobber $ae_mul_S2_q0_out"; + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULSSFD32X16_H3_L2_S2 : AE_MULSSFD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0_out, (int_xtensa_ae_mulssfd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_q0, AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzaad24.hh.ll $ae_mul_q0, $ae_mul_d0, 
$ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZAAD24_HH_LL : AE_MULZAAD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzaad24_hh_ll AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZAAD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaad24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAD24_HH_LL_S2 : AE_MULZAAD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, 
(int_xtensa_ae_mulzaad24_hh_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzaad24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZAAD24_HL_LH : AE_MULZAAD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzaad24_hl_lh AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZAAD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaad24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = 
ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAD24_HL_LH_S2 : AE_MULZAAD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaad24_hl_lh_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAD32X16_H0_L1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaad32x16.h0.l1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAD32X16_H0_L1 : AE_MULZAAD32X16_H0_L1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaad32x16_h0_l1 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAD32X16_H0_L1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaad32x16.h0.l1_s2 $ae_mul_S2_q0, 
$ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAD32X16_H0_L1_S2 : AE_MULZAAD32X16_H0_L1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaad32x16_h0_l1_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaad32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = 
opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAD32X16_H1_L0 : AE_MULZAAD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaad32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaad32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAD32X16_H1_L0_S2 : AE_MULZAAD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaad32x16_h1_l0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAD32X16_H2_L3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaad32x16.h2.l3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format 
+let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAD32X16_H2_L3 : AE_MULZAAD32X16_H2_L3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaad32x16_h2_l3 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAD32X16_H2_L3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaad32x16.h2.l3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAD32X16_H2_L3_S2 : AE_MULZAAD32X16_H2_L3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaad32x16_h2_l3_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + 
+class AE_MULZAAD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaad32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAD32X16_H3_L2 : AE_MULZAAD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaad32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaad32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = 
ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAD32X16_H3_L2_S2 : AE_MULZAAD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaad32x16_h3_l2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAFD16SS_11_00_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaafd16ss.11_00 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAFD16SS_11_00 : AE_MULZAAFD16SS_11_00_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaafd16ss_11_00 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAFD16SS_11_00_S2_AE_FORMAT pattern> + : XtensaInst64<(outs 
AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaafd16ss.11_00_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAFD16SS_11_00_S2 : AE_MULZAAFD16SS_11_00_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaafd16ss_11_00_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAFD16SS_13_02_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaafd16ss.13_02 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = 
opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAFD16SS_13_02 : AE_MULZAAFD16SS_13_02_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaafd16ss_13_02 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAFD16SS_13_02_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaafd16ss.13_02_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAFD16SS_13_02_S2 : AE_MULZAAFD16SS_13_02_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaafd16ss_13_02_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAFD16SS_33_22_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaafd16ss.33_22 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> 
opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAFD16SS_33_22 : AE_MULZAAFD16SS_33_22_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaafd16ss_33_22 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAFD16SS_33_22_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaafd16ss.33_22_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def 
AE_MULZAAFD16SS_33_22_S2 : AE_MULZAAFD16SS_33_22_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaafd16ss_33_22_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAFD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzaafd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZAAFD24_HH_LL : AE_MULZAAFD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzaafd24_hh_ll AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZAAFD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaafd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = 
ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAFD24_HH_LL_S2 : AE_MULZAAFD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaafd24_hh_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAFD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzaafd24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{51} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZAAFD24_HL_LH : AE_MULZAAFD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzaafd24_hl_lh AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZAAFD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaafd24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let 
Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAFD24_HL_LH_S2 : AE_MULZAAFD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaafd24_hl_lh_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAFD32X16_H0_L1_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaafd32x16.h0.l1 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAFD32X16_H0_L1 : AE_MULZAAFD32X16_H0_L1_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaafd32x16_h0_l1 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class 
AE_MULZAAFD32X16_H0_L1_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaafd32x16.h0.l1_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAFD32X16_H0_L1_S2 : AE_MULZAAFD32X16_H0_L1_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaafd32x16_h0_l1_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAFD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaafd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = 
opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAFD32X16_H1_L0 : AE_MULZAAFD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaafd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAFD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaafd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAFD32X16_H1_L0_S2 : AE_MULZAAFD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaafd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAFD32X16_H2_L3_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaafd32x16.h2.l3 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", 
pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAFD32X16_H2_L3 : AE_MULZAAFD32X16_H2_L3_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaafd32x16_h2_l3 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAFD32X16_H2_L3_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaafd32x16.h2.l3_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = 
ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAFD32X16_H2_L3_S2 : AE_MULZAAFD32X16_H2_L3_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaafd32x16_h2_l3_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZAAFD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzaafd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZAAFD32X16_H3_L2 : AE_MULZAAFD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzaafd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZAAFD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzaafd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand 
decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZAAFD32X16_H3_L2_S2 : AE_MULZAAFD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzaafd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZASD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzasd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZASD24_HH_LL : AE_MULZASD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzasd24_hh_ll AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZASD24_HH_LL_S2_AE_FORMAT pattern> + : 
XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzasd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZASD24_HH_LL_S2 : AE_MULZASD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzasd24_hh_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZASD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzasd24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + 
+def AE_MULZASD24_HL_LH : AE_MULZASD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzasd24_hl_lh AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZASD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzasd24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZASD24_HL_LH_S2 : AE_MULZASD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzasd24_hl_lh_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZASD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzasd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = 
opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZASD32X16_H1_L0 : AE_MULZASD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzasd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZASD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzasd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZASD32X16_H1_L0_S2 : AE_MULZASD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzasd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZASD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzasd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{43} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZASD32X16_H3_L2 : AE_MULZASD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzasd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZASD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzasd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = 
ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZASD32X16_H3_L2_S2 : AE_MULZASD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzasd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZASFD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzasfd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZASFD24_HH_LL : AE_MULZASFD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzasfd24_hh_ll AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZASFD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzasfd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{60} = 1; +let Inst{62} = 1; 
+let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZASFD24_HH_LL_S2 : AE_MULZASFD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzasfd24_hh_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZASFD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzasfd24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZASFD24_HL_LH : AE_MULZASFD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzasfd24_hl_lh AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZASFD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzasfd24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let 
DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZASFD24_HL_LH_S2 : AE_MULZASFD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzasfd24_hl_lh_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZASFD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzasfd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = 
opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZASFD32X16_H1_L0 : AE_MULZASFD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzasfd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZASFD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzasfd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZASFD32X16_H1_L0_S2 : AE_MULZASFD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzasfd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZASFD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzasfd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{42} = 1; +let 
Inst{43} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZASFD32X16_H3_L2 : AE_MULZASFD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzasfd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZASFD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzasfd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZASFD32X16_H3_L2_S2 : AE_MULZASFD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzasfd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class 
AE_MULZSAD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzsad24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{50} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZSAD24_HH_LL : AE_MULZSAD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzsad24_hh_ll AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZSAD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzsad24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = 
ae_mul_S2_d1{3}; +} + + + +def AE_MULZSAD24_HH_LL_S2 : AE_MULZSAD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzsad24_hh_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSAD32X16_H1_L0_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzsad32x16.h1.l0 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{36} = 1; +let Inst{37} = 1; +let Inst{39} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZSAD32X16_H1_L0 : AE_MULZSAD32X16_H1_L0_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzsad32x16_h1_l0 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZSAD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzsad32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = 
ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSAD32X16_H1_L0_S2 : AE_MULZSAD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzsad32x16_h1_l0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSAD32X16_H3_L2_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzsad32x16.h3.l2 $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{23} = 1; +let Inst{36} = 1; +let Inst{37} = 1; +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZSAD32X16_H3_L2 : AE_MULZSAD32X16_H3_L2_AE_FORMAT2<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzsad32x16_h3_l2 AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZSAD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzsad32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; 
+let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSAD32X16_H3_L2_S2 : AE_MULZSAD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzsad32x16_h3_l2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSAFD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzsafd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZSAFD24_HH_LL : AE_MULZSAFD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzsafd24_hh_ll AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZSAFD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzsafd24.hh.ll_s2 $ae_mul_S2_q0, 
$ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSAFD24_HH_LL_S2 : AE_MULZSAFD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzsafd24_hh_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSAFD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzsafd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = 
opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZSAFD32X16_H1_L0 : AE_MULZSAFD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzsafd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZSAFD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzsafd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSAFD32X16_H1_L0_S2 : AE_MULZSAFD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzsafd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSAFD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzsafd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + 
+let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZSAFD32X16_H3_L2 : AE_MULZSAFD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzsafd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZSAFD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzsafd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def 
AE_MULZSAFD32X16_H3_L2_S2 : AE_MULZSAFD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzsafd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSSD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzssd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZSSD24_HH_LL : AE_MULZSSD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzssd24_hh_ll AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZSSD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let 
Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSD24_HH_LL_S2 : AE_MULZSSD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssd24_hh_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSSD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzssd24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZSSD24_HL_LH : AE_MULZSSD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzssd24_hl_lh AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZSSD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssd24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; 
+let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSD24_HL_LH_S2 : AE_MULZSSD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssd24_hl_lh_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSSD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzssd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZSSD32X16_H1_L0 : AE_MULZSSD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzssd32x16_h1_l0 
AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZSSD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSD32X16_H1_L0_S2 : AE_MULZSSD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSSD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzssd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; 
+let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZSSD32X16_H3_L2 : AE_MULZSSD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzssd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZSSD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSD32X16_H3_L2_S2 : AE_MULZSSD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSSFD16SS_11_00_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs 
AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzssfd16ss.11_00 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZSSFD16SS_11_00 : AE_MULZSSFD16SS_11_00_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzssfd16ss_11_00 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZSSFD16SS_11_00_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssfd16ss.11_00_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = 
ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSFD16SS_11_00_S2 : AE_MULZSSFD16SS_11_00_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssfd16ss_11_00_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSSFD16SS_13_02_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzssfd16ss.13_02 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{40} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZSSFD16SS_13_02 : AE_MULZSSFD16SS_13_02_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzssfd16ss_13_02 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZSSFD16SS_13_02_S2_AE_FORMAT 
pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssfd16ss.13_02_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSFD16SS_13_02_S2 : AE_MULZSSFD16SS_13_02_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssfd16ss_13_02_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSSFD16SS_33_22_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzssfd16ss.33_22 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let 
Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZSSFD16SS_33_22 : AE_MULZSSFD16SS_33_22_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzssfd16ss_33_22 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZSSFD16SS_33_22_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssfd16ss.33_22_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSFD16SS_33_22_S2 : AE_MULZSSFD16SS_33_22_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssfd16ss_33_22_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSSFD24_HH_LL_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzssfd24.hh.ll $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let 
Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZSSFD24_HH_LL : AE_MULZSSFD24_HH_LL_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzssfd24_hh_ll AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZSSFD24_HH_LL_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssfd24.hh.ll_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSFD24_HH_LL_S2 : AE_MULZSSFD24_HH_LL_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssfd24_hh_ll_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSSFD24_HL_LH_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_q0), (ins AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1), "ae_mulzssfd24.hl.lh $ae_mul_q0, $ae_mul_d0, $ae_mul_d1", pattern>, + 
Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_q0; +bits<4> ae_mul_d0; +bits<4> ae_mul_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_mul_q0{0}; +let Inst{17} = ae_mul_q0{1}; +let Inst{18} = ae_mul_q0{2}; +let Inst{19} = ae_mul_q0{3}; +let Inst{24} = ae_mul_d0{0}; +let Inst{25} = ae_mul_d0{1}; +let Inst{26} = ae_mul_d0{2}; +let Inst{27} = ae_mul_d0{3}; +let Inst{32} = ae_mul_d1{0}; +let Inst{33} = ae_mul_d1{1}; +let Inst{34} = ae_mul_d1{2}; +let Inst{35} = ae_mul_d1{3}; +} + + + +def AE_MULZSSFD24_HL_LH : AE_MULZSSFD24_HL_LH_AE_FORMAT<[(set AE_DR:$ae_mul_q0, (int_xtensa_ae_mulzssfd24_hl_lh AE_DR:$ae_mul_d0, AE_DR:$ae_mul_d1))]>; + +class AE_MULZSSFD24_HL_LH_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssfd24.hl.lh_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSFD24_HL_LH_S2 : AE_MULZSSFD24_HL_LH_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssfd24_hl_lh_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class 
AE_MULZSSFD32X16_H1_L0_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzssfd32x16.h1.l0 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{39} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZSSFD32X16_H1_L0 : AE_MULZSSFD32X16_H1_L0_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzssfd32x16_h1_l0 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + +class AE_MULZSSFD32X16_H1_L0_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssfd32x16.h1.l0_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = 
ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSFD32X16_H1_L0_S2 : AE_MULZSSFD32X16_H1_L0_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssfd32x16_h1_l0_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_MULZSSFD32X16_H3_L2_AE_FORMAT48_3 pattern> + : XtensaInst48<(outs AE_DR:$opnd_ae_sem_mul_x2_S1_q0), (ins AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1), "ae_mulzssfd32x16.h3.l2 $opnd_ae_sem_mul_x2_S1_q0, $opnd_ae_sem_mul_x2_S1_d0, $opnd_ae_sem_mul_x2_S1_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> opnd_ae_sem_mul_x2_S1_q0; +bits<4> opnd_ae_sem_mul_x2_S1_d0; +bits<4> opnd_ae_sem_mul_x2_S1_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{42} = 1; +let Inst{44} = 1; +let Inst{45} = 1; +let Inst{46} = 1; +//operands +let Inst{16} = opnd_ae_sem_mul_x2_S1_q0{0}; +let Inst{17} = opnd_ae_sem_mul_x2_S1_q0{1}; +let Inst{18} = opnd_ae_sem_mul_x2_S1_q0{2}; +let Inst{19} = opnd_ae_sem_mul_x2_S1_q0{3}; +let Inst{20} = opnd_ae_sem_mul_x2_S1_d0{0}; +let Inst{21} = opnd_ae_sem_mul_x2_S1_d0{1}; +let Inst{22} = opnd_ae_sem_mul_x2_S1_d0{2}; +let Inst{23} = opnd_ae_sem_mul_x2_S1_d0{3}; +let Inst{24} = opnd_ae_sem_mul_x2_S1_d1{0}; +let Inst{25} = opnd_ae_sem_mul_x2_S1_d1{1}; +let Inst{26} = opnd_ae_sem_mul_x2_S1_d1{2}; +let Inst{27} = opnd_ae_sem_mul_x2_S1_d1{3}; +} + + + +def AE_MULZSSFD32X16_H3_L2 : AE_MULZSSFD32X16_H3_L2_AE_FORMAT48_3<[(set AE_DR:$opnd_ae_sem_mul_x2_S1_q0, (int_xtensa_ae_mulzssfd32x16_h3_l2 AE_DR:$opnd_ae_sem_mul_x2_S1_d0, AE_DR:$opnd_ae_sem_mul_x2_S1_d1))]>; + 
+class AE_MULZSSFD32X16_H3_L2_S2_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_mul_S2_q0), (ins AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1), "ae_mulzssfd32x16.h3.l2_s2 $ae_mul_S2_q0, $ae_mul_S2_d0, $ae_mul_S2_d1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_mul_S2_q0; +bits<4> ae_mul_S2_d0; +bits<4> ae_mul_S2_d1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_mul_S2_q0{0}; +let Inst{21} = ae_mul_S2_q0{1}; +let Inst{22} = ae_mul_S2_q0{2}; +let Inst{23} = ae_mul_S2_q0{3}; +let Inst{36} = ae_mul_S2_d0{0}; +let Inst{37} = ae_mul_S2_d0{1}; +let Inst{38} = ae_mul_S2_d0{2}; +let Inst{39} = ae_mul_S2_d0{3}; +let Inst{28} = ae_mul_S2_d1{0}; +let Inst{29} = ae_mul_S2_d1{1}; +let Inst{30} = ae_mul_S2_d1{2}; +let Inst{31} = ae_mul_S2_d1{3}; +} + + + +def AE_MULZSSFD32X16_H3_L2_S2 : AE_MULZSSFD32X16_H3_L2_S2_AE_FORMAT<[(set AE_DR:$ae_mul_S2_q0, (int_xtensa_ae_mulzssfd32x16_h3_l2_s2 AE_DR:$ae_mul_S2_d0, AE_DR:$ae_mul_S2_d1))]>; + +class AE_NAND_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_dr_to_dr_v), (ins AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1), "ae_nand $ae_dr_to_dr_v, $ae_dr_to_dr_v0, $ae_dr_to_dr_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_dr_to_dr_v; +bits<4> ae_dr_to_dr_v0; +bits<4> ae_dr_to_dr_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_dr_to_dr_v{0}; +let Inst{21} = ae_dr_to_dr_v{1}; +let Inst{22} = ae_dr_to_dr_v{2}; +let Inst{23} = ae_dr_to_dr_v{3}; +let Inst{36} = ae_dr_to_dr_v0{0}; +let Inst{37} = ae_dr_to_dr_v0{1}; +let Inst{38} = ae_dr_to_dr_v0{2}; +let Inst{39} = ae_dr_to_dr_v0{3}; +let Inst{28} = 
ae_dr_to_dr_v1{0}; +let Inst{29} = ae_dr_to_dr_v1{1}; +let Inst{30} = ae_dr_to_dr_v1{2}; +let Inst{31} = ae_dr_to_dr_v1{3}; +} + + + +def AE_NAND : AE_NAND_AE_FORMAT1<[(set AE_DR:$ae_dr_to_dr_v, (int_xtensa_ae_nand AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1))]>; + +class AE_NEG16S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_neg16s $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_NEG16S : AE_NEG16S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_neg16s AE_DR:$ae_arth_v1))]>; + +class AE_NEG24S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_neg24s $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{27} = 1; +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_NEG24S : AE_NEG24S_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_neg24s AE_DR:$ae_arth_v1))]>; + +class AE_NEG32_AE_FORMAT pattern> + : XtensaInst64<(outs 
AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_neg32 $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{24} = 1; +let Inst{27} = 1; +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_NEG32 : AE_NEG32_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_neg32 AE_DR:$ae_arth_v1))]>; + +class AE_NEG32S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_neg32s $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_NEG32S : AE_NEG32S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_neg32s AE_DR:$ae_arth_v1))]>; + +class AE_NEG64_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_neg64 $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{10} = 1; +let 
Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_NEG64 : AE_NEG64_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_neg64 AE_DR:$ae_arth_v1))]>; + +class AE_NEG64S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_neg64s $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{26} = 1; +let Inst{27} = 1; +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{51} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_NEG64S : AE_NEG64S_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_neg64s AE_DR:$ae_arth_v1))]>; + +class AE_NSA64_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_nsa64 $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ae_dr_to_ar_v0{0}; +let Inst{9} = ae_dr_to_ar_v0{1}; +let Inst{10} = ae_dr_to_ar_v0{2}; +let Inst{11} = ae_dr_to_ar_v0{3}; +} + + 
+ +def AE_NSA64 : AE_NSA64_X24<[(set AR:$arr, (int_xtensa_ae_nsa64 AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_NSAZ16_0_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_nsaz16.0 $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ae_dr_to_ar_v0{0}; +let Inst{9} = ae_dr_to_ar_v0{1}; +let Inst{10} = ae_dr_to_ar_v0{2}; +let Inst{11} = ae_dr_to_ar_v0{3}; +} + + + +def AE_NSAZ16_0 : AE_NSAZ16_0_X24<[(set AR:$arr, (int_xtensa_ae_nsaz16_0 AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_NSAZ32_L_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AE_DR:$ae_dr_to_ar_v0), "ae_nsaz32.l $arr, $ae_dr_to_ar_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ae_dr_to_ar_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ae_dr_to_ar_v0{0}; +let Inst{9} = ae_dr_to_ar_v0{1}; +let Inst{10} = ae_dr_to_ar_v0{2}; +let Inst{11} = ae_dr_to_ar_v0{3}; +} + + + +def AE_NSAZ32_L : AE_NSAZ32_L_X24<[(set AR:$arr, (int_xtensa_ae_nsaz32_l AE_DR:$ae_dr_to_ar_v0))]>; + +class AE_OR_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_dr_to_dr_v), (ins AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1), "ae_or $ae_dr_to_dr_v, $ae_dr_to_dr_v0, $ae_dr_to_dr_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_dr_to_dr_v; +bits<4> ae_dr_to_dr_v0; +bits<4> ae_dr_to_dr_v1; + + +let DecoderNamespace = 
"HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +//operands +let Inst{12} = ae_dr_to_dr_v{0}; +let Inst{13} = ae_dr_to_dr_v{1}; +let Inst{14} = ae_dr_to_dr_v{2}; +let Inst{15} = ae_dr_to_dr_v{3}; +let Inst{8} = ae_dr_to_dr_v0{0}; +let Inst{9} = ae_dr_to_dr_v0{1}; +let Inst{10} = ae_dr_to_dr_v0{2}; +let Inst{11} = ae_dr_to_dr_v0{3}; +let Inst{4} = ae_dr_to_dr_v1{0}; +let Inst{5} = ae_dr_to_dr_v1{1}; +let Inst{6} = ae_dr_to_dr_v1{2}; +let Inst{7} = ae_dr_to_dr_v1{3}; +} + + + +def AE_OR : AE_OR_X24<[(set AE_DR:$ae_dr_to_dr_v, (int_xtensa_ae_or AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1))]>; + +class AE_PKSR24_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_pks_d_out), (ins AE_DR:$ae_pks_d, AE_DR:$ae_pks_s, uimm2:$ae_imm2), "ae_pksr24 $ae_pks_d, $ae_pks_s, $ae_imm2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_pks_d; +bits<4> ae_pks_s; +bits<2> ae_imm2; +let Constraints = "$ae_pks_d = $ae_pks_d_out,@earlyclobber $ae_pks_d_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_pks_d{0}; +let Inst{13} = ae_pks_d{1}; +let Inst{14} = ae_pks_d{2}; +let Inst{15} = ae_pks_d{3}; +let Inst{4} = ae_pks_s{0}; +let Inst{5} = ae_pks_s{1}; +let Inst{6} = ae_pks_s{2}; +let Inst{7} = ae_pks_s{3}; +let Inst{10} = ae_imm2{0}; +let Inst{11} = ae_imm2{1}; +} + + + +def AE_PKSR24 : AE_PKSR24_X24<[(set AE_DR:$ae_pks_d_out, (int_xtensa_ae_pksr24 AE_DR:$ae_pks_d, AE_DR:$ae_pks_s, timm:$ae_imm2))]>; + +class AE_PKSR32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_pks_d_out), (ins AE_DR:$ae_pks_d, AE_DR:$ae_pks_s, uimm2:$ae_imm2), "ae_pksr32 $ae_pks_d, $ae_pks_s, $ae_imm2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_pks_d; +bits<4> ae_pks_s; +bits<2> ae_imm2; +let Constraints = "$ae_pks_d = $ae_pks_d_out,@earlyclobber $ae_pks_d_out"; + +let DecoderNamespace = 
"HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_pks_d{0}; +let Inst{13} = ae_pks_d{1}; +let Inst{14} = ae_pks_d{2}; +let Inst{15} = ae_pks_d{3}; +let Inst{4} = ae_pks_s{0}; +let Inst{5} = ae_pks_s{1}; +let Inst{6} = ae_pks_s{2}; +let Inst{7} = ae_pks_s{3}; +let Inst{10} = ae_imm2{0}; +let Inst{11} = ae_imm2{1}; +} + + + +def AE_PKSR32 : AE_PKSR32_X24<[(set AE_DR:$ae_pks_d_out, (int_xtensa_ae_pksr32 AE_DR:$ae_pks_d, AE_DR:$ae_pks_s, timm:$ae_imm2))]>; + +class AE_ROUND16X4F32SASYM_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1, AE_DR:$ae_arth_v0), "ae_round16x4f32sasym $ae_arth_v, $ae_arth_v1, $ae_arth_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; +bits<4> ae_arth_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +} + + + +def AE_ROUND16X4F32SASYM : AE_ROUND16X4F32SASYM_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_round16x4f32sasym AE_DR:$ae_arth_v1, AE_DR:$ae_arth_v0))]>; + +class AE_ROUND16X4F32SSYM_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1, AE_DR:$ae_arth_v0), "ae_round16x4f32ssym $ae_arth_v, $ae_arth_v1, $ae_arth_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; +bits<4> ae_arth_v0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{58} = 
1; +let Inst{59} = 1; +let Inst{60} = 1; +//operands +let Inst{20} = ae_arth_v{0}; +let Inst{21} = ae_arth_v{1}; +let Inst{22} = ae_arth_v{2}; +let Inst{23} = ae_arth_v{3}; +let Inst{36} = ae_arth_v1{0}; +let Inst{37} = ae_arth_v1{1}; +let Inst{38} = ae_arth_v1{2}; +let Inst{39} = ae_arth_v1{3}; +let Inst{28} = ae_arth_v0{0}; +let Inst{29} = ae_arth_v0{1}; +let Inst{30} = ae_arth_v0{2}; +let Inst{31} = ae_arth_v0{3}; +} + + + +def AE_ROUND16X4F32SSYM : AE_ROUND16X4F32SSYM_AE_FORMAT1<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_round16x4f32ssym AE_DR:$ae_arth_v1, AE_DR:$ae_arth_v0))]>; + +class AE_ROUND24X2F48SASYM_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_round24x2f48sasym $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ROUND24X2F48SASYM : AE_ROUND24X2F48SASYM_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_round24x2f48sasym AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ROUND24X2F48SSYM_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_round24x2f48ssym $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{22} = 1; +//operands 
+let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ROUND24X2F48SSYM : AE_ROUND24X2F48SSYM_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_round24x2f48ssym AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ROUND32X2F48SASYM_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_round32x2f48sasym $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{59} = 1; +let Inst{60} = 1; +//operands +let Inst{20} = ae_arth_v{0}; +let Inst{21} = ae_arth_v{1}; +let Inst{22} = ae_arth_v{2}; +let Inst{23} = ae_arth_v{3}; +let Inst{28} = ae_arth_v0{0}; +let Inst{29} = ae_arth_v0{1}; +let Inst{30} = ae_arth_v0{2}; +let Inst{31} = ae_arth_v0{3}; +let Inst{36} = ae_arth_v1{0}; +let Inst{37} = ae_arth_v1{1}; +let Inst{38} = ae_arth_v1{2}; +let Inst{39} = ae_arth_v1{3}; +} + + + +def AE_ROUND32X2F48SASYM : AE_ROUND32X2F48SASYM_AE_FORMAT1<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_round32x2f48sasym AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ROUND32X2F48SSYM_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_round32x2f48ssym $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; 
+let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{61} = 1; +//operands +let Inst{20} = ae_arth_v{0}; +let Inst{21} = ae_arth_v{1}; +let Inst{22} = ae_arth_v{2}; +let Inst{23} = ae_arth_v{3}; +let Inst{28} = ae_arth_v0{0}; +let Inst{29} = ae_arth_v0{1}; +let Inst{30} = ae_arth_v0{2}; +let Inst{31} = ae_arth_v0{3}; +let Inst{36} = ae_arth_v1{0}; +let Inst{37} = ae_arth_v1{1}; +let Inst{38} = ae_arth_v1{2}; +let Inst{39} = ae_arth_v1{3}; +} + + + +def AE_ROUND32X2F48SSYM : AE_ROUND32X2F48SSYM_AE_FORMAT1<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_round32x2f48ssym AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ROUND32X2F64SASYM_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_round32x2f64sasym $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ROUND32X2F64SASYM : AE_ROUND32X2F64SASYM_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_round32x2f64sasym AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ROUND32X2F64SSYM_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_round32x2f64ssym $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 
1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{57} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_arth_v{0}; +let Inst{21} = ae_arth_v{1}; +let Inst{22} = ae_arth_v{2}; +let Inst{23} = ae_arth_v{3}; +let Inst{28} = ae_arth_v0{0}; +let Inst{29} = ae_arth_v0{1}; +let Inst{30} = ae_arth_v0{2}; +let Inst{31} = ae_arth_v0{3}; +let Inst{36} = ae_arth_v1{0}; +let Inst{37} = ae_arth_v1{1}; +let Inst{38} = ae_arth_v1{2}; +let Inst{39} = ae_arth_v1{3}; +} + + + +def AE_ROUND32X2F64SSYM : AE_ROUND32X2F64SSYM_AE_FORMAT1<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_round32x2f64ssym AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ROUNDSP16F24ASYM_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0), "ae_roundsp16f24asym $ae_arth_v, $ae_arth_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_arth_v{0}; +let Inst{21} = ae_arth_v{1}; +let Inst{22} = ae_arth_v{2}; +let Inst{23} = ae_arth_v{3}; +let Inst{28} = ae_arth_v0{0}; +let Inst{29} = ae_arth_v0{1}; +let Inst{30} = ae_arth_v0{2}; +let Inst{31} = ae_arth_v0{3}; +} + + + +def AE_ROUNDSP16F24ASYM : AE_ROUNDSP16F24ASYM_AE_FORMAT1<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_roundsp16f24asym AE_DR:$ae_arth_v0))]>; + +class AE_ROUNDSP16F24SYM_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0), "ae_roundsp16f24sym $ae_arth_v, $ae_arth_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{36} = 1; +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_arth_v{0}; +let Inst{21} = 
ae_arth_v{1}; +let Inst{22} = ae_arth_v{2}; +let Inst{23} = ae_arth_v{3}; +let Inst{28} = ae_arth_v0{0}; +let Inst{29} = ae_arth_v0{1}; +let Inst{30} = ae_arth_v0{2}; +let Inst{31} = ae_arth_v0{3}; +} + + + +def AE_ROUNDSP16F24SYM : AE_ROUNDSP16F24SYM_AE_FORMAT1<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_roundsp16f24sym AE_DR:$ae_arth_v0))]>; + +class AE_ROUNDSP16Q48X2ASYM_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_roundsp16q48x2asym $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ROUNDSP16Q48X2ASYM : AE_ROUNDSP16Q48X2ASYM_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_roundsp16q48x2asym AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ROUNDSP16Q48X2SYM_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_roundsp16q48x2sym $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{58} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_arth_v{0}; +let Inst{21} = ae_arth_v{1}; +let Inst{22} = ae_arth_v{2}; +let Inst{23} = ae_arth_v{3}; +let Inst{28} = ae_arth_v0{0}; +let Inst{29} = ae_arth_v0{1}; 
+let Inst{30} = ae_arth_v0{2}; +let Inst{31} = ae_arth_v0{3}; +let Inst{36} = ae_arth_v1{0}; +let Inst{37} = ae_arth_v1{1}; +let Inst{38} = ae_arth_v1{2}; +let Inst{39} = ae_arth_v1{3}; +} + + + +def AE_ROUNDSP16Q48X2SYM : AE_ROUNDSP16Q48X2SYM_AE_FORMAT1<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_roundsp16q48x2sym AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_ROUNDSQ32F48ASYM_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_roundsq32f48asym $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{11} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ROUNDSQ32F48ASYM : AE_ROUNDSQ32F48ASYM_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_roundsq32f48asym AE_DR:$ae_arth_v1))]>; + +class AE_ROUNDSQ32F48SYM_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_roundsq32f48sym $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{11} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_ROUNDSQ32F48SYM : AE_ROUNDSQ32F48SYM_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_roundsq32f48sym AE_DR:$ae_arth_v1))]>; 
+ +class AE_S16_0_I_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, imm16n_14:$ae_immls16), "ae_s16.0.i $ae_ls_v, $ars, $ae_immls16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls16; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls16{0}; +let Inst{5} = ae_immls16{1}; +let Inst{6} = ae_immls16{2}; +let Inst{7} = ae_immls16{3}; +} + + + +def AE_S16_0_I : AE_S16_0_I_X24<[(int_xtensa_ae_s16_0_i AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls16)]>; + +class AE_S16_0_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, imm16n_14:$ae_immls16), "ae_s16.0.ip $ae_ls_v, $ars, $ae_immls16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls16; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls16{0}; +let Inst{5} = ae_immls16{1}; +let Inst{6} = ae_immls16{2}; +let Inst{7} = ae_immls16{3}; +} + + + +def AE_S16_0_IP : AE_S16_0_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_s16_0_ip AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls16))]>; + +class AE_S16_0_X_AE_FORMAT48 pattern> + : XtensaInst48<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16.0.x 
$ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16_0_X : AE_S16_0_X_AE_FORMAT48<[(int_xtensa_ae_s16_0_x AE_DR:$ae_ls_v, AR:$ars, AR:$art)]>; + +class AE_S16_0_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16.0.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16_0_XC : AE_S16_0_XC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s16_0_xc AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S16_0_XP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16.0.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars 
= $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16_0_XP : AE_S16_0_XP_X24<[(set AR:$ars_out, (int_xtensa_ae_s16_0_xp AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S16M_L_I_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, imm16n_14:$ae_immls16), "ae_s16m.l.i $ae_ls_v, $ars, $ae_immls16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls16; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls16{0}; +let Inst{5} = ae_immls16{1}; +let Inst{6} = ae_immls16{2}; +let Inst{7} = ae_immls16{3}; +} + + + +def AE_S16M_L_I : AE_S16M_L_I_X24<[(int_xtensa_ae_s16m_l_i AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls16)]>; + +class AE_S16M_L_IU_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, imm16n_14:$ae_immls16), "ae_s16m.l.iu $ae_ls_v, $ars, $ae_immls16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls16; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; 
+let Inst{18} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls16{0}; +let Inst{5} = ae_immls16{1}; +let Inst{6} = ae_immls16{2}; +let Inst{7} = ae_immls16{3}; +} + + + +def AE_S16M_L_IU : AE_S16M_L_IU_X24<[(set AR:$ars_out, (int_xtensa_ae_s16m_l_iu AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls16))]>; + +class AE_S16M_L_X_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16m.l.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16M_L_X : AE_S16M_L_X_X24<[(int_xtensa_ae_s16m_l_x AE_DR:$ae_ls_v, AR:$ars, AR:$art)]>; + +class AE_S16M_L_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16m.l.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let 
Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16M_L_XC : AE_S16M_L_XC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s16m_l_xc AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S16M_L_XU_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16m.l.xu $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16M_L_XU : AE_S16M_L_XU_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s16m_l_xu AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S16X2M_I_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, imm32n_28:$ae_immls32), "ae_s16x2m.i $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; 
+//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S16X2M_I : AE_S16X2M_I_X24<[(int_xtensa_ae_s16x2m_i AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls32)]>; + +class AE_S16X2M_IU_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, imm32n_28:$ae_immls32), "ae_s16x2m.iu $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S16X2M_IU : AE_S16X2M_IU_X24<[(set AR:$ars_out, (int_xtensa_ae_s16x2m_iu AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls32))]>; + +class AE_S16X2M_X_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16x2m.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = 
ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16X2M_X : AE_S16X2M_X_X24<[(int_xtensa_ae_s16x2m_x AE_DR:$ae_ls_v, AR:$ars, AR:$art)]>; + +class AE_S16X2M_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16x2m.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16X2M_XC : AE_S16X2M_XC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s16x2m_xc AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S16X2M_XU_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16x2m.xu $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; 
+let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16X2M_XU : AE_S16X2M_XU_X24<[(set AR:$ars_out, (int_xtensa_ae_s16x2m_xu AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S16X4_I_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, imm64n_56:$ae_immls64), "ae_s16x4.i $ae_ls_v, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls64; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{6} = ae_immls64{2}; +let Inst{7} = ae_immls64{3}; +} + + + +def AE_S16X4_I : AE_S16X4_I_X24<[(int_xtensa_ae_s16x4_i AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls64)]>; + +class AE_S16X4_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, imm0_56:$ae_immls64pos), "ae_s16x4.ip $ae_ls_v, $ars, $ae_immls64pos", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<3> ae_immls64pos; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64pos{0}; +let Inst{5} = 
ae_immls64pos{1}; +let Inst{6} = ae_immls64pos{2}; +} + + + +def AE_S16X4_IP : AE_S16X4_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_s16x4_ip AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls64pos))]>; + +class AE_S16X4_RIC_AE_FORMAT pattern> + : XtensaInst64<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars), "ae_s16x4.ric $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{4} = 1; +let Inst{6} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_S16X4_RIC : AE_S16X4_RIC_AE_FORMAT<[(set AR:$ars_out, (int_xtensa_ae_s16x4_ric AE_DR:$ae_ls_v, AR:$ars))]>; + +class AE_S16X4_RIP_AE_FORMAT pattern> + : XtensaInst64<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars), "ae_s16x4.rip $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_S16X4_RIP : AE_S16X4_RIP_AE_FORMAT<[(set AR:$ars_out, (int_xtensa_ae_s16x4_rip 
AE_DR:$ae_ls_v, AR:$ars))]>; + +class AE_S16X4_X_AE_FORMAT48 pattern> + : XtensaInst48<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16x4.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{35} = 1; +let Inst{36} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16X4_X : AE_S16X4_X_AE_FORMAT48<[(int_xtensa_ae_s16x4_x AE_DR:$ae_ls_v, AR:$ars, AR:$art)]>; + +class AE_S16X4_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16x4.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16X4_XC : AE_S16X4_XC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s16x4_xc AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class 
AE_S16X4_XP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s16x4.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S16X4_XP : AE_S16X4_XP_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s16x4_xp AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S24RA64S_I_AE_FORMAT48 pattern> + : XtensaInst48<(outs ), (ins AE_DR:$ae_ls_v1, AR:$ars, imm32n_28:$ae_immls32), "ae_s24ra64s.i $ae_ls_v1, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v1; +bits<4> ars; +bits<4> ae_immls32; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S24RA64S_I : AE_S24RA64S_I_AE_FORMAT48<[(int_xtensa_ae_s24ra64s_i AE_DR:$ae_ls_v1, AR:$ars, timm:$ae_immls32)]>; + +class AE_S24RA64S_IP_AE_FORMAT48 
pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v1, AR:$ars, imm32n_28:$ae_immls32), "ae_s24ra64s.ip $ae_ls_v1, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v1; +bits<4> ars; +bits<4> ae_immls32; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S24RA64S_IP : AE_S24RA64S_IP_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s24ra64s_ip AE_DR:$ae_ls_v1, AR:$ars, timm:$ae_immls32))]>; + +class AE_S24RA64S_X_AE_FORMAT48 pattern> + : XtensaInst48<(outs ), (ins AE_DR:$ae_ls_v1, AR:$ars, AR:$art), "ae_s24ra64s.x $ae_ls_v1, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v1; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{30} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S24RA64S_X : AE_S24RA64S_X_AE_FORMAT48<[(int_xtensa_ae_s24ra64s_x AE_DR:$ae_ls_v1, AR:$ars, AR:$art)]>; + +class 
AE_S24RA64S_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v1, AR:$ars, AR:$art), "ae_s24ra64s.xc $ae_ls_v1, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v1; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S24RA64S_XC : AE_S24RA64S_XC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s24ra64s_xc AE_DR:$ae_ls_v1, AR:$ars, AR:$art))]>; + +class AE_S24RA64S_XP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v1, AR:$ars, AR:$art), "ae_s24ra64s.xp $ae_ls_v1, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v1; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S24RA64S_XP : AE_S24RA64S_XP_X24<[(set AR:$ars_out, (int_xtensa_ae_s24ra64s_xp AE_DR:$ae_ls_v1, AR:$ars, AR:$art))]>; + +class AE_S24X2RA64S_IP_X24 
pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v2, AE_DR:$ae_ls_v1, AR:$ars), "ae_s24x2ra64s.ip $ae_ls_v2, $ae_ls_v1, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v2; +bits<4> ae_ls_v1; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +//operands +let Inst{4} = ae_ls_v2{0}; +let Inst{5} = ae_ls_v2{1}; +let Inst{6} = ae_ls_v2{2}; +let Inst{7} = ae_ls_v2{3}; +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_S24X2RA64S_IP : AE_S24X2RA64S_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_s24x2ra64s_ip AE_DR:$ae_ls_v2, AE_DR:$ae_ls_v1, AR:$ars))]>; + +class AE_S32_L_I_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, imm32n_28:$ae_immls32), "ae_s32.l.i $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S32_L_I : AE_S32_L_I_X24<[(int_xtensa_ae_s32_l_i AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls32)]>; + +class AE_S32_L_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, imm32n_28:$ae_immls32), "ae_s32.l.ip $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ 
+//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S32_L_IP : AE_S32_L_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_s32_l_ip AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls32))]>; + +class AE_S32_L_X_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32.l.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32_L_X : AE_S32_L_X_X24<[(int_xtensa_ae_s32_l_x AE_DR:$ae_ls_v, AR:$ars, AR:$art)]>; + +class AE_S32_L_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32.l.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} 
= 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32_L_XC : AE_S32_L_XC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s32_l_xc AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S32_L_XP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32.l.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32_L_XP : AE_S32_L_XP_X24<[(set AR:$ars_out, (int_xtensa_ae_s32_l_xp AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S32F24_L_I_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, imm32n_28:$ae_immls32), "ae_s32f24.l.i $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; 
+//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S32F24_L_I : AE_S32F24_L_I_X24<[(int_xtensa_ae_s32f24_l_i AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls32)]>; + +class AE_S32F24_L_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, imm32n_28:$ae_immls32), "ae_s32f24.l.ip $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S32F24_L_IP : AE_S32F24_L_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_s32f24_l_ip AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls32))]>; + +class AE_S32F24_L_X_AE_FORMAT48 pattern> + : XtensaInst48<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32f24.l.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{31} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = 
ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32F24_L_X : AE_S32F24_L_X_AE_FORMAT48<[(int_xtensa_ae_s32f24_l_x AE_DR:$ae_ls_v, AR:$ars, AR:$art)]>; + +class AE_S32F24_L_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32f24.l.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{31} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32F24_L_XC : AE_S32F24_L_XC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s32f24_l_xc AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S32F24_L_XP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32f24.l.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{31} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = 
ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32F24_L_XP : AE_S32F24_L_XP_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s32f24_l_xp AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S32M_I_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, imm32n_28:$ae_immls32), "ae_s32m.i $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S32M_I : AE_S32M_I_X24<[(int_xtensa_ae_s32m_i AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls32)]>; + +class AE_S32M_IU_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, imm32n_28:$ae_immls32), "ae_s32m.iu $ae_ls_v, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls32; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let 
Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S32M_IU : AE_S32M_IU_X24<[(set AR:$ars_out, (int_xtensa_ae_s32m_iu AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls32))]>; + +class AE_S32M_X_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32m.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32M_X : AE_S32M_X_X24<[(int_xtensa_ae_s32m_x AE_DR:$ae_ls_v, AR:$ars, AR:$art)]>; + +class AE_S32M_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32m.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32M_XC : 
AE_S32M_XC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s32m_xc AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S32M_XU_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32m.xu $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32M_XU : AE_S32M_XU_X24<[(set AR:$ars_out, (int_xtensa_ae_s32m_xu AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S32RA64S_I_AE_FORMAT48 pattern> + : XtensaInst48<(outs ), (ins AE_DR:$ae_ls_v1, AR:$ars, imm32n_28:$ae_immls32), "ae_s32ra64s.i $ae_ls_v1, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v1; +bits<4> ars; +bits<4> ae_immls32; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S32RA64S_I : 
AE_S32RA64S_I_AE_FORMAT48<[(int_xtensa_ae_s32ra64s_i AE_DR:$ae_ls_v1, AR:$ars, timm:$ae_immls32)]>; + +class AE_S32RA64S_IP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v1, AR:$ars, imm32n_28:$ae_immls32), "ae_s32ra64s.ip $ae_ls_v1, $ars, $ae_immls32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v1; +bits<4> ars; +bits<4> ae_immls32; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{33} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls32{0}; +let Inst{5} = ae_immls32{1}; +let Inst{6} = ae_immls32{2}; +let Inst{7} = ae_immls32{3}; +} + + + +def AE_S32RA64S_IP : AE_S32RA64S_IP_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s32ra64s_ip AE_DR:$ae_ls_v1, AR:$ars, timm:$ae_immls32))]>; + +class AE_S32RA64S_X_AE_FORMAT48 pattern> + : XtensaInst48<(outs ), (ins AE_DR:$ae_ls_v1, AR:$ars, AR:$art), "ae_s32ra64s.x $ae_ls_v1, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v1; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} 
= art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32RA64S_X : AE_S32RA64S_X_AE_FORMAT48<[(int_xtensa_ae_s32ra64s_x AE_DR:$ae_ls_v1, AR:$ars, AR:$art)]>; + +class AE_S32RA64S_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v1, AR:$ars, AR:$art), "ae_s32ra64s.xc $ae_ls_v1, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v1; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32RA64S_XC : AE_S32RA64S_XC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s32ra64s_xc AE_DR:$ae_ls_v1, AR:$ars, AR:$art))]>; + +class AE_S32RA64S_XP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v1, AR:$ars, AR:$art), "ae_s32ra64s.xp $ae_ls_v1, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v1; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +//operands +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32RA64S_XP : AE_S32RA64S_XP_X24<[(set 
AR:$ars_out, (int_xtensa_ae_s32ra64s_xp AE_DR:$ae_ls_v1, AR:$ars, AR:$art))]>; + +class AE_S32X2_I_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, imm64n_56:$ae_immls64), "ae_s32x2.i $ae_ls_v, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls64; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{6} = ae_immls64{2}; +let Inst{7} = ae_immls64{3}; +} + + + +def AE_S32X2_I : AE_S32X2_I_X24<[(int_xtensa_ae_s32x2_i AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls64)]>; + +class AE_S32X2_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, imm0_56:$ae_immls64pos), "ae_s32x2.ip $ae_ls_v, $ars, $ae_immls64pos", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<3> ae_immls64pos; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64pos{0}; +let Inst{5} = ae_immls64pos{1}; +let Inst{6} = ae_immls64pos{2}; +} + + + +def AE_S32X2_IP : AE_S32X2_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_s32x2_ip AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls64pos))]>; + +class AE_S32X2_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs 
AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars), "ae_s32x2.ric $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_S32X2_RIC : AE_S32X2_RIC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s32x2_ric AE_DR:$ae_ls_v, AR:$ars))]>; + +class AE_S32X2_RIP_AE_FORMAT pattern> + : XtensaInst64<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars), "ae_s32x2.rip $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_S32X2_RIP : AE_S32X2_RIP_AE_FORMAT<[(set AR:$ars_out, (int_xtensa_ae_s32x2_rip AE_DR:$ae_ls_v, AR:$ars))]>; + +class AE_S32X2_X_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32x2.x $ae_ls_v, $ars, $art", pattern>, + 
Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32X2_X : AE_S32X2_X_X24<[(int_xtensa_ae_s32x2_x AE_DR:$ae_ls_v, AR:$ars, AR:$art)]>; + +class AE_S32X2_XC_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32x2.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32X2_XC : AE_S32X2_XC_X24<[(set AR:$ars_out, (int_xtensa_ae_s32x2_xc AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S32X2_XP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32x2.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} 
= 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32X2_XP : AE_S32X2_XP_X24<[(set AR:$ars_out, (int_xtensa_ae_s32x2_xp AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S32X2F24_I_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, imm64n_56:$ae_immls64), "ae_s32x2f24.i $ae_ls_v, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls64; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{6} = ae_immls64{2}; +let Inst{7} = ae_immls64{3}; +} + + + +def AE_S32X2F24_I : AE_S32X2F24_I_X24<[(int_xtensa_ae_s32x2f24_i AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls64)]>; + +class AE_S32X2F24_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, imm0_56:$ae_immls64pos), "ae_s32x2f24.ip $ae_ls_v, $ars, $ae_immls64pos", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<3> ae_immls64pos; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let 
Inst{22} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64pos{0}; +let Inst{5} = ae_immls64pos{1}; +let Inst{6} = ae_immls64pos{2}; +} + + + +def AE_S32X2F24_IP : AE_S32X2F24_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_s32x2f24_ip AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls64pos))]>; + +class AE_S32X2F24_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars), "ae_s32x2f24.ric $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{6} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_S32X2F24_RIC : AE_S32X2F24_RIC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s32x2f24_ric AE_DR:$ae_ls_v, AR:$ars))]>; + +class AE_S32X2F24_RIP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars), "ae_s32x2f24.rip $ae_ls_v, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{7} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let 
Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_S32X2F24_RIP : AE_S32X2F24_RIP_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s32x2f24_rip AE_DR:$ae_ls_v, AR:$ars))]>; + +class AE_S32X2F24_X_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32x2f24.x $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32X2F24_X : AE_S32X2F24_X_X24<[(int_xtensa_ae_s32x2f24_x AE_DR:$ae_ls_v, AR:$ars, AR:$art)]>; + +class AE_S32X2F24_XC_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32x2f24.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; 
+let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32X2F24_XC : AE_S32X2F24_XC_X24<[(set AR:$ars_out, (int_xtensa_ae_s32x2f24_xc AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S32X2F24_XP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s32x2f24.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S32X2F24_XP : AE_S32X2F24_XP_X24<[(set AR:$ars_out, (int_xtensa_ae_s32x2f24_xp AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S32X2RA64S_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v2, AE_DR:$ae_ls_v1, AR:$ars), "ae_s32x2ra64s.ip $ae_ls_v2, $ae_ls_v1, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v2; +bits<4> ae_ls_v1; +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +//operands +let Inst{4} = ae_ls_v2{0}; +let Inst{5} = ae_ls_v2{1}; +let Inst{6} = ae_ls_v2{2}; +let Inst{7} = ae_ls_v2{3}; +let Inst{12} = ae_ls_v1{0}; +let Inst{13} = ae_ls_v1{1}; +let Inst{14} = ae_ls_v1{2}; +let Inst{15} = ae_ls_v1{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def 
AE_S32X2RA64S_IP : AE_S32X2RA64S_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_s32x2ra64s_ip AE_DR:$ae_ls_v2, AE_DR:$ae_ls_v1, AR:$ars))]>; + +class AE_S64_I_X24 pattern> + : XtensaAEInst24<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, imm64n_56:$ae_immls64), "ae_s64.i $ae_ls_v, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls64; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{6} = ae_immls64{2}; +let Inst{7} = ae_immls64{3}; +} + + + +def AE_S64_I : AE_S64_I_X24<[(int_xtensa_ae_s64_i AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls64)]>; + +class AE_S64_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, imm64n_56:$ae_immls64), "ae_s64.ip $ae_ls_v, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> ae_immls64; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; +let Inst{5} = ae_immls64{1}; +let Inst{6} = ae_immls64{2}; +let Inst{7} = ae_immls64{3}; +} + + + +def AE_S64_IP : AE_S64_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_s64_ip AE_DR:$ae_ls_v, AR:$ars, timm:$ae_immls64))]>; + +class AE_S64_X_AE_FORMAT48 pattern> + : XtensaInst48<(outs ), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s64.x $ae_ls_v, $ars, 
$art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S64_X : AE_S64_X_AE_FORMAT48<[(int_xtensa_ae_s64_x AE_DR:$ae_ls_v, AR:$ars, AR:$art)]>; + +class AE_S64_XC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s64.xc $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{28} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S64_XC : AE_S64_XC_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s64_xc AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_S64_XP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AR:$ars_out), (ins AE_DR:$ae_ls_v, AR:$ars, AR:$art), "ae_s64.xp $ae_ls_v, $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; 
+bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_S64_XP : AE_S64_XP_AE_FORMAT48<[(set AR:$ars_out, (int_xtensa_ae_s64_xp AE_DR:$ae_ls_v, AR:$ars, AR:$art))]>; + +class AE_SA16X4_IC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa16x4.ic $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA16X4_IC : AE_SA16X4_IC_AE_FORMAT48<[]>; + +class AE_SA16X4_IP_X24 pattern> + : XtensaAEInst24<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa16x4.ip $ae_ls_v, 
$ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{4} = ae_ls_su{0}; +let Inst{5} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA16X4_IP : AE_SA16X4_IP_X24<[]>; + +class AE_SA16X4_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa16x4.ric $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA16X4_RIC : AE_SA16X4_RIC_AE_FORMAT48<[]>; + +class AE_SA16X4_RIP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa16x4.rip $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> 
+{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{29} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA16X4_RIP : AE_SA16X4_RIP_AE_FORMAT48<[]>; + +class AE_SA24_L_IC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa24.l.ic $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA24_L_IC : AE_SA24_L_IC_AE_FORMAT48<[]>; + +class AE_SA24_L_IP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, 
AR:$ars), "ae_sa24.l.ip $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA24_L_IP : AE_SA24_L_IP_AE_FORMAT48<[]>; + +class AE_SA24_L_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa24.l.ric $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA24_L_RIC : AE_SA24_L_RIC_AE_FORMAT48<[]>; + +class 
AE_SA24_L_RIP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa24.l.rip $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA24_L_RIP : AE_SA24_L_RIP_AE_FORMAT48<[]>; + +class AE_SA24X2_IC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa24x2.ic $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{29} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} 
= ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA24X2_IC : AE_SA24X2_IC_AE_FORMAT48<[]>; + +class AE_SA24X2_IP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa24x2.ip $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA24X2_IP : AE_SA24X2_IP_AE_FORMAT48<[]>; + +class AE_SA24X2_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa24x2.ric $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = 
ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA24X2_RIC : AE_SA24X2_RIC_AE_FORMAT48<[]>; + +class AE_SA24X2_RIP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa24x2.rip $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA24X2_RIP : AE_SA24X2_RIP_AE_FORMAT48<[]>; + +class AE_SA32X2_IC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa32x2.ic $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{28} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = 
ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA32X2_IC : AE_SA32X2_IC_AE_FORMAT48<[]>; + +class AE_SA32X2_IP_X24 pattern> + : XtensaAEInst24<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa32x2.ip $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{4} = ae_ls_su{0}; +let Inst{5} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA32X2_IP : AE_SA32X2_IP_X24<[]>; + +class AE_SA32X2_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa32x2.ric $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = 
ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA32X2_RIC : AE_SA32X2_RIC_AE_FORMAT48<[]>; + +class AE_SA32X2_RIP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa32x2.rip $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{30} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA32X2_RIP : AE_SA32X2_RIP_AE_FORMAT48<[]>; + +class AE_SA32X2F24_IC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa32x2f24.ic $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let 
Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA32X2F24_IC : AE_SA32X2F24_IC_AE_FORMAT48<[]>; + +class AE_SA32X2F24_IP_X24 pattern> + : XtensaAEInst24<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa32x2f24.ip $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{4} = ae_ls_su{0}; +let Inst{5} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA32X2F24_IP : AE_SA32X2F24_IP_X24<[]>; + +class AE_SA32X2F24_RIC_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa32x2f24.ric $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{28} = 1; +let Inst{29} = 1; +let Inst{30} = 1; 
+let Inst{31} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA32X2F24_RIC : AE_SA32X2F24_RIC_AE_FORMAT48<[]>; + +class AE_SA32X2F24_RIP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out, AR:$ars_out), (ins AE_DR:$ae_ls_v, AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa32x2f24.rip $ae_ls_v, $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_ls_v; +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out, $ars = $ars_out,@earlyclobber $ars_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{4} = 1; +let Inst{32} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{12} = ae_ls_v{0}; +let Inst{13} = ae_ls_v{1}; +let Inst{14} = ae_ls_v{2}; +let Inst{15} = ae_ls_v{3}; +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA32X2F24_RIP : AE_SA32X2F24_RIP_AE_FORMAT48<[]>; + +class AE_SA64NEG_FP_AE_FORMAT48 pattern> + : XtensaInst48<(outs AE_VALIGN:$ae_ls_su_out), (ins AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa64neg.fp $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{5} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{15} = 1; +let 
Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA64NEG_FP : AE_SA64NEG_FP_AE_FORMAT48<[(set AE_VALIGN:$ae_ls_su_out, (int_xtensa_ae_sa64neg_fp AE_VALIGN:$ae_ls_su, AR:$ars))]>; + +class AE_SA64POS_FP_X24 pattern> + : XtensaAEInst24<(outs AE_VALIGN:$ae_ls_su_out), (ins AE_VALIGN:$ae_ls_su, AR:$ars), "ae_sa64pos.fp $ae_ls_su, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_su; +bits<4> ars; +let Constraints = "$ae_ls_su = $ae_ls_su_out,@earlyclobber $ae_ls_su_out"; + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = ae_ls_su{0}; +let Inst{5} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SA64POS_FP : AE_SA64POS_FP_X24<[(set AE_VALIGN:$ae_ls_su_out, (int_xtensa_ae_sa64pos_fp AE_VALIGN:$ae_ls_su, AR:$ars))]>; + +class AE_SALIGN64_I_AE_FORMAT48 pattern> + : XtensaInst48<(outs ), (ins AE_VALIGN:$ae_ls_su, AR:$ars, imm64n_56:$ae_immls64), "ae_salign64.i $ae_ls_su, $ars, $ae_immls64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_ls_su; +bits<4> ars; +bits<4> ae_immls64; + + +let mayStore = 1;let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//opcode +let Inst{14} = 1; +let Inst{30} = 1; +let Inst{33} = 1; +let Inst{34} = 1; +let Inst{37} = 1; +//operands +let Inst{6} = ae_ls_su{0}; +let Inst{7} = ae_ls_su{1}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = ae_immls64{0}; 
+let Inst{5} = ae_immls64{1}; +let Inst{28} = ae_immls64{2}; +let Inst{29} = ae_immls64{3}; +} + + + +def AE_SALIGN64_I : AE_SALIGN64_I_AE_FORMAT48<[(int_xtensa_ae_salign64_i AE_VALIGN:$ae_ls_su, AR:$ars, timm:$ae_immls64)]>; + +class AE_SAT16X4_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_sat16x4 $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{56} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_arth_v{0}; +let Inst{21} = ae_arth_v{1}; +let Inst{22} = ae_arth_v{2}; +let Inst{23} = ae_arth_v{3}; +let Inst{28} = ae_arth_v0{0}; +let Inst{29} = ae_arth_v0{1}; +let Inst{30} = ae_arth_v0{2}; +let Inst{31} = ae_arth_v0{3}; +let Inst{36} = ae_arth_v1{0}; +let Inst{37} = ae_arth_v1{1}; +let Inst{38} = ae_arth_v1{2}; +let Inst{39} = ae_arth_v1{3}; +} + + + +def AE_SAT16X4 : AE_SAT16X4_AE_FORMAT1<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_sat16x4 AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_SAT24S_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_sat24s $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{29} = 1; +let Inst{30} = 1; +let Inst{31} = 1; +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_arth_v{0}; +let Inst{21} = ae_arth_v{1}; +let Inst{22} = ae_arth_v{2}; +let Inst{23} = ae_arth_v{3}; +let Inst{36} = ae_arth_v1{0}; +let Inst{37} = ae_arth_v1{1}; +let Inst{38} = ae_arth_v1{2}; +let Inst{39} = ae_arth_v1{3}; +} + + + +def 
AE_SAT24S : AE_SAT24S_AE_FORMAT1<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_sat24s AE_DR:$ae_arth_v1))]>; + +class AE_SAT48S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_sat48s $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{11} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_SAT48S : AE_SAT48S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_sat48s AE_DR:$ae_arth_v1))]>; + +class AE_SATQ56S_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v1), "ae_satq56s $ae_arth_v, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_arth_v{0}; +let Inst{21} = ae_arth_v{1}; +let Inst{22} = ae_arth_v{2}; +let Inst{23} = ae_arth_v{3}; +let Inst{36} = ae_arth_v1{0}; +let Inst{37} = ae_arth_v1{1}; +let Inst{38} = ae_arth_v1{2}; +let Inst{39} = ae_arth_v1{3}; +} + + + +def AE_SATQ56S : AE_SATQ56S_AE_FORMAT1<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_satq56s AE_DR:$ae_arth_v1))]>; + +class AE_SB_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, AR:$art), "ae_sb $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; 
+let Inst{13} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_SB : AE_SB_X24<[(set AR:$ars_out, (int_xtensa_ae_sb AR:$ars, AR:$art))]>; + +class AE_SB_IC_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, AR:$art), "ae_sb.ic $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_SB_IC : AE_SB_IC_X24<[(set AR:$ars_out, (int_xtensa_ae_sb_ic AR:$ars, AR:$art))]>; + +class AE_SB_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, AR:$art), "ae_sb.ip $ars, $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> art; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_SB_IP : AE_SB_IP_X24<[(set AR:$ars_out, 
(int_xtensa_ae_sb_ip AR:$ars, AR:$art))]>; + +class AE_SBF_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars), "ae_sbf $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SBF : AE_SBF_X24<[(set AR:$ars_out, (int_xtensa_ae_sbf AR:$ars))]>; + +class AE_SBF_IC_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars), "ae_sbf.ic $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SBF_IC : AE_SBF_IC_X24<[(set AR:$ars_out, (int_xtensa_ae_sbf_ic AR:$ars))]>; + +class AE_SBF_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars), "ae_sbf.ip $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{6} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let 
Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SBF_IP : AE_SBF_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_sbf_ip AR:$ars))]>; + +class AE_SBI_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, AR:$art, imm1_16:$ae_ohba2), "ae_sbi $ars, $art, $ae_ohba2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> art; +bits<4> ae_ohba2; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +let Inst{12} = ae_ohba2{0}; +let Inst{13} = ae_ohba2{1}; +let Inst{14} = ae_ohba2{2}; +let Inst{15} = ae_ohba2{3}; +} + + + +def AE_SBI : AE_SBI_X24<[(set AR:$ars_out, (int_xtensa_ae_sbi AR:$ars, AR:$art, timm:$ae_ohba2))]>; + +class AE_SBI_IC_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars, AR:$art, imm1_16:$ae_ohba2), "ae_sbi.ic $ars, $art, $ae_ohba2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> art; +bits<4> ae_ohba2; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +let Inst{12} = ae_ohba2{0}; +let Inst{13} = ae_ohba2{1}; +let Inst{14} = ae_ohba2{2}; +let Inst{15} = ae_ohba2{3}; +} + + + +def AE_SBI_IC : AE_SBI_IC_X24<[(set AR:$ars_out, (int_xtensa_ae_sbi_ic AR:$ars, AR:$art, timm:$ae_ohba2))]>; + +class AE_SBI_IP_X24 pattern> + : XtensaAEInst24<(outs 
AR:$ars_out), (ins AR:$ars, AR:$art, imm1_16:$ae_ohba2), "ae_sbi.ip $ars, $art, $ae_ohba2", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +bits<4> art; +bits<4> ae_ohba2; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +let Inst{12} = ae_ohba2{0}; +let Inst{13} = ae_ohba2{1}; +let Inst{14} = ae_ohba2{2}; +let Inst{15} = ae_ohba2{3}; +} + + + +def AE_SBI_IP : AE_SBI_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_sbi_ip AR:$ars, AR:$art, timm:$ae_ohba2))]>; + +class AE_SEL16I_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_dr_to_dr_v), (ins AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1, uimm4:$ae_selimm), "ae_sel16i $ae_dr_to_dr_v, $ae_dr_to_dr_v0, $ae_dr_to_dr_v1, $ae_selimm", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_dr_to_dr_v; +bits<4> ae_dr_to_dr_v0; +bits<4> ae_dr_to_dr_v1; +bits<4> ae_selimm; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{21} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_dr_to_dr_v{0}; +let Inst{13} = ae_dr_to_dr_v{1}; +let Inst{14} = ae_dr_to_dr_v{2}; +let Inst{15} = ae_dr_to_dr_v{3}; +let Inst{8} = ae_dr_to_dr_v0{0}; +let Inst{9} = ae_dr_to_dr_v0{1}; +let Inst{10} = ae_dr_to_dr_v0{2}; +let Inst{11} = ae_dr_to_dr_v0{3}; +let Inst{4} = ae_dr_to_dr_v1{0}; +let Inst{5} = ae_dr_to_dr_v1{1}; +let Inst{6} = ae_dr_to_dr_v1{2}; +let Inst{7} = ae_dr_to_dr_v1{3}; +let Inst{16} = ae_selimm{0}; +let Inst{17} = ae_selimm{1}; +let Inst{18} = ae_selimm{2}; +let Inst{19} = ae_selimm{3}; +} + + + +def AE_SEL16I : AE_SEL16I_X24<[(set AE_DR:$ae_dr_to_dr_v, (int_xtensa_ae_sel16i AE_DR:$ae_dr_to_dr_v0, 
AE_DR:$ae_dr_to_dr_v1, timm:$ae_selimm))]>; + +class AE_SEL16I_N_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_dr_to_dr_v), (ins AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1, uimm2:$ae_selimm_N), "ae_sel16i.n $ae_dr_to_dr_v, $ae_dr_to_dr_v0, $ae_dr_to_dr_v1, $ae_selimm_N", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_dr_to_dr_v; +bits<4> ae_dr_to_dr_v0; +bits<4> ae_dr_to_dr_v1; +bits<2> ae_selimm_N; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode + +//operands +let Inst{16} = ae_dr_to_dr_v{0}; +let Inst{17} = ae_dr_to_dr_v{1}; +let Inst{18} = ae_dr_to_dr_v{2}; +let Inst{19} = ae_dr_to_dr_v{3}; +let Inst{24} = ae_dr_to_dr_v0{0}; +let Inst{25} = ae_dr_to_dr_v0{1}; +let Inst{26} = ae_dr_to_dr_v0{2}; +let Inst{27} = ae_dr_to_dr_v0{3}; +let Inst{32} = ae_dr_to_dr_v1{0}; +let Inst{33} = ae_dr_to_dr_v1{1}; +let Inst{34} = ae_dr_to_dr_v1{2}; +let Inst{35} = ae_dr_to_dr_v1{3}; +let Inst{48} = ae_selimm_N{0}; +let Inst{49} = ae_selimm_N{1}; +} + + + +def AE_SEL16I_N : AE_SEL16I_N_AE_FORMAT<[(set AE_DR:$ae_dr_to_dr_v, (int_xtensa_ae_sel16i_n AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1, timm:$ae_selimm_N))]>; + +class AE_SEXT32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_dr_to_dr_v), (ins AE_DR:$ae_dr_to_dr_v0, imm7_22:$ae_opnd_tp7), "ae_sext32 $ae_dr_to_dr_v, $ae_dr_to_dr_v0, $ae_opnd_tp7", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_dr_to_dr_v; +bits<4> ae_dr_to_dr_v0; +bits<4> ae_opnd_tp7; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +//operands +let Inst{12} = ae_dr_to_dr_v{0}; +let Inst{13} = ae_dr_to_dr_v{1}; +let Inst{14} = ae_dr_to_dr_v{2}; +let Inst{15} = ae_dr_to_dr_v{3}; +let Inst{8} = ae_dr_to_dr_v0{0}; +let Inst{9} = ae_dr_to_dr_v0{1}; +let Inst{10} = ae_dr_to_dr_v0{2}; +let Inst{11} = ae_dr_to_dr_v0{3}; +let Inst{4} = 
ae_opnd_tp7{0}; +let Inst{5} = ae_opnd_tp7{1}; +let Inst{6} = ae_opnd_tp7{2}; +let Inst{7} = ae_opnd_tp7{3}; +} + + + +def AE_SEXT32 : AE_SEXT32_X24<[(set AE_DR:$ae_dr_to_dr_v, (int_xtensa_ae_sext32 AE_DR:$ae_dr_to_dr_v0, timm:$ae_opnd_tp7))]>; + +class AE_SEXT32X2D16_10_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_to_dr_v), (ins AE_DR:$ae_to_dr_v0), "ae_sext32x2d16.10 $ae_to_dr_v, $ae_to_dr_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_to_dr_v; +bits<4> ae_to_dr_v0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{7} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{46} = 1; +let Inst{47} = 1; +//operands +let Inst{12} = ae_to_dr_v{0}; +let Inst{13} = ae_to_dr_v{1}; +let Inst{14} = ae_to_dr_v{2}; +let Inst{15} = ae_to_dr_v{3}; +let Inst{8} = ae_to_dr_v0{0}; +let Inst{9} = ae_to_dr_v0{1}; +let Inst{10} = ae_to_dr_v0{2}; +let Inst{11} = ae_to_dr_v0{3}; +} + + + +def AE_SEXT32X2D16_10 : AE_SEXT32X2D16_10_AE_FORMAT<[(set AE_DR:$ae_to_dr_v, (int_xtensa_ae_sext32x2d16_10 AE_DR:$ae_to_dr_v0))]>; + +class AE_SEXT32X2D16_32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_to_dr_v), (ins AE_DR:$ae_to_dr_v0), "ae_sext32x2d16.32 $ae_to_dr_v, $ae_to_dr_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_to_dr_v; +bits<4> ae_to_dr_v0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +//operands +let Inst{12} = ae_to_dr_v{0}; +let Inst{13} = ae_to_dr_v{1}; +let Inst{14} = ae_to_dr_v{2}; +let Inst{15} = ae_to_dr_v{3}; +let Inst{8} = ae_to_dr_v0{0}; +let Inst{9} = ae_to_dr_v0{1}; +let Inst{10} = ae_to_dr_v0{2}; +let Inst{11} = ae_to_dr_v0{3}; +} + + + +def AE_SEXT32X2D16_32 : AE_SEXT32X2D16_32_X24<[(set AE_DR:$ae_to_dr_v, (int_xtensa_ae_sext32x2d16_32 AE_DR:$ae_to_dr_v0))]>; + +class AE_SHA32_X24 
pattern> + : XtensaAEInst24<(outs AR:$arr), (ins AR:$ars), "ae_sha32 $arr, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SHA32 : AE_SHA32_X24<[(set AR:$arr, (int_xtensa_ae_sha32 AR:$ars))]>; + +class AE_SHORTSWAP_AE_FORMAT1 pattern> + : XtensaInst64<(outs AE_DR:$ae_to_dr_v), (ins AE_DR:$ae_to_dr_v0), "ae_shortswap $ae_to_dr_v, $ae_to_dr_v0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_to_dr_v; +bits<4> ae_to_dr_v0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +let Inst{63} = 1; +//opcode +let Inst{36} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{56} = 1; +let Inst{59} = 1; +let Inst{62} = 1; +//operands +let Inst{20} = ae_to_dr_v{0}; +let Inst{21} = ae_to_dr_v{1}; +let Inst{22} = ae_to_dr_v{2}; +let Inst{23} = ae_to_dr_v{3}; +let Inst{28} = ae_to_dr_v0{0}; +let Inst{29} = ae_to_dr_v0{1}; +let Inst{30} = ae_to_dr_v0{2}; +let Inst{31} = ae_to_dr_v0{3}; +} + + + +def AE_SHORTSWAP : AE_SHORTSWAP_AE_FORMAT1<[(set AE_DR:$ae_to_dr_v, (int_xtensa_ae_shortswap AE_DR:$ae_to_dr_v0))]>; + +class AE_SLAA16S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_slaa16s $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let 
Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SLAA16S : AE_SLAA16S_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slaa16s AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SLAA32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_slaa32 $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SLAA32 : AE_SLAA32_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slaa32 AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SLAA32S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_slaa32s $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SLAA32S 
: AE_SLAA32S_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slaa32s AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SLAA64_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_slaa64 $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SLAA64 : AE_SLAA64_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slaa64 AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SLAA64S_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_slaa64s $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{12} = 1; +let Inst{15} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{55} = 1; +//operands +let Inst{28} = ae_shift_d{0}; +let Inst{29} = ae_shift_d{1}; +let Inst{30} = ae_shift_d{2}; +let Inst{31} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SLAA64S : AE_SLAA64S_AE_FORMAT2<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slaa64s AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SLAAQ56_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), 
(ins AE_DR:$ae_shift_d0, AR:$ars), "ae_slaaq56 $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SLAAQ56 : AE_SLAAQ56_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slaaq56 AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SLAI16S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm4:$ae_osa16), "ae_slai16s $ae_shift_d, $ae_shift_d0, $ae_osa16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ae_osa16; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +let Inst{36} = ae_osa16{0}; +let Inst{37} = ae_osa16{1}; +let Inst{38} = ae_osa16{2}; +let Inst{39} = ae_osa16{3}; +} + + + +def AE_SLAI16S : AE_SLAI16S_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slai16s AE_DR:$ae_shift_d0, timm:$ae_osa16))]>; + +class AE_SLAI24_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm5:$ae_osa32), "ae_slai24 $ae_shift_d, $ae_shift_d0, $ae_osa32", pattern>, + 
Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<5> ae_osa32; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ae_osa32{0}; +let Inst{9} = ae_osa32{1}; +let Inst{10} = ae_osa32{2}; +let Inst{11} = ae_osa32{3}; +let Inst{16} = ae_osa32{4}; +} + + + +def AE_SLAI24 : AE_SLAI24_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slai24 AE_DR:$ae_shift_d0, timm:$ae_osa32))]>; + +class AE_SLAI24S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm5:$ae_osa32), "ae_slai24s $ae_shift_d, $ae_shift_d0, $ae_osa32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<5> ae_osa32; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ae_osa32{0}; +let Inst{9} = ae_osa32{1}; +let Inst{10} = ae_osa32{2}; +let Inst{11} = ae_osa32{3}; +let Inst{16} = ae_osa32{4}; +} + + + +def AE_SLAI24S : AE_SLAI24S_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slai24s AE_DR:$ae_shift_d0, timm:$ae_osa32))]>; + +class AE_SLAI32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm5:$ae_osa32), "ae_slai32 $ae_shift_d, $ae_shift_d0, $ae_osa32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<5> 
ae_osa32; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ae_osa32{0}; +let Inst{9} = ae_osa32{1}; +let Inst{10} = ae_osa32{2}; +let Inst{11} = ae_osa32{3}; +let Inst{16} = ae_osa32{4}; +} + + + +def AE_SLAI32 : AE_SLAI32_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slai32 AE_DR:$ae_shift_d0, timm:$ae_osa32))]>; + +class AE_SLAI32S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm5:$ae_osa32), "ae_slai32s $ae_shift_d, $ae_shift_d0, $ae_osa32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<5> ae_osa32; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ae_osa32{0}; +let Inst{9} = ae_osa32{1}; +let Inst{10} = ae_osa32{2}; +let Inst{11} = ae_osa32{3}; +let Inst{16} = ae_osa32{4}; +} + + + +def AE_SLAI32S : AE_SLAI32S_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slai32s AE_DR:$ae_shift_d0, timm:$ae_osa32))]>; + +class AE_SLAI64_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm6:$ae_osa64), "ae_slai64 $ae_shift_d, $ae_shift_d0, $ae_osa64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<6> ae_osa64; + + +let DecoderNamespace = "HIFI3"; + 
+//format + +//opcode +let Inst{2} = 1; +let Inst{19} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ae_osa64{0}; +let Inst{9} = ae_osa64{1}; +let Inst{10} = ae_osa64{2}; +let Inst{11} = ae_osa64{3}; +let Inst{16} = ae_osa64{4}; +let Inst{17} = ae_osa64{5}; +} + + + +def AE_SLAI64 : AE_SLAI64_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slai64 AE_DR:$ae_shift_d0, timm:$ae_osa64))]>; + +class AE_SLAI64S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm6:$ae_osa64), "ae_slai64s $ae_shift_d, $ae_shift_d0, $ae_osa64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<6> ae_osa64; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{58} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +let Inst{36} = ae_osa64{0}; +let Inst{37} = ae_osa64{1}; +let Inst{38} = ae_osa64{2}; +let Inst{39} = ae_osa64{3}; +let Inst{56} = ae_osa64{4}; +let Inst{57} = ae_osa64{5}; +} + + + +def AE_SLAI64S : AE_SLAI64S_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slai64s AE_DR:$ae_shift_d0, timm:$ae_osa64))]>; + +class AE_SLAISQ56S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm6:$ae_osa64), "ae_slaisq56s $ae_shift_d, $ae_shift_d0, $ae_osa64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<6> ae_osa64; + + +let DecoderNamespace = "HIFI3"; + +//format + 
+//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ae_osa64{0}; +let Inst{9} = ae_osa64{1}; +let Inst{10} = ae_osa64{2}; +let Inst{11} = ae_osa64{3}; +let Inst{16} = ae_osa64{4}; +let Inst{17} = ae_osa64{5}; +} + + + +def AE_SLAISQ56S : AE_SLAISQ56S_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slaisq56s AE_DR:$ae_shift_d0, timm:$ae_osa64))]>; + +class AE_SLAS24_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_slas24 $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{11} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +} + + + +def AE_SLAS24 : AE_SLAS24_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slas24 AE_DR:$ae_shift_d0))]>; + +class AE_SLAS24S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_slas24s $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = 
ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +} + + + +def AE_SLAS24S : AE_SLAS24S_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slas24s AE_DR:$ae_shift_d0))]>; + +class AE_SLAS32_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_slas32 $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{37} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +} + + + +def AE_SLAS32 : AE_SLAS32_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slas32 AE_DR:$ae_shift_d0))]>; + +class AE_SLAS32S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_slas32s $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{37} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +} + + + +def AE_SLAS32S : AE_SLAS32S_AE_FORMAT<[(set 
AE_DR:$ae_shift_d, (int_xtensa_ae_slas32s AE_DR:$ae_shift_d0))]>; + +class AE_SLAS64_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_slas64 $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{38} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +} + + + +def AE_SLAS64 : AE_SLAS64_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slas64 AE_DR:$ae_shift_d0))]>; + +class AE_SLAS64S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_slas64s $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +} + + + +def AE_SLAS64S : AE_SLAS64S_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slas64s AE_DR:$ae_shift_d0))]>; + +class AE_SLASQ56_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_slasq56 $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + 
+//format + +//opcode +let Inst{2} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +} + + + +def AE_SLASQ56 : AE_SLASQ56_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slasq56 AE_DR:$ae_shift_d0))]>; + +class AE_SLASSQ56S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_slassq56s $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +} + + + +def AE_SLASSQ56S : AE_SLASSQ56S_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_slassq56s AE_DR:$ae_shift_d0))]>; + +class AE_SRA64_32_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_sra64_32 $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{42} = 1; +let Inst{43} = 1; +let Inst{55} = 1; +//operands +let Inst{28} = ae_shift_d{0}; +let Inst{29} = 
ae_shift_d{1}; +let Inst{30} = ae_shift_d{2}; +let Inst{31} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SRA64_32 : AE_SRA64_32_AE_FORMAT2<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_sra64_32 AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SRAA16RS_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_sraa16rs $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{55} = 1; +//operands +let Inst{28} = ae_shift_d{0}; +let Inst{29} = ae_shift_d{1}; +let Inst{30} = ae_shift_d{2}; +let Inst{31} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SRAA16RS : AE_SRAA16RS_AE_FORMAT2<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_sraa16rs AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SRAA16S_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_sraa16s $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{14} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{55} = 1; +//operands +let Inst{28} = ae_shift_d{0}; +let Inst{29} = ae_shift_d{1}; +let Inst{30} = ae_shift_d{2}; +let Inst{31} = ae_shift_d{3}; 
+let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SRAA16S : AE_SRAA16S_AE_FORMAT2<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_sraa16s AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SRAA32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_sraa32 $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SRAA32 : AE_SRAA32_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_sraa32 AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SRAA32RS_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_sraa32rs $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{55} = 1; +//operands +let Inst{28} = ae_shift_d{0}; +let Inst{29} = ae_shift_d{1}; +let Inst{30} = ae_shift_d{2}; +let Inst{31} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} 
= ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SRAA32RS : AE_SRAA32RS_AE_FORMAT2<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_sraa32rs AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SRAA32S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_sraa32s $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SRAA32S : AE_SRAA32S_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_sraa32s AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SRAA64_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_sraa64 $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SRAA64 : AE_SRAA64_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_sraa64 AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SRAI16_AE_FORMAT pattern> + : XtensaInst64<(outs 
AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm4:$ae_osa16), "ae_srai16 $ae_shift_d, $ae_shift_d0, $ae_osa16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ae_osa16; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{56} = 1; +let Inst{57} = 1; +let Inst{58} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +let Inst{36} = ae_osa16{0}; +let Inst{37} = ae_osa16{1}; +let Inst{38} = ae_osa16{2}; +let Inst{39} = ae_osa16{3}; +} + + + +def AE_SRAI16 : AE_SRAI16_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srai16 AE_DR:$ae_shift_d0, timm:$ae_osa16))]>; + +class AE_SRAI16R_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm4:$ae_osa16), "ae_srai16r $ae_shift_d, $ae_shift_d0, $ae_osa16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ae_osa16; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +let Inst{36} = ae_osa16{0}; +let Inst{37} = ae_osa16{1}; +let Inst{38} = ae_osa16{2}; +let Inst{39} = ae_osa16{3}; +} + + + +def AE_SRAI16R : AE_SRAI16R_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srai16r AE_DR:$ae_shift_d0, timm:$ae_osa16))]>; + 
+class AE_SRAI24_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm5:$ae_osa32), "ae_srai24 $ae_shift_d, $ae_shift_d0, $ae_osa32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<5> ae_osa32; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ae_osa32{0}; +let Inst{9} = ae_osa32{1}; +let Inst{10} = ae_osa32{2}; +let Inst{11} = ae_osa32{3}; +let Inst{16} = ae_osa32{4}; +} + + + +def AE_SRAI24 : AE_SRAI24_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srai24 AE_DR:$ae_shift_d0, timm:$ae_osa32))]>; + +class AE_SRAI32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm5:$ae_osa32), "ae_srai32 $ae_shift_d, $ae_shift_d0, $ae_osa32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<5> ae_osa32; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ae_osa32{0}; +let Inst{9} = ae_osa32{1}; +let Inst{10} = ae_osa32{2}; +let Inst{11} = ae_osa32{3}; +let Inst{16} = ae_osa32{4}; +} + + + +def AE_SRAI32 : AE_SRAI32_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srai32 AE_DR:$ae_shift_d0, timm:$ae_osa32))]>; + +class AE_SRAI32R_AE_FORMAT pattern> + : 
XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm5:$ae_osa32), "ae_srai32r $ae_shift_d, $ae_shift_d0, $ae_osa32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<5> ae_osa32; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{61} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +let Inst{36} = ae_osa32{0}; +let Inst{37} = ae_osa32{1}; +let Inst{38} = ae_osa32{2}; +let Inst{39} = ae_osa32{3}; +let Inst{56} = ae_osa32{4}; +} + + + +def AE_SRAI32R : AE_SRAI32R_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srai32r AE_DR:$ae_shift_d0, timm:$ae_osa32))]>; + +class AE_SRAI64_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm6:$ae_osa64), "ae_srai64 $ae_shift_d, $ae_shift_d0, $ae_osa64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<6> ae_osa64; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ae_osa64{0}; +let Inst{9} = ae_osa64{1}; +let Inst{10} = ae_osa64{2}; +let Inst{11} = ae_osa64{3}; +let Inst{16} = ae_osa64{4}; +let Inst{17} = ae_osa64{5}; +} + + + +def AE_SRAI64 : AE_SRAI64_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srai64 AE_DR:$ae_shift_d0, timm:$ae_osa64))]>; + +class AE_SRAS24_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), 
(ins AE_DR:$ae_shift_d0), "ae_sras24 $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +} + + + +def AE_SRAS24 : AE_SRAS24_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_sras24 AE_DR:$ae_shift_d0))]>; + +class AE_SRAS32_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_sras32 $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{39} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +} + + + +def AE_SRAS32 : AE_SRAS32_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_sras32 AE_DR:$ae_shift_d0))]>; + +class AE_SRAS64_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_sras64 $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{37} = 1; +let Inst{39} = 1; 
+let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +} + + + +def AE_SRAS64 : AE_SRAS64_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_sras64 AE_DR:$ae_shift_d0))]>; + +class AE_SRLA32_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_srla32 $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{55} = 1; +//operands +let Inst{28} = ae_shift_d{0}; +let Inst{29} = ae_shift_d{1}; +let Inst{30} = ae_shift_d{2}; +let Inst{31} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SRLA32 : AE_SRLA32_AE_FORMAT2<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srla32 AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SRLA64_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AR:$ars), "ae_srla64 $ae_shift_d, $ae_shift_d0, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{12} = 1; +let Inst{40} = 1; +let Inst{43} = 1; +let Inst{44} = 1; +let Inst{55} = 1; +//operands +let Inst{28} = ae_shift_d{0}; 
+let Inst{29} = ae_shift_d{1}; +let Inst{30} = ae_shift_d{2}; +let Inst{31} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_SRLA64 : AE_SRLA64_AE_FORMAT2<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srla64 AE_DR:$ae_shift_d0, AR:$ars))]>; + +class AE_SRLI24_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm5:$ae_osa32), "ae_srli24 $ae_shift_d, $ae_shift_d0, $ae_osa32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<5> ae_osa32; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{57} = 1; +let Inst{61} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +let Inst{36} = ae_osa32{0}; +let Inst{37} = ae_osa32{1}; +let Inst{38} = ae_osa32{2}; +let Inst{39} = ae_osa32{3}; +let Inst{56} = ae_osa32{4}; +} + + + +def AE_SRLI24 : AE_SRLI24_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srli24 AE_DR:$ae_shift_d0, timm:$ae_osa32))]>; + +class AE_SRLI32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm5:$ae_osa32), "ae_srli32 $ae_shift_d, $ae_shift_d0, $ae_osa32", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<5> ae_osa32; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let 
Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{8} = ae_osa32{0}; +let Inst{9} = ae_osa32{1}; +let Inst{10} = ae_osa32{2}; +let Inst{11} = ae_osa32{3}; +let Inst{16} = ae_osa32{4}; +} + + + +def AE_SRLI32 : AE_SRLI32_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srli32 AE_DR:$ae_shift_d0, timm:$ae_osa32))]>; + +class AE_SRLI64_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, uimm6:$ae_osa64), "ae_srli64 $ae_shift_d, $ae_shift_d0, $ae_osa64", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<6> ae_osa64; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{60} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +let Inst{36} = ae_osa64{0}; +let Inst{37} = ae_osa64{1}; +let Inst{38} = ae_osa64{2}; +let Inst{39} = ae_osa64{3}; +let Inst{56} = ae_osa64{4}; +let Inst{57} = ae_osa64{5}; +} + + + +def AE_SRLI64 : AE_SRLI64_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srli64 AE_DR:$ae_shift_d0, timm:$ae_osa64))]>; + +class AE_SRLS24_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_srls24 $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = 
ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +} + + + +def AE_SRLS24 : AE_SRLS24_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srls24 AE_DR:$ae_shift_d0))]>; + +class AE_SRLS32_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_srls32 $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{36} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +} + + + +def AE_SRLS32 : AE_SRLS32_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srls32 AE_DR:$ae_shift_d0))]>; + +class AE_SRLS64_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0), "ae_srls64 $ae_shift_d, $ae_shift_d0", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{37} = 1; +let Inst{38} = 1; +let Inst{39} = 1; +let Inst{57} = 1; +let Inst{59} = 1; +let Inst{61} = 1; +let Inst{62} = 1; +let Inst{63} = 1; +//operands +let Inst{20} = ae_shift_d{0}; +let Inst{21} = ae_shift_d{1}; +let Inst{22} = ae_shift_d{2}; +let Inst{23} = ae_shift_d{3}; +let Inst{28} = ae_shift_d0{0}; +let Inst{29} = ae_shift_d0{1}; +let Inst{30} = ae_shift_d0{2}; +let Inst{31} = ae_shift_d0{3}; +} + + + +def 
AE_SRLS64 : AE_SRLS64_AE_FORMAT<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_srls64 AE_DR:$ae_shift_d0))]>; + +class AE_SUB16_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_sub16 $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{51} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{24} = ae_arth_v0{0}; +let Inst{25} = ae_arth_v0{1}; +let Inst{26} = ae_arth_v0{2}; +let Inst{27} = ae_arth_v0{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_SUB16 : AE_SUB16_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_sub16 AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_SUB16S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_sub16s $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def 
AE_SUB16S : AE_SUB16S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_sub16s AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_SUB24S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_sub24s $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{24} = ae_arth_v0{0}; +let Inst{25} = ae_arth_v0{1}; +let Inst{26} = ae_arth_v0{2}; +let Inst{27} = ae_arth_v0{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_SUB24S : AE_SUB24S_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_sub24s AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_SUB32_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_sub32 $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_SUB32 : AE_SUB32_X24<[(set AE_DR:$ae_arth_v, 
(int_xtensa_ae_sub32 AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_SUB32S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_sub32s $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_SUB32S : AE_SUB32S_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_sub32s AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_SUB64_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_sub64 $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{22} = 1; +//operands +let Inst{12} = ae_arth_v{0}; +let Inst{13} = ae_arth_v{1}; +let Inst{14} = ae_arth_v{2}; +let Inst{15} = ae_arth_v{3}; +let Inst{8} = ae_arth_v0{0}; +let Inst{9} = ae_arth_v0{1}; +let Inst{10} = ae_arth_v0{2}; +let Inst{11} = ae_arth_v0{3}; +let Inst{4} = ae_arth_v1{0}; +let Inst{5} = ae_arth_v1{1}; +let Inst{6} = ae_arth_v1{2}; +let Inst{7} = ae_arth_v1{3}; +} + + + +def AE_SUB64 : AE_SUB64_X24<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_sub64 AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_SUB64S_AE_FORMAT pattern> + 
: XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_sub64s $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{50} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{24} = ae_arth_v0{0}; +let Inst{25} = ae_arth_v0{1}; +let Inst{26} = ae_arth_v0{2}; +let Inst{27} = ae_arth_v0{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_SUB64S : AE_SUB64S_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_sub64s AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_SUBADD32_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_subadd32 $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{24} = ae_arth_v0{0}; +let Inst{25} = ae_arth_v0{1}; +let Inst{26} = ae_arth_v0{2}; +let Inst{27} = ae_arth_v0{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_SUBADD32 : AE_SUBADD32_AE_FORMAT<[(set AE_DR:$ae_arth_v, 
(int_xtensa_ae_subadd32 AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_SUBADD32S_AE_FORMAT pattern> + : XtensaInst64<(outs AE_DR:$ae_arth_v), (ins AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1), "ae_subadd32s $ae_arth_v, $ae_arth_v0, $ae_arth_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_arth_v; +bits<4> ae_arth_v0; +bits<4> ae_arth_v1; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{0} = 1; +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{48} = 1; +let Inst{49} = 1; +let Inst{50} = 1; +let Inst{52} = 1; +let Inst{53} = 1; +let Inst{54} = 1; +let Inst{55} = 1; +//operands +let Inst{16} = ae_arth_v{0}; +let Inst{17} = ae_arth_v{1}; +let Inst{18} = ae_arth_v{2}; +let Inst{19} = ae_arth_v{3}; +let Inst{24} = ae_arth_v0{0}; +let Inst{25} = ae_arth_v0{1}; +let Inst{26} = ae_arth_v0{2}; +let Inst{27} = ae_arth_v0{3}; +let Inst{32} = ae_arth_v1{0}; +let Inst{33} = ae_arth_v1{1}; +let Inst{34} = ae_arth_v1{2}; +let Inst{35} = ae_arth_v1{3}; +} + + + +def AE_SUBADD32S : AE_SUBADD32S_AE_FORMAT<[(set AE_DR:$ae_arth_v, (int_xtensa_ae_subadd32s AE_DR:$ae_arth_v0, AE_DR:$ae_arth_v1))]>; + +class AE_TRUNCA32F64S_L_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AE_DR:$ae_shift_sd, AR:$ars), "ae_trunca32f64s.l $ae_shift_d, $ae_shift_d0, $ae_shift_sd, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ae_shift_sd; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{41} = 1; +let Inst{55} = 1; +//operands +let Inst{28} = ae_shift_d{0}; +let Inst{29} = ae_shift_d{1}; +let Inst{30} = ae_shift_d{2}; +let Inst{31} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{12} = ae_shift_sd{0}; +let Inst{13} = ae_shift_sd{1}; +let Inst{14} = ae_shift_sd{2}; 
+let Inst{15} = ae_shift_sd{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_TRUNCA32F64S_L : AE_TRUNCA32F64S_L_AE_FORMAT2<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_trunca32f64s_l AE_DR:$ae_shift_d0, AE_DR:$ae_shift_sd, AR:$ars))]>; + +class AE_TRUNCA32X2F64S_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AE_DR:$ae_shift_sd, AR:$ars), "ae_trunca32x2f64s $ae_shift_d, $ae_shift_d0, $ae_shift_sd, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ae_shift_sd; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = ae_shift_d{0}; +let Inst{13} = ae_shift_d{1}; +let Inst{14} = ae_shift_d{2}; +let Inst{15} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{16} = ae_shift_sd{0}; +let Inst{17} = ae_shift_sd{1}; +let Inst{18} = ae_shift_sd{2}; +let Inst{19} = ae_shift_sd{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_TRUNCA32X2F64S : AE_TRUNCA32X2F64S_X24<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_trunca32x2f64s AE_DR:$ae_shift_d0, AE_DR:$ae_shift_sd, AR:$ars))]>; + +class AE_TRUNCI32F64S_L_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AE_DR:$ae_shift_sd, uimm4:$ae_osa16), "ae_trunci32f64s.l $ae_shift_d, $ae_shift_d0, $ae_shift_sd, $ae_osa16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ae_shift_sd; +bits<4> ae_osa16; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{40} = 1; +let Inst{41} = 1; +let Inst{42} = 1; +let Inst{55} = 1; +//operands 
+let Inst{28} = ae_shift_d{0}; +let Inst{29} = ae_shift_d{1}; +let Inst{30} = ae_shift_d{2}; +let Inst{31} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{12} = ae_shift_sd{0}; +let Inst{13} = ae_shift_sd{1}; +let Inst{14} = ae_shift_sd{2}; +let Inst{15} = ae_shift_sd{3}; +let Inst{8} = ae_osa16{0}; +let Inst{9} = ae_osa16{1}; +let Inst{10} = ae_osa16{2}; +let Inst{11} = ae_osa16{3}; +} + + + +def AE_TRUNCI32F64S_L : AE_TRUNCI32F64S_L_AE_FORMAT2<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_trunci32f64s_l AE_DR:$ae_shift_d0, AE_DR:$ae_shift_sd, timm:$ae_osa16))]>; + +class AE_TRUNCI32X2F64S_AE_FORMAT2 pattern> + : XtensaInst64<(outs AE_DR:$ae_shift_d), (ins AE_DR:$ae_shift_d0, AE_DR:$ae_shift_sd, uimm4:$ae_osa16), "ae_trunci32x2f64s $ae_shift_d, $ae_shift_d0, $ae_shift_sd, $ae_osa16", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_shift_d; +bits<4> ae_shift_d0; +bits<4> ae_shift_sd; +bits<4> ae_osa16; + + +let DecoderNamespace = "HIFI3"; + +//format +let Inst{1} = 1; +let Inst{2} = 1; +let Inst{3} = 1; +//opcode +let Inst{43} = 1; +let Inst{55} = 1; +//operands +let Inst{28} = ae_shift_d{0}; +let Inst{29} = ae_shift_d{1}; +let Inst{30} = ae_shift_d{2}; +let Inst{31} = ae_shift_d{3}; +let Inst{4} = ae_shift_d0{0}; +let Inst{5} = ae_shift_d0{1}; +let Inst{6} = ae_shift_d0{2}; +let Inst{7} = ae_shift_d0{3}; +let Inst{12} = ae_shift_sd{0}; +let Inst{13} = ae_shift_sd{1}; +let Inst{14} = ae_shift_sd{2}; +let Inst{15} = ae_shift_sd{3}; +let Inst{8} = ae_osa16{0}; +let Inst{9} = ae_osa16{1}; +let Inst{10} = ae_osa16{2}; +let Inst{11} = ae_osa16{3}; +} + + + +def AE_TRUNCI32X2F64S : AE_TRUNCI32X2F64S_AE_FORMAT2<[(set AE_DR:$ae_shift_d, (int_xtensa_ae_trunci32x2f64s AE_DR:$ae_shift_d0, AE_DR:$ae_shift_sd, timm:$ae_osa16))]>; + +class AE_VLDL16C_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars), "ae_vldl16c $ars", pattern>, + Requires<[HasHIFI3]> 
+{ +//operand decl +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_VLDL16C : AE_VLDL16C_X24<[(set AR:$ars_out, (int_xtensa_ae_vldl16c AR:$ars))]>; + +class AE_VLDL16C_IC_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars), "ae_vldl16c.ic $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_VLDL16C_IC : AE_VLDL16C_IC_X24<[(set AR:$ars_out, (int_xtensa_ae_vldl16c_ic AR:$ars))]>; + +class AE_VLDL16C_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars), "ae_vldl16c.ip $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{7} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_VLDL16C_IP : AE_VLDL16C_IP_X24<[(set 
AR:$ars_out, (int_xtensa_ae_vldl16c_ip AR:$ars))]>; + +class AE_VLDL16T_X24 pattern> + : XtensaAEInst24<(outs BR:$br, AR:$art), (ins AR:$ars), "ae_vldl16t $br, $art, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> br; +bits<4> art; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +//operands +let Inst{12} = br{0}; +let Inst{13} = br{1}; +let Inst{14} = br{2}; +let Inst{15} = br{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_VLDL16T : AE_VLDL16T_X24<[]>; + +class AE_VLDL32T_X24 pattern> + : XtensaAEInst24<(outs BR:$br, AR:$art), (ins AR:$ars), "ae_vldl32t $br, $art, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> br; +bits<4> art; +bits<4> ars; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +//operands +let Inst{12} = br{0}; +let Inst{13} = br{1}; +let Inst{14} = br{2}; +let Inst{15} = br{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_VLDL32T : AE_VLDL32T_X24<[]>; + +class AE_VLDSHT_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "ae_vldsht $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{12} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def AE_VLDSHT : AE_VLDSHT_X24<[(int_xtensa_ae_vldsht AR:$art)]>; + +class AE_VLEL16T_X24 pattern> + : XtensaAEInst24<(outs 
BR:$br, AR:$art_out), (ins AR:$art, AR:$ars), "ae_vlel16t $br, $art, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> br; +bits<4> art; +bits<4> ars; +let Constraints = "$art = $art_out,@earlyclobber $art_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{16} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = br{0}; +let Inst{13} = br{1}; +let Inst{14} = br{2}; +let Inst{15} = br{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_VLEL16T : AE_VLEL16T_X24<[]>; + +class AE_VLEL32T_X24 pattern> + : XtensaAEInst24<(outs BR:$br, AR:$art_out), (ins AR:$art, AR:$ars), "ae_vlel32t $br, $art, $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> br; +bits<4> art; +bits<4> ars; +let Constraints = "$art = $art_out,@earlyclobber $art_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{20} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = br{0}; +let Inst{13} = br{1}; +let Inst{14} = br{2}; +let Inst{15} = br{3}; +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_VLEL32T : AE_VLEL32T_X24<[]>; + +class AE_VLES16C_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars), "ae_vles16c $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let 
Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_VLES16C : AE_VLES16C_X24<[(set AR:$ars_out, (int_xtensa_ae_vles16c AR:$ars))]>; + +class AE_VLES16C_IC_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars), "ae_vles16c.ic $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{5} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_VLES16C_IC : AE_VLES16C_IC_X24<[(set AR:$ars_out, (int_xtensa_ae_vles16c_ic AR:$ars))]>; + +class AE_VLES16C_IP_X24 pattern> + : XtensaAEInst24<(outs AR:$ars_out), (ins AR:$ars), "ae_vles16c.ip $ars", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ars; +let Constraints = "$ars = $ars_out,@earlyclobber $ars_out"; + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{6} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{8} = ars{0}; +let Inst{9} = ars{1}; +let Inst{10} = ars{2}; +let Inst{11} = ars{3}; +} + + + +def AE_VLES16C_IP : AE_VLES16C_IP_X24<[(set AR:$ars_out, (int_xtensa_ae_vles16c_ip AR:$ars))]>; + +class AE_XOR_X24 pattern> + : XtensaAEInst24<(outs AE_DR:$ae_dr_to_dr_v), (ins AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1), "ae_xor $ae_dr_to_dr_v, $ae_dr_to_dr_v0, $ae_dr_to_dr_v1", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> ae_dr_to_dr_v; +bits<4> ae_dr_to_dr_v0; +bits<4> ae_dr_to_dr_v1; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let 
Inst{17} = 1; +let Inst{18} = 1; +let Inst{19} = 1; +let Inst{20} = 1; +//operands +let Inst{12} = ae_dr_to_dr_v{0}; +let Inst{13} = ae_dr_to_dr_v{1}; +let Inst{14} = ae_dr_to_dr_v{2}; +let Inst{15} = ae_dr_to_dr_v{3}; +let Inst{8} = ae_dr_to_dr_v0{0}; +let Inst{9} = ae_dr_to_dr_v0{1}; +let Inst{10} = ae_dr_to_dr_v0{2}; +let Inst{11} = ae_dr_to_dr_v0{3}; +let Inst{4} = ae_dr_to_dr_v1{0}; +let Inst{5} = ae_dr_to_dr_v1{1}; +let Inst{6} = ae_dr_to_dr_v1{2}; +let Inst{7} = ae_dr_to_dr_v1{3}; +} + + + +def AE_XOR : AE_XOR_X24<[(set AE_DR:$ae_dr_to_dr_v, (int_xtensa_ae_xor AE_DR:$ae_dr_to_dr_v0, AE_DR:$ae_dr_to_dr_v1))]>; + +class AE_ZALIGN64_X24 pattern> + : XtensaAEInst24<(outs AE_VALIGN:$ae_uu_uu), (ins ), "ae_zalign64 $ae_uu_uu", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<2> ae_uu_uu; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{12} = 1; +let Inst{19} = 1; +let Inst{21} = 1; +//operands +let Inst{6} = ae_uu_uu{0}; +let Inst{7} = ae_uu_uu{1}; +} + + + +def AE_ZALIGN64 : AE_ZALIGN64_X24<[(set AE_VALIGN:$ae_uu_uu, (int_xtensa_ae_zalign64 ))]>; + +class RUR_AE_BITHEAD_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins ), "rur.ae_bithead $arr", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{4} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +} + + + +def RUR_AE_BITHEAD : RUR_AE_BITHEAD_X24<[(set AR:$arr, (int_xtensa_rur_ae_bithead ))]>; + +class RUR_AE_BITPTR_X24 pattern> + : XtensaAEInst24<(outs AR:$art), (ins ), "rur.ae_bitptr $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + 
+//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def RUR_AE_BITPTR : RUR_AE_BITPTR_X24<[(set AR:$art, (int_xtensa_rur_ae_bitptr ))]>; + +class RUR_AE_BITSUSED_X24 pattern> + : XtensaAEInst24<(outs AR:$art), (ins ), "rur.ae_bitsused $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def RUR_AE_BITSUSED : RUR_AE_BITSUSED_X24<[(set AR:$art, (int_xtensa_rur_ae_bitsused ))]>; + +class RUR_AE_CBEGIN0_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins ), "rur.ae_cbegin0 $arr", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +} + + + +def RUR_AE_CBEGIN0 : RUR_AE_CBEGIN0_X24<[(set AR:$arr, (int_xtensa_rur_ae_cbegin0 ))]>; + +class RUR_AE_CEND0_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins ), "rur.ae_cend0 $arr", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{6} = 1; +let Inst{8} = 1; 
+let Inst{9} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +} + + + +def RUR_AE_CEND0 : RUR_AE_CEND0_X24<[(set AR:$arr, (int_xtensa_rur_ae_cend0 ))]>; + +class RUR_AE_CW_SD_NO_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins ), "rur.ae_cw_sd_no $arr", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{4} = 1; +let Inst{5} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +} + + + +def RUR_AE_CW_SD_NO : RUR_AE_CW_SD_NO_X24<[(set AR:$arr, (int_xtensa_rur_ae_cw_sd_no ))]>; + +class RUR_AE_CWRAP_X24 pattern> + : XtensaAEInst24<(outs AR:$art), (ins ), "rur.ae_cwrap $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{11} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def RUR_AE_CWRAP : RUR_AE_CWRAP_X24<[(set AR:$art, (int_xtensa_rur_ae_cwrap ))]>; + +class RUR_AE_FIRST_TS_X24 pattern> + : XtensaAEInst24<(outs AR:$art), (ins ), "rur.ae_first_ts $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{10} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let 
Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def RUR_AE_FIRST_TS : RUR_AE_FIRST_TS_X24<[(set AR:$art, (int_xtensa_rur_ae_first_ts ))]>; + +class RUR_AE_NEXTOFFSET_X24 pattern> + : XtensaAEInst24<(outs AR:$art), (ins ), "rur.ae_nextoffset $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def RUR_AE_NEXTOFFSET : RUR_AE_NEXTOFFSET_X24<[(set AR:$art, (int_xtensa_rur_ae_nextoffset ))]>; + +class RUR_AE_OVERFLOW_X24 pattern> + : XtensaAEInst24<(outs AR:$art), (ins ), "rur.ae_overflow $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{11} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def RUR_AE_OVERFLOW : RUR_AE_OVERFLOW_X24<[(set AR:$art, (int_xtensa_rur_ae_overflow ))]>; + +class RUR_AE_OVF_SAR_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins ), "rur.ae_ovf_sar $arr", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let 
Inst{23} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +} + + + +def RUR_AE_OVF_SAR : RUR_AE_OVF_SAR_X24<[(set AR:$arr, (int_xtensa_rur_ae_ovf_sar ))]>; + +class RUR_AE_SAR_X24 pattern> + : XtensaAEInst24<(outs AR:$art), (ins ), "rur.ae_sar $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def RUR_AE_SAR : RUR_AE_SAR_X24<[(set AR:$art, (int_xtensa_rur_ae_sar ))]>; + +class RUR_AE_SEARCHDONE_X24 pattern> + : XtensaAEInst24<(outs AR:$art), (ins ), "rur.ae_searchdone $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{11} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def RUR_AE_SEARCHDONE : RUR_AE_SEARCHDONE_X24<[(set AR:$art, (int_xtensa_rur_ae_searchdone ))]>; + +class RUR_AE_TABLESIZE_X24 pattern> + : XtensaAEInst24<(outs AR:$art), (ins ), "rur.ae_tablesize $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{9} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let 
Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def RUR_AE_TABLESIZE : RUR_AE_TABLESIZE_X24<[(set AR:$art, (int_xtensa_rur_ae_tablesize ))]>; + +class RUR_AE_TS_FTS_BU_BP_X24 pattern> + : XtensaAEInst24<(outs AR:$arr), (ins ), "rur.ae_ts_fts_bu_bp $arr", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> arr; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{5} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{12} = arr{0}; +let Inst{13} = arr{1}; +let Inst{14} = arr{2}; +let Inst{15} = arr{3}; +} + + + +def RUR_AE_TS_FTS_BU_BP : RUR_AE_TS_FTS_BU_BP_X24<[(set AR:$arr, (int_xtensa_rur_ae_ts_fts_bu_bp ))]>; + +class WUR_AE_BITHEAD_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_bithead $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{8} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_BITHEAD : WUR_AE_BITHEAD_X24<[(int_xtensa_wur_ae_bithead AR:$art)]>; + +class WUR_AE_BITPTR_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_bitptr $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = 
art{3}; +} + + + +def WUR_AE_BITPTR : WUR_AE_BITPTR_X24<[(int_xtensa_wur_ae_bitptr AR:$art)]>; + +class WUR_AE_BITSUSED_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_bitsused $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_BITSUSED : WUR_AE_BITSUSED_X24<[(int_xtensa_wur_ae_bitsused AR:$art)]>; + +class WUR_AE_CBEGIN0_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_cbegin0 $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_CBEGIN0 : WUR_AE_CBEGIN0_X24<[(int_xtensa_wur_ae_cbegin0 AR:$art)]>; + +class WUR_AE_CEND0_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_cend0 $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_CEND0 : 
WUR_AE_CEND0_X24<[(int_xtensa_wur_ae_cend0 AR:$art)]>; + +class WUR_AE_CW_SD_NO_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_cw_sd_no $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_CW_SD_NO : WUR_AE_CW_SD_NO_X24<[(int_xtensa_wur_ae_cw_sd_no AR:$art)]>; + +class WUR_AE_CWRAP_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_cwrap $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{11} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_CWRAP : WUR_AE_CWRAP_X24<[(int_xtensa_wur_ae_cwrap AR:$art)]>; + +class WUR_AE_FIRST_TS_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_first_ts $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{10} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_FIRST_TS : 
WUR_AE_FIRST_TS_X24<[(int_xtensa_wur_ae_first_ts AR:$art)]>; + +class WUR_AE_NEXTOFFSET_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_nextoffset $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{10} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_NEXTOFFSET : WUR_AE_NEXTOFFSET_X24<[(int_xtensa_wur_ae_nextoffset AR:$art)]>; + +class WUR_AE_OVERFLOW_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_overflow $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{11} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_OVERFLOW : WUR_AE_OVERFLOW_X24<[(int_xtensa_wur_ae_overflow AR:$art)]>; + +class WUR_AE_OVF_SAR_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_ovf_sar $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_OVF_SAR : WUR_AE_OVF_SAR_X24<[(int_xtensa_wur_ae_ovf_sar 
AR:$art)]>; + +class WUR_AE_SAR_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_sar $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{10} = 1; +let Inst{11} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_SAR : WUR_AE_SAR_X24<[(int_xtensa_wur_ae_sar AR:$art)]>; + +class WUR_AE_SEARCHDONE_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_searchdone $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{11} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_SEARCHDONE : WUR_AE_SEARCHDONE_X24<[(int_xtensa_wur_ae_searchdone AR:$art)]>; + +class WUR_AE_TABLESIZE_X24 pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_tablesize $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{2} = 1; +let Inst{8} = 1; +let Inst{9} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{18} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_TABLESIZE : WUR_AE_TABLESIZE_X24<[(int_xtensa_wur_ae_tablesize AR:$art)]>; + +class WUR_AE_TS_FTS_BU_BP_X24 
pattern> + : XtensaAEInst24<(outs ), (ins AR:$art), "wur.ae_ts_fts_bu_bp $art", pattern>, + Requires<[HasHIFI3]> +{ +//operand decl +bits<4> art; + + +let DecoderNamespace = "HIFI3"; + +//format + +//opcode +let Inst{9} = 1; +let Inst{12} = 1; +let Inst{13} = 1; +let Inst{14} = 1; +let Inst{15} = 1; +let Inst{16} = 1; +let Inst{17} = 1; +let Inst{20} = 1; +let Inst{21} = 1; +let Inst{22} = 1; +let Inst{23} = 1; +//operands +let Inst{4} = art{0}; +let Inst{5} = art{1}; +let Inst{6} = art{2}; +let Inst{7} = art{3}; +} + + + +def WUR_AE_TS_FTS_BU_BP : WUR_AE_TS_FTS_BU_BP_X24<[(int_xtensa_wur_ae_ts_fts_bu_bp AR:$art)]>; diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 89fcf2ad55b08..d8129d11fbf34 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1954,3 +1954,8 @@ let Predicates = [HasSingleFloat] in { //===----------------------------------------------------------------------===// include "XtensaDSPInstrInfo.td" +//===----------------------------------------------------------------------===// +// HiFi3 Instructions +//===----------------------------------------------------------------------===// +include "XtensaHIFIInstrFormats.td" +include "XtensaHIFIInstrInfo.td" diff --git a/llvm/lib/Target/Xtensa/XtensaOperands.td b/llvm/lib/Target/Xtensa/XtensaOperands.td index ad22bac40ea76..3d10410a77599 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperands.td +++ b/llvm/lib/Target/Xtensa/XtensaOperands.td @@ -21,6 +21,26 @@ class Immediate let ParserMatchClass = !cast(asmop); } +class ImmediateRanged : + Immediate= " # Low # " && Imm <= " # High # " && (Imm % " # Step # " ) == 0;", + asmop> { + let PrintMethod = "printImmOperand<" # Low #"," # High # "," # Step # ">"; + } + +class ImmRangedAsmOperand : + ImmAsmOperand { + let PredicateMethod = "isImmInRange<" # Low #"," # High # "," # Step # ">"; +} + +multiclass ImmRangeDecl { + + def _AsmOperand : ImmRangedAsmOperand; 
+ def NAME : ImmediateRanged; + +} + + // imm8 predicate - Immediate in the range [-128,127] def Imm8_AsmOperand : ImmAsmOperand<"Imm8">; def imm8 : Immediate= -128 && Imm <= 127; }], "Imm8_AsmOperand"> { @@ -107,6 +127,14 @@ def shimm1_31 : Immediate= 1 && Imm <= 31; }], "Shimm1_31_A let DecoderMethod = "decodeShimm1_31Operand"; } +defm imm32n_28: ImmRangeDecl<-32, 28, 4>; +defm imm64n_56: ImmRangeDecl<-64, 56, 8>; +defm imm0_56: ImmRangeDecl<0, 56, 8>; +defm imm16n_14: ImmRangeDecl<-16, 14, 2>; +defm imm16n_47: ImmRangeDecl<-16, 47, 1>; +defm uimm2: ImmRangeDecl<0, 3, 1>; +defm uimm6: ImmRangeDecl<0, 63, 1>; + // Memory offset 0..255 for 8-bit memory accesses def Offset8m8_AsmOperand : ImmAsmOperand<"Offset8m8">; def offset8m8 : Immediate Date: Mon, 30 Sep 2024 15:08:29 +0300 Subject: [PATCH 199/289] [Xtensa] Add support for boolean vectors Xtensa architecture uses v2i1 (BR2 reg class) and v4i1 (BR4 reg class) boolean vectors as arguments for HIFI instructions: - vector compare, e.g.: AE_EQ16X4 - vector conditional move, e.g: AE_MOVT16X4 --- llvm/lib/Target/Xtensa/XtensaCallingConv.td | 2 + llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 152 ++++++++++++++++-- llvm/lib/Target/Xtensa/XtensaISelLowering.h | 2 + llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 39 ++++- llvm/lib/Target/Xtensa/XtensaOperators.td | 3 + .../CodeGen/Xtensa/xtensa-xtbool-spill.ll | 4 +- 6 files changed, 188 insertions(+), 14 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaCallingConv.td b/llvm/lib/Target/Xtensa/XtensaCallingConv.td index c48b97d446bbe..d9d065846b38a 100644 --- a/llvm/lib/Target/Xtensa/XtensaCallingConv.td +++ b/llvm/lib/Target/Xtensa/XtensaCallingConv.td @@ -16,6 +16,8 @@ class CCIfFeature: //===----------------------------------------------------------------------===// def RetCC_Xtensa : CallingConv<[ CCIfFeature<"Boolean",CCIfType<[v1i1], CCAssignToReg<[B0]>>>, + CCIfFeature<"Boolean",CCIfType<[v2i1], CCAssignToReg<[B0_B1]>>>, + CCIfFeature<"Boolean",CCIfType<[v4i1], 
CCAssignToReg<[B0_B1_B2_B3]>>>, // First two return values go in a2, a3, a4, a5 CCIfType<[i32], CCAssignToReg<[A2, A3, A4, A5]>>, diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 561cd5ba71cdb..6555827c26c9d 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -66,14 +66,21 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, if (Subtarget.hasBoolean()) { addRegisterClass(MVT::v1i1, &Xtensa::BRRegClass); + addRegisterClass(MVT::v2i1, &Xtensa::BR2RegClass); + addRegisterClass(MVT::v4i1, &Xtensa::BR4RegClass); + setOperationAction(ISD::Constant, MVT::v2i1, Expand); setOperationAction(ISD::Constant, MVT::v1i1, Expand); - for (MVT VT : MVT::integer_valuetypes()) { - setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v1i1, Promote); - setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v1i1, Promote); - setLoadExtAction(ISD::EXTLOAD, VT, MVT::v1i1, Promote); - } - } + setTargetDAGCombine(ISD::STORE); + setTargetDAGCombine(ISD::BITCAST); + setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR); + setOperationAction(ISD::STORE, MVT::v1i1, Legal); + setOperationAction(ISD::STORE, MVT::v2i1, Legal); + setOperationAction(ISD::STORE, MVT::v4i1, Legal); + setOperationAction(ISD::LOAD, MVT::v1i1, Legal); + setOperationAction(ISD::LOAD, MVT::v2i1, Legal); + setOperationAction(ISD::LOAD, MVT::v4i1, Legal); + } // Set up special registers. setStackPointerRegisterToSaveRestore(Xtensa::SP); @@ -117,6 +124,11 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, // Expand jump table branches as address arithmetic followed by an // indirect jump. setOperationAction(ISD::BR_JT, MVT::Other, Custom); + // Used by legalize types to correctly generate the setcc result. 
+ // AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32); + if (!Subtarget.hasBoolean()) + setOperationPromotedToType(ISD::SETCC, MVT::i1, MVT::i32); + setOperationPromotedToType(ISD::BR_CC, MVT::i1, MVT::i32); setOperationAction(ISD::BR_CC, MVT::i32, Legal); setOperationAction(ISD::BR_CC, MVT::i64, Expand); @@ -745,6 +757,91 @@ static SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG, LHS, RHS, Dest); } +static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const XtensaSubtarget &Subtarget) { + // (vNi1 (bitcast (iN (trunc i32)))) -> (vNi1 (xtensa_bitcast i32)) + SDLoc DL(N); + SDValue Op = N->getOperand(0); + + if (N->getOpcode() != ISD::BITCAST || Op.getOpcode() != ISD::TRUNCATE) + return SDValue(); + + SDValue Int = Op.getOperand(0); + llvm::EVT BoolVT = N->getValueType(0); + + if (!BoolVT.isVector() || BoolVT.getVectorElementType() != MVT::i1 || + Int.getValueType() != MVT::i32) + return SDValue(); + + SDValue Trunc = DAG.getNode(XtensaISD::TRUNC, DL, BoolVT, Int); + + return Trunc; +} + +static SDValue +PerformExtractSubvectorCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const XtensaSubtarget &Subtarget) { + // (vNi1 (extract_subvector (v8i1 (load x))) -> (vNi1 (load x)) + SDLoc DL(N); + SDValue Load = N->getOperand(0); + + if (N->getOpcode() != ISD::EXTRACT_SUBVECTOR) + return SDValue(); + + EVT LoadVT = Load.getValueType(); + EVT BoolVT = N->getValueType(0); + + if (!BoolVT.isVector() || BoolVT.getVectorElementType() != MVT::i1) + return SDValue(); + + if (Load.getOpcode() != ISD::LOAD) + return SDValue(); + + LoadSDNode *LdNode = cast(Load.getNode()); + + if (!LoadVT.isVector() || LoadVT.getVectorElementType() != MVT::i1) + return SDValue(); + + SDValue NewLoad = + DAG.getLoad(BoolVT, DL, LdNode->getChain(), LdNode->getBasePtr(), + LdNode->getPointerInfo(), LdNode->getOriginalAlign(), + LdNode->getMemOperand()->getFlags()); + + return NewLoad; +} +static SDValue 
PerformSTORECombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const XtensaSubtarget &Subtarget) { + // (store (v8i1 (concat_vector (vNi1 elt) undef )) addr off) + // -> (store (vNi1 elt) addr off) + SDLoc DL(N); + + if (N->getOpcode() != ISD::STORE) + return SDValue(); + + StoreSDNode *StNode = cast(N); + + SDValue Concat = N->getOperand(1); + EVT BoolVT = Concat.getValueType(); + + if ((Concat.getOpcode() != ISD::CONCAT_VECTORS) || !BoolVT.isVector() || + (BoolVT.getVectorElementType() != MVT::i1)) + return SDValue(); + + SDValue Val = Concat.getNode()->getOperand(0); + EVT ValVT = Val.getValueType(); + + if (!ValVT.isVector() || ValVT.getVectorElementType() != MVT::i1 || + ValVT.getSizeInBits() > 8) { + return SDValue(); + } + + return DAG.getStore(StNode->getChain(), DL, Val, StNode->getBasePtr(), + StNode->getMemOperand()); +} + SDValue XtensaTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { SelectionDAG &DAG = DCI.DAG; @@ -761,6 +858,12 @@ SDValue XtensaTargetLowering::PerformDAGCombine(SDNode *N, return PerformHWLoopCombine(N, DAG, DCI, Subtarget); case ISD::BRCOND: return PerformBRCONDCombine(N, DAG, DCI, Subtarget); + case ISD::BITCAST: + return PerformBITCASTCombine(N, DAG, DCI, Subtarget); + case ISD::EXTRACT_SUBVECTOR: + return PerformExtractSubvectorCombine(N, DAG, DCI, Subtarget); + case ISD::STORE: + return PerformSTORECombine(N, DAG, DCI, Subtarget); } return SDValue(); @@ -783,6 +886,12 @@ static bool CC_Xtensa_Custom(unsigned ValNo, MVT ValVT, MVT LocVT, Xtensa::B8, Xtensa::B9, Xtensa::B10, Xtensa::B11, Xtensa::B12, Xtensa::B13, Xtensa::B14, Xtensa::B15}; + ArrayRef BR2Regs(Xtensa::BR2RegClass.begin(), + Xtensa::BR2RegClass.end()); + + ArrayRef BR4Regs(Xtensa::BR4RegClass.begin(), + Xtensa::BR4RegClass.end()); + if (ArgFlags.isByVal()) { Align ByValAlign = ArgFlags.getNonZeroByValAlign(); unsigned ByValSize = ArgFlags.getByValSize(); @@ -841,6 +950,11 @@ static bool CC_Xtensa_Custom(unsigned 
ValNo, MVT ValVT, MVT LocVT, LocVT = MVT::i32; } else if (ValVT == MVT::v1i1) { Register = State.AllocateReg(BoolRegs); + } else if (ValVT == MVT::v2i1) { + Register = State.AllocateReg(BR2Regs); + LocVT = ValVT; + } else if (ValVT == MVT::v4i1) { + Register = State.AllocateReg(BR4Regs); LocVT = ValVT; } else llvm_unreachable("Cannot handle this ValVT."); @@ -892,6 +1006,10 @@ SDValue XtensaTargetLowering::LowerFormalArguments( RC = &Xtensa::ARRegClass; } else if (RegVT == MVT::v1i1) { RC = &Xtensa::BRRegClass; + } else if (RegVT == MVT::v2i1) { + RC = &Xtensa::BR2RegClass; + } else if (RegVT == MVT::v4i1) { + RC = &Xtensa::BR4RegClass; } else llvm_unreachable("RegVT not supported by FormalArguments Lowering"); @@ -1985,6 +2103,8 @@ const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const { return "XtensaISD::MOVS"; case XtensaISD::MEMW: return "XtensaISD::MEMW"; + case XtensaISD::TRUNC: + return "XtensaISD::TRUNC"; } return nullptr; } @@ -3324,7 +3444,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( } return MBB; } - case Xtensa::MOVBA_P: { + case Xtensa::MOVBA_P: + case Xtensa::MOVBA2_P: { const TargetRegisterClass *AR = getRegClassFor(MVT::i32); Register Dst1 = MRI.createVirtualRegister(AR); @@ -3335,8 +3456,21 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( /* MOVBA_P2 Breg, Dst1, Dest2, Src */ - - BuildMI(*MBB, MI, DL, TII.get(Xtensa::MOVBA_P2), Breg.getReg()) + unsigned TargetOpcode; + switch (MI.getOpcode()) { + case Xtensa::MOVBA_P: + TargetOpcode = Xtensa::MOVBA_P2; + break; + case Xtensa::MOVBA2_P: + TargetOpcode = Xtensa::MOVBA2_P2; + break; + case Xtensa::MOVBA4_P: + TargetOpcode = Xtensa::MOVBA4_P2; + break; + default: + llvm_unreachable("Unknown opcode"); + } + BuildMI(*MBB, MI, DL, TII.get(TargetOpcode), Breg.getReg()) .addReg(Dst1, RegState::Define | RegState::EarlyClobber) .addReg(Dst2, RegState::Define | RegState::EarlyClobber) .addReg(Src.getReg()); diff --git 
a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index f64a0e415dd0a..cd5c99aa6d71c 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -91,6 +91,8 @@ enum { SRCL, // Shift Right Combined SRCR, + + TRUNC }; } diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index d8129d11fbf34..ab4a75f9ff2c1 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -950,22 +950,57 @@ let Predicates = [HasBoolean] in { "!movba $r, $s", []> { let usesCustomInserter = 1; let Defs = [BREG]; - //let Uses = [BREG]; + } + + def MOVBA2_P2: Pseudo<(outs BR2:$r, AR:$x, AR:$y), (ins AR:$s), + "!movba $r, $x, $y, $s", []> { + let Defs = [BREG]; + } + + def MOVBA2_P: Pseudo<(outs BR2:$r), (ins AR:$s), + "!movba2 $r, $s", []> { + let usesCustomInserter = 1; + let Defs = [BREG]; + } + + def MOVBA4_P2: Pseudo<(outs BR4:$r, AR:$x, AR:$y), (ins AR:$s), + "!movba4 $r, $x, $y, $s", []> { + let Defs = [BREG]; + } + + def MOVBA4_P: Pseudo<(outs BR4:$r), (ins AR:$s), + "!movba4 $r, $s", []> { + let usesCustomInserter = 1; + let Defs = [BREG]; } def EXTUI_BR_P: Pseudo<(outs AR:$r), (ins AR:$s, BR:$b), "!extui_br $r, $s, $b", []>; + def EXTUI_BR2_P: Pseudo<(outs AR:$r), (ins AR:$s, BR2:$b), + "!extui_br2 $r, $s, $b", []>; + def EXTUI_BR4_P: Pseudo<(outs AR:$r), (ins AR:$s, BR4:$b), + "!extui_br4 $r, $s, $b", []>; def SLLI_BR_P: Pseudo<(outs AR:$r), (ins AR:$s, BR:$b), "!slli_br $r, $s, $b", []>; def : Pat<(v1i1 (build_vector AR:$a)), (MOVBA_P AR:$a)>; + def : Pat<(v1i1 (scalar_to_vector AR:$a)), (MOVBA_P AR:$a)>; + + def : Pat<(v2i1 (build_vector AR:$a, AR:$b)), + (MOVBA2_P (OR AR:$a, (SLLI AR:$b, (i32 1))))>; + + def : Pat<(v2i1 (Xtensa_trunc AR:$s)), (MOVBA2_P AR:$s)>; + def : Pat<(v4i1 (Xtensa_trunc AR:$s)), (MOVBA4_P AR:$s)>; def : Pat<(i32 (vector_extract (v1i1 BR:$b), (i32 0))), (EXTUI_BR_P (RSR BREG), 
BR:$b)>; - def : Pat<(v1i1 (load addr_ish1:$addr)), (MOVBA_P (L8UI mem8:$addr))>; + def : Pat<(v2i1 (load addr_ish1:$addr)), (MOVBA2_P (L8UI mem8:$addr))>; + def : Pat<(v4i1 (load addr_ish1:$addr)), (MOVBA4_P (L8UI mem8:$addr))>; def : Pat<(store BR:$b, addr_ish1:$addr), (S8I (EXTUI_BR_P (RSR BREG), BR:$b), mem32:$addr)>; + def : Pat<(store BR2:$b, addr_ish1:$addr), (S8I (EXTUI_BR2_P (RSR BREG), BR2:$b), mem32:$addr)>; + def : Pat<(store BR4:$b, addr_ish1:$addr), (S8I (EXTUI_BR4_P (RSR BREG), BR4:$b), mem32:$addr)>; def SPILL_BOOL: Pseudo<(outs), (ins BR:$b, mem8:$mem), "!spill_bool $b, $mem",[]> { diff --git a/llvm/lib/Target/Xtensa/XtensaOperators.td b/llvm/lib/Target/Xtensa/XtensaOperators.td index add29bf755dc4..97abb0b11803d 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperators.td +++ b/llvm/lib/Target/Xtensa/XtensaOperators.td @@ -45,6 +45,8 @@ def SDT_XtensaEXTUI : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCi def SDT_XtensaLoopEnd : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>; def SDT_XtensaLoopDec : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisVT<0, i32>]>; def SDT_XtensaLoopBr : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisVT<1, OtherVT>]>; +def SDT_XtensaTRUNC : SDTypeProfile<1, 1, [SDTCisVT<1, i32>, SDTCisVec<0>]>; + //===----------------------------------------------------------------------===// // Node definitions @@ -118,3 +120,4 @@ def Xtensa_loopdec: SDNode<"XtensaISD::LOOPDEC", SDT_XtensaLoopDec, [SDNPHasChain, SDNPInGlue]>; def Xtensa_loopbr: SDNode<"XtensaISD::LOOPBR", SDT_XtensaLoopBr, [SDNPHasChain, SDNPInGlue]>; +def Xtensa_trunc: SDNode<"XtensaISD::TRUNC", SDT_XtensaTRUNC>; diff --git a/llvm/test/CodeGen/Xtensa/xtensa-xtbool-spill.ll b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-spill.ll index 321e955be6cd9..c1fb9f2087041 100644 --- a/llvm/test/CodeGen/Xtensa/xtensa-xtbool-spill.ll +++ b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-spill.ll @@ -21,11 +21,9 @@ declare <1 x i1> @get_xtbool() define <1 x i1> @test_xtbool_load(i32 %addr) { ; CHECK-LABEL: 
test_xtbool_load ; CHECK: l8ui {{a[0-9]+}} - ; CHECK: movi.n [[C:a[0-9]+]], 1 - ; CHECK: and [[SRC:a[0-9]+]], {{a[0-9]+}}, [[C]] ; CHECK: rsr [[BREG:a[0-9]+]], br ; CHECK: and [[AND:a[0-9]+]], {{a[0-9]+}}, [[BREG]] - ; CHECK: or [[OR:a[0-9]+]], [[AND]], [[SRC]] + ; CHECK: or [[OR:a[0-9]+]], [[AND]], {{a[0-9]+}} ; CHECK: wsr [[OR]], br %ptr = inttoptr i32 %addr to ptr %load_bits = load <8 x i1>, ptr %ptr, align 1 From e7f0a0e9df4d8ca09f71619ab8211e18fc0fb9a7 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 30 Sep 2024 17:55:56 +0300 Subject: [PATCH 200/289] [Xtensa] Add HIFI3 instruction lowering --- llvm/lib/Target/Xtensa/XtensaCallingConv.td | 10 +- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 114 ++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaISelLowering.h | 9 +- llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp | 19 ++- llvm/lib/Target/Xtensa/XtensaUtils.cpp | 37 ++++++ 5 files changed, 185 insertions(+), 4 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaCallingConv.td b/llvm/lib/Target/Xtensa/XtensaCallingConv.td index d9d065846b38a..bbc1974ab7d6a 100644 --- a/llvm/lib/Target/Xtensa/XtensaCallingConv.td +++ b/llvm/lib/Target/Xtensa/XtensaCallingConv.td @@ -22,7 +22,10 @@ def RetCC_Xtensa : CallingConv<[ // First two return values go in a2, a3, a4, a5 CCIfType<[i32], CCAssignToReg<[A2, A3, A4, A5]>>, CCIfType<[f32], CCAssignToReg<[A2, A3, A4, A5]>>, - CCIfType<[i64], CCAssignToRegWithShadow<[A2, A4], [A3, A5]>> + CCIfType<[i64], CCAssignToRegWithShadow<[A2, A4], [A3, A5]>>, + CCIfFeature<"HIFI3", + CCIfType<[v4i16, v2i32, v1i64, v1i32], + CCAssignToReg<[AED0, AED1, AED2, AED3]>>> ]>; //===----------------------------------------------------------------------===// @@ -43,5 +46,8 @@ def RetCCW_Xtensa : CallingConv<[ //First two return values go in a10, a11, a12, a13 CCIfType<[i32], CCAssignToReg<[A10, A11, A12, A13]>>, CCIfType<[f32], CCAssignToReg<[A10, A11, A12, A13]>>, - CCIfType<[i64], CCAssignToRegWithShadow<[A10, A12], [A11, A13]>> + 
CCIfType<[i64], CCAssignToRegWithShadow<[A10, A12], [A11, A13]>>, + CCIfFeature<"HIFI3", + CCIfType<[v4i16, v2i32, v1i64, v1i32], + CCAssignToReg<[AED0, AED1, AED2, AED3]>>> ]>; diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 6555827c26c9d..cdf5771e3c1aa 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -36,6 +36,25 @@ using namespace llvm; static const MCPhysReg XtensaArgRegs[6] = {Xtensa::A2, Xtensa::A3, Xtensa::A4, Xtensa::A5, Xtensa::A6, Xtensa::A7}; +static const MCPhysReg VecRegs[] = {Xtensa::AED0, Xtensa::AED1, Xtensa::AED2, + Xtensa::AED3}; + +static const MVT VectorIntTypes[] = { + MVT::v2i32, + MVT::v1i32, + MVT::v4i16, + MVT::v1i64, +}; + +template static bool isVecVT(VT ValVT) { + for (const auto &V : VectorIntTypes) { + auto VV = VT(V); + if (VV == ValVT) + return true; + } + return false; +} + // Return true if we must use long (in fact, indirect) function call. 
// It's simplified version, production implimentation must // resolve a functions in ROM (usually glibc functions) @@ -81,6 +100,27 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setOperationAction(ISD::LOAD, MVT::v2i1, Legal); setOperationAction(ISD::LOAD, MVT::v4i1, Legal); } + + if (Subtarget.hasHIFI3()) { + for (MVT VT : VectorIntTypes) { + addRegisterClass(VT, &Xtensa::AE_DRRegClass); + setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand); + // handle bicast v8i8 to VEC_VT + setOperationAction(ISD::BITCAST, VT, Custom); + } + addRegisterClass(MVT::v8i8, &Xtensa::AE_VALIGNRegClass); + // handle bicast VEC_VT to v8i8 + setOperationAction(ISD::BITCAST, MVT::v8i8, Expand); + + setOperationAction(ISD::SIGN_EXTEND, MVT::v1i32, Expand); + setOperationAction(ISD::ZERO_EXTEND, MVT::v1i32, Expand); + setOperationAction(ISD::ANY_EXTEND, MVT::v1i32, Expand); + setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Legal); + + setTargetDAGCombine(ISD::BUILD_VECTOR); + setOperationAction(ISD::MUL, MVT::v1i64, Expand); + } + // Set up special registers. 
setStackPointerRegisterToSaveRestore(Xtensa::SP); @@ -388,6 +428,34 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setMaxAtomicSizeInBitsSupported(0); } + for (MVT VT : MVT::fixedlen_vector_valuetypes()) { + if (isTypeLegal(VT)) { + setOperationAction(ISD::CTPOP, VT, Expand); + setOperationAction(ISD::SRL, VT, Expand); + setOperationAction(ISD::SRA, VT, Expand); + setOperationAction(ISD::SHL, VT, Expand); + + // Expand all divisions and remainders for vectors + setOperationAction(ISD::SDIV, VT, Expand); + setOperationAction(ISD::UDIV, VT, Expand); + setOperationAction(ISD::SREM, VT, Expand); + setOperationAction(ISD::UREM, VT, Expand); + } + setOperationAction(ISD::SDIVREM, VT, Expand); + setOperationAction(ISD::UDIVREM, VT, Expand); + + setOperationAction(ISD::SELECT_CC, VT, Custom); + setOperationAction(ISD::SETCC, VT, Custom); + + // Disable all narrowing stores and extending loads for vectors + for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) { + setTruncStoreAction(VT, InnerVT, Expand); + setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand); + setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand); + setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand); + } + } + // Compute derived properties from the register classes computeRegisterProperties(STI.getRegisterInfo()); } @@ -757,6 +825,22 @@ static SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG, LHS, RHS, Dest); } +static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const XtensaSubtarget &Subtarget) { + SDLoc DL(N); + EVT VT = N->getValueType(0); + SDValue Op0 = N->getOperand(0); + ConstantSDNode *Const = dyn_cast(Op0); + if (VT == MVT::v1i64 && Const) { + int64_t Val = Const->getSExtValue(); + if (Val <= std::numeric_limits::max()) + return DAG.getNode(XtensaISD::BUILD_VEC, DL, MVT::v1i64, + DAG.getConstant(Val, DL, MVT::i32)); + } + return SDValue(); +} + static SDValue PerformBITCASTCombine(SDNode *N, 
SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const XtensaSubtarget &Subtarget) { @@ -858,6 +942,8 @@ SDValue XtensaTargetLowering::PerformDAGCombine(SDNode *N, return PerformHWLoopCombine(N, DAG, DCI, Subtarget); case ISD::BRCOND: return PerformBRCONDCombine(N, DAG, DCI, Subtarget); + case ISD::BUILD_VECTOR: + return PerformBUILD_VECTORCombine(N, DAG, DCI, Subtarget); case ISD::BITCAST: return PerformBITCASTCombine(N, DAG, DCI, Subtarget); case ISD::EXTRACT_SUBVECTOR: @@ -956,6 +1042,9 @@ static bool CC_Xtensa_Custom(unsigned ValNo, MVT ValVT, MVT LocVT, } else if (ValVT == MVT::v4i1) { Register = State.AllocateReg(BR4Regs); LocVT = ValVT; + } else if (isVecVT(ValVT)) { + Register = State.AllocateReg(VecRegs); + LocVT = ValVT; } else llvm_unreachable("Cannot handle this ValVT."); @@ -1010,6 +1099,8 @@ SDValue XtensaTargetLowering::LowerFormalArguments( RC = &Xtensa::BR2RegClass; } else if (RegVT == MVT::v4i1) { RC = &Xtensa::BR4RegClass; + } else if (isVecVT(RegVT)) { + RC = &Xtensa::AE_DRRegClass; } else llvm_unreachable("RegVT not supported by FormalArguments Lowering"); @@ -1454,6 +1545,9 @@ SDValue XtensaTargetLowering::LowerSELECT_CC(SDValue Op, LHS, RHS, TrueValue, FalseValue, (LHS.getValueType() == MVT::f32) ? 
TargetCC_FP : TargetCC); + else if (TrueValue.getValueType().isVector()) + return Op; + return DAG.getNode(XtensaISD::SELECT_CC, DL, Ty, LHS, RHS, TrueValue, FalseValue, TargetCC); } @@ -1986,6 +2080,14 @@ SDValue XtensaTargetLowering::LowerATOMIC_FENCE(SDValue Op, return DAG.getNode(XtensaISD::MEMW, DL, MVT::Other, Chain); } +SDValue XtensaTargetLowering::LowerBitVecLOAD(SDValue Op, + SelectionDAG &DAG) const { + SDLoc DL(Op); + EVT VT = Op.getValueType(); + assert(VT.isVector() && VT.getSizeInBits() <= 8); + return SDValue(); // Expand +} + SDValue XtensaTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { switch (Op.getOpcode()) { @@ -2038,6 +2140,8 @@ SDValue XtensaTargetLowering::LowerOperation(SDValue Op, case ISD::FSHL: case ISD::FSHR: return LowerFunnelShift(Op, DAG); + case ISD::BITCAST: + return LowerBITCAST(Op, DAG); default: report_fatal_error("Unexpected node to lower"); } @@ -2105,6 +2209,8 @@ const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const { return "XtensaISD::MEMW"; case XtensaISD::TRUNC: return "XtensaISD::TRUNC"; + case XtensaISD::BUILD_VEC: + return "XtensaISD::BUILD_VEC"; } return nullptr; } @@ -3484,3 +3590,11 @@ MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter( // llvm_unreachable("Unexpected instr type to insert"); } } + +SDValue XtensaTargetLowering::LowerBITCAST(SDValue Op, + SelectionDAG &DAG) const { + assert(Op.getValueType().isVector()); + if (Op.getOperand(0).getValueType() == MVT::v8i8) + return SDValue(); // Expand + return Op; // Legal +} diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.h b/llvm/lib/Target/Xtensa/XtensaISelLowering.h index cd5c99aa6d71c..84a301e469c41 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.h +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.h @@ -92,7 +92,8 @@ enum { // Shift Right Combined SRCR, - TRUNC + BUILD_VEC, + TRUNC, }; } @@ -185,6 +186,8 @@ class XtensaTargetLowering : public TargetLowering { const SmallVectorImpl &OutVals, 
const SDLoc &DL, SelectionDAG &DAG) const override; + SDValue LowerVectorShift(SDValue Op, SelectionDAG &DAG) const; + bool shouldInsertFencesForAtomic(const Instruction *I) const override { return true; } @@ -253,6 +256,10 @@ class XtensaTargetLowering : public TargetLowering { SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const; + + SDValue LowerBitVecLOAD(SDValue Op, SelectionDAG &DAG) const; + SDValue getAddrPCRel(SDValue Op, SelectionDAG &DAG) const; CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const; diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp index 7d2389875eaff..b9ddf19750fe9 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp @@ -135,7 +135,18 @@ void XtensaInstrInfo::copyPhysReg(MachineBasicBlock &MBB, .addReg(SrcReg, getKillRegState(KillSrc)) .addReg(SrcReg, getKillRegState(KillSrc)); return; - } else + } else if (STI.hasHIFI3() && Xtensa::AE_DRRegClass.contains(DestReg, SrcReg)) + Opcode = Xtensa::AE_MOV; + else if (STI.hasHIFI3() && Xtensa::AE_DRRegClass.contains(DestReg) && + Xtensa::ARRegClass.contains(SrcReg)) + Opcode = Xtensa::AE_MOVDA32; + else if (STI.hasHIFI3() && Xtensa::AE_DRRegClass.contains(SrcReg) && + Xtensa::ARRegClass.contains(DestReg)) + Opcode = Xtensa::AE_MOVAD32_L; + else if (STI.hasHIFI3() && + Xtensa::AE_VALIGNRegClass.contains(DestReg, SrcReg)) + Opcode = Xtensa::AE_MOVALIGN; + else report_fatal_error("Impossible reg-to-reg copy"); BuildMI(MBB, MBBI, DL, get(Opcode), DestReg) @@ -179,6 +190,12 @@ void XtensaInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC, } else if (RC == &Xtensa::BRRegClass) { LoadOpcode = Xtensa::RESTORE_BOOL; StoreOpcode = Xtensa::SPILL_BOOL; + } else if (RC == &Xtensa::AE_DRRegClass) { + LoadOpcode = Xtensa::AE_L64_I; + StoreOpcode = Xtensa::AE_S64_I; + } else if (RC == &Xtensa::AE_VALIGNRegClass) 
{ + LoadOpcode = Xtensa::AE_LALIGN64_I; + StoreOpcode = Xtensa::AE_SALIGN64_I; } else llvm_unreachable("Unsupported regclass to load or store"); } diff --git a/llvm/lib/Target/Xtensa/XtensaUtils.cpp b/llvm/lib/Target/Xtensa/XtensaUtils.cpp index e11cdf67693c0..93c7c5fb17658 100644 --- a/llvm/lib/Target/Xtensa/XtensaUtils.cpp +++ b/llvm/lib/Target/Xtensa/XtensaUtils.cpp @@ -49,6 +49,43 @@ bool isValidAddrOffset(MachineInstr &MI, int64_t Offset) { break; case Xtensa::LEA_ADD: return (Offset >= -128 && Offset <= 127); + case Xtensa::AE_L64_I: + case Xtensa::AE_S64_I: + case Xtensa::AE_S32X2_I: + case Xtensa::AE_L32X2_I: + case Xtensa::AE_S16X4_I: + case Xtensa::AE_L16X4_I: + case Xtensa::AE_LALIGN64_I: + case Xtensa::AE_SALIGN64_I: + return (Offset >= -64 && Offset <= 56); + case Xtensa::AE_S64_IP: + case Xtensa::AE_L64_IP: + case Xtensa::AE_S32X2_IP: + case Xtensa::AE_L32X2_IP: + case Xtensa::AE_S16X4_IP: + case Xtensa::AE_L16X4_IP: + return (Offset >= 0 && Offset <= 56); + case Xtensa::AE_L16X2M_I: + case Xtensa::AE_L16X2M_IU: + case Xtensa::AE_L32F24_I: + case Xtensa::AE_L32F24_IP: + case Xtensa::AE_L32M_I: + case Xtensa::AE_L32M_IU: + case Xtensa::AE_L32_I: + case Xtensa::AE_L32_IP: + case Xtensa::AE_S16X2M_I: + case Xtensa::AE_S16X2M_IU: + case Xtensa::AE_S24RA64S_I: + case Xtensa::AE_S24RA64S_IP: + case Xtensa::AE_S32F24_L_I: + case Xtensa::AE_S32F24_L_IP: + case Xtensa::AE_S32M_I: + case Xtensa::AE_S32M_IU: + case Xtensa::AE_S32RA64S_I: + case Xtensa::AE_S32RA64S_IP: + case Xtensa::AE_S32_L_I: + case Xtensa::AE_S32_L_IP: + return (Offset >= -32 && Offset <= 28); default: // assume that MI is 32-bit load/store operation Scale = 4; From 85a4d93ed4d4526a1af806d15bcaf8b73b412ec8 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Mon, 30 Sep 2024 22:15:40 +0300 Subject: [PATCH 201/289] [Xtensa] Add HIFI3 instruction selection patterns --- .../Target/Xtensa/XtensaHIFIInstrPatterns.td | 189 ++++++++++++++++++ llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp | 38 ++++ 
llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 1 + llvm/lib/Target/Xtensa/XtensaOperators.td | 4 +- 4 files changed, 231 insertions(+), 1 deletion(-) create mode 100644 llvm/lib/Target/Xtensa/XtensaHIFIInstrPatterns.td diff --git a/llvm/lib/Target/Xtensa/XtensaHIFIInstrPatterns.td b/llvm/lib/Target/Xtensa/XtensaHIFIInstrPatterns.td new file mode 100644 index 0000000000000..fc60c8b7c10e7 --- /dev/null +++ b/llvm/lib/Target/Xtensa/XtensaHIFIInstrPatterns.td @@ -0,0 +1,189 @@ +//===- XtensaHIFIInstrPatterns.td - Tablegen patterns for Xtensa HIFI -*- tablegen -*--===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains Tablegen code generation patterns for Xtensa HIFI extension +// +//===----------------------------------------------------------------------===// + +def addr64n_56: ComplexPattern", [frameindex]>; + +def addr32n_28: ComplexPattern", [frameindex]>; + +let Predicates = [HasHIFI3] in { + +def : Pat<(v2i32 (load (addr64n_56 AR:$a, imm64n_56:$imm))), + (AE_L32X2_I AR:$a, imm64n_56:$imm)>; + +def : Pat<(store v2i32:$v, (addr64n_56 AR:$a, imm64n_56:$imm)), + (AE_S32X2_I v2i32:$v, AR:$a, imm64n_56:$imm)>; + +def : Pat<(v1i64 (load (addr64n_56 AR:$a, imm64n_56:$imm))), + (AE_L64_I AR:$a, imm64n_56:$imm)>; + +def : Pat<(store v1i64:$v, (addr64n_56 AR:$a, imm64n_56:$imm)), + (AE_S64_I v1i64:$v, AR:$a, imm64n_56:$imm)>; + +def : Pat<(v4i16 (load (addr64n_56 AR:$a, imm64n_56:$imm))), + (AE_L16X4_I AR:$a, imm64n_56:$imm)>; + +def : Pat<(store v4i16:$v, (addr64n_56 AR:$a, imm64n_56:$imm)), + (AE_S16X4_I v4i16:$v, AR:$a, imm64n_56:$imm)>; + +def : Pat<(v1i32 (load (addr32n_28 AR:$a, imm32n_28:$imm))), + (AE_L32_I AR:$a, imm32n_28:$imm)>; + +def : Pat<(store v1i32:$v, 
(addr32n_28 AR:$a, imm32n_28:$imm)), + (AE_S32_L_I v1i32:$v, AR:$a, imm32n_28:$imm)>; + + +def : Pat<(v8i8 (load (addr64n_56 AR:$a, imm64n_56:$imm))), + (AE_LALIGN64_I AR:$a, imm64n_56:$imm)>; + +def : Pat<(store AE_VALIGN:$v, (addr64n_56 AR:$a, imm64n_56:$imm)), + (AE_SALIGN64_I AE_VALIGN:$v, AR:$a, imm64n_56:$imm)>; + + +def : Pat<(v2i32 (build_vector AR:$v1, AR:$v2)), + (AE_MOVDA32X2 AR:$v2, AR:$v1)>; + +def : Pat<(v2i32 (build_vector AR:$a, AR:$a)), + (AE_MOVDA32 AR:$a)>; + +/* Build const i64 vector when const fit in [-16,47]*/ +def : Pat<(v1i64 (Xtensa_build_vec imm16n_47:$a)), + (AE_SRLI64 (AE_MOVI imm16n_47:$a), (i32 32))>; + +/* Build const i64 vector with 32-bit const */ +def : Pat<(v1i64 (Xtensa_build_vec AR:$a)), + (AE_SRLI64 (AE_MOVDA32 AR:$a), (i32 32))>; + + +def : Pat<(v1i32 (build_vector AR:$a)), + (AE_MOVDA32 AR:$a)>; + +def : Pat<(v4i16 (build_vector AR:$a, AR:$a, AR:$a, AR:$a)), + (AE_MOVDA16 AR:$a)>; + +def : Pat<(v4i16 (build_vector AR:$v1, AR:$v2, AR:$v1, AR:$v2)), + (AE_MOVDA16X2 AR:$v1, AR:$v2)>; + +def : Pat<(v4i16 (build_vector AR:$v1, AR:$v2, AR:$v3, AR:$v4)), + (AE_OR + (AE_SLAI64 (AE_MOVDA16X2 AR:$v1, AR:$v2), 32), + (AE_MOVDA16X2 AR:$v3, AR:$v4) + )>; + +def : Pat<(i32 (extractelt v2i32:$v1, (i32 0))), + (AE_MOVAD32_L AE_DR:$v1)>; + +def : Pat<(i32 (extractelt v2i32:$v1, (i32 1))), + (AE_MOVAD32_H AE_DR:$v1)>; + +def : Pat<(i32 (extractelt v1i32:$v1, (i32 0))), + (AE_MOVAD32_L AE_DR:$v1)>; + +def : Pat<(i32 (vector_extract v4i16:$v1, (i32 0))), + (AE_MOVAD16_0 AE_DR:$v1)>; + +def : Pat<(i32 (vector_extract v4i16:$v1, (i32 1))), + (AE_MOVAD16_1 AE_DR:$v1)>; + +def : Pat<(i32 (vector_extract v4i16:$v1, (i32 2))), + (AE_MOVAD16_2 AE_DR:$v1)>; + +def : Pat<(i32 (vector_extract v4i16:$v1, (i32 3))), + (AE_MOVAD16_3 AE_DR:$v1)>; + +def : Pat<(v1i32 (extract_subvector v2i32:$v1, (i32 0))), + (AE_MOVDA32 (AE_MOVAD32_L AE_DR:$v1))>; +} + +class CAST_PAT + : Pat<(dst_vt (bitconvert src_vt:$v)), + (COPY_TO_REGCLASS AE_DR:$v, AE_DR)>; + +def : 
CAST_PAT; +def : CAST_PAT; +def : CAST_PAT; +def : CAST_PAT; +def : CAST_PAT; +def : CAST_PAT; + +def : Pat<(v1i64 (anyext v1i32:$src)), + (AE_SRLI64 v1i32:$src, (i32 32))>; + +def : Pat<(v1i64 (zext v1i32:$src)), + (AE_SRLI64 v1i32:$src, (i32 32))>; + +def : Pat<(v1i64 (sext v1i32:$src)), + (AE_SRAI64 v1i32:$src, (i32 32))>; + +/* +class BIN_PAT + : Pat<(node src_vt:$f1, src_vt:$f2), + (inst dst_vt:$f1, dst_vt:$f2)>; +*/ +foreach VT = AE_DR.RegTypes in { + def : BIN_PAT; + def : BIN_PAT; + def : BIN_PAT; +} + +def : BIN_PAT; +def : BIN_PAT; +def : BIN_PAT; +def : BIN_PAT; + +def : BIN_PAT; +def : BIN_PAT; +def : BIN_PAT; +def : BIN_PAT; + +def : BIN_PAT; +def : BIN_PAT; +def : BIN_PAT; + +/* SELECT and SETCC patterns */ +foreach VT = AE_DR.RegTypes in { + def : Pat<(VT (select v1i1:$cc, AE_DR:$t, AE_DR:$f)), + (AE_MOVT64 AE_DR:$t, AE_DR:$f, v1i1:$cc)>; +} +def : Pat<(f32 (int_xtensa_xt_movt_s FPR:$t, FPR:$f, v1i1:$cc)), + (MOVT_S FPR:$t, FPR:$f, v1i1:$cc)>,Requires<[HasSingleFloat]>; + +def : Pat<(f32 (select v1i1:$cc, FPR:$t, FPR:$f)), + (MOVT_S FPR:$t, FPR:$f, v1i1:$cc)>,Requires<[HasSingleFloat]>; + +class SELECTCC_VEC_INT + : Pat<(vt (selectcc i32:$lhs, i32:$rhs, AE_DR:$t, AE_DR:$f, cond)), + (mov AE_DR:$t, AE_DR:$f, (cmp (AE_MOVDA32 AR:$lhs), + (AE_MOVDA32 AR:$rhs)))>; + +foreach vt = [v2i32,v1i32,v1i64,v4i16] in { + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; + def : SELECTCC_VEC_INT; +} diff --git a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp index 4a3ba220e2361..49bb2f169ab76 100644 --- 
a/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelDAGToDAG.cpp @@ -109,6 +109,44 @@ class XtensaDAGToDAGISel : public SelectionDAGISel { return SelectionDAGISel::runOnMachineFunction(MF); } + template + bool selectMemRegImm(SDValue Addr, SDValue &Base, SDValue &Offset) { + EVT ValTy = Addr.getValueType(); + // if Address is FI, get the TargetFrameIndex. + if (FrameIndexSDNode *FIN = dyn_cast(Addr)) { + Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy); + Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), ValTy); + + return true; + } + if (TM.isPositionIndependent()) + report_fatal_error("PIC relocations is not supported"); + + if ((Addr.getOpcode() == ISD::TargetExternalSymbol || + Addr.getOpcode() == ISD::TargetGlobalAddress)) + return false; + + if (CurDAG->isBaseWithConstantOffset(Addr)) { + ConstantSDNode *CN = dyn_cast(Addr.getOperand(1)); + int64_t OffsetVal = CN->getSExtValue(); + if (((OffsetVal % std::abs(Scale)) == 0) && (OffsetVal >= Low) && + (OffsetVal <= High)) { + if (FrameIndexSDNode *FIN = + dyn_cast(Addr.getOperand(0))) + Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy); + else + Base = Addr.getOperand(0); + Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(Addr), + Addr.getValueType()); + return true; + } + } + // Last case + Base = Addr; + Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), Addr.getValueType()); + return true; + } + // Include the pieces autogenerated from the target description. 
#include "XtensaGenDAGISel.inc" }; // namespace diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index ab4a75f9ff2c1..dc002650e6c72 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1994,3 +1994,4 @@ include "XtensaDSPInstrInfo.td" //===----------------------------------------------------------------------===// include "XtensaHIFIInstrFormats.td" include "XtensaHIFIInstrInfo.td" +include "XtensaHIFIInstrPatterns.td" diff --git a/llvm/lib/Target/Xtensa/XtensaOperators.td b/llvm/lib/Target/Xtensa/XtensaOperators.td index 97abb0b11803d..e7d2417a3b05c 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperators.td +++ b/llvm/lib/Target/Xtensa/XtensaOperators.td @@ -45,9 +45,9 @@ def SDT_XtensaEXTUI : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCi def SDT_XtensaLoopEnd : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>; def SDT_XtensaLoopDec : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisVT<0, i32>]>; def SDT_XtensaLoopBr : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisVT<1, OtherVT>]>; +def SDT_XtensaBuildVec : SDTypeProfile<1, 1, [SDTCisVT<0, v1i64>, SDTCisVT<1, i32>]>; def SDT_XtensaTRUNC : SDTypeProfile<1, 1, [SDTCisVT<1, i32>, SDTCisVec<0>]>; - //===----------------------------------------------------------------------===// // Node definitions //===----------------------------------------------------------------------===// @@ -72,6 +72,8 @@ def Xtensa_callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_XtensaCallSeqEnd, [SDNPHasChain, SDNPSideEffect, SDNPOptInGlue, SDNPOutGlue]>; +def Xtensa_build_vec: SDNode<"XtensaISD::BUILD_VEC", SDT_XtensaBuildVec, [SDNPInGlue]>; + def Xtensa_brjt: SDNode<"XtensaISD::BR_JT", SDT_XtensaBrJT, [SDNPHasChain]>; def Xtensa_select_cc: SDNode<"XtensaISD::SELECT_CC", SDT_XtensaSelectCC, From 8e0f7d2490625e2dcaf8221d4fcde05bf8240dfd Mon Sep 17 00:00:00 2001 From: Maciej Czekaj Date: Thu, 29 Jun 2023 13:28:39 +0000 Subject: [PATCH 202/289] [Xtensa] Add 
codegen tests for vector operators --- llvm/test/CodeGen/Xtensa/xtensa-gen-ops.ll | 91 +++++ llvm/test/CodeGen/Xtensa/xtensa-vector-ops.ll | 329 ++++++++++++++++++ 2 files changed, 420 insertions(+) create mode 100644 llvm/test/CodeGen/Xtensa/xtensa-gen-ops.ll create mode 100644 llvm/test/CodeGen/Xtensa/xtensa-vector-ops.ll diff --git a/llvm/test/CodeGen/Xtensa/xtensa-gen-ops.ll b/llvm/test/CodeGen/Xtensa/xtensa-gen-ops.ll new file mode 100644 index 0000000000000..fd1fa8dcf06d1 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/xtensa-gen-ops.ll @@ -0,0 +1,91 @@ +# RUN: python3 %s > %t && ( llc -mtriple=xtensa -mcpu=cnl %t -o - | FileCheck %t ) + + +FIXTURES = [ + ('add','ae_add64','<1 x i64>'), + ('add','ae_add32','<1 x i32>'), + ('add','ae_add32','<2 x i32>'), + ('add','ae_add16','<4 x i16>'), + + ('sub','ae_sub64','<1 x i64>'), + ('sub','ae_sub32','<1 x i32>'), + ('sub','ae_sub32','<2 x i32>'), + ('sub','ae_sub16','<4 x i16>'), + + ('mul','ae_mulp32x2','<2 x i32>'), + ('mul','ae_mulp32x2','<1 x i32>'), + ('mul','ae_mul16x4','<4 x i16>'), +] + +REG_TYPES = ['<1 x i64>', '<2 x i32>', '<1 x i32>', '<4 x i16>'] + +BITWISE_OPS = [ + ('and', 'ae_and'), + ('or', 'ae_or'), + ('xor', 'ae_xor') +] + +from dataclasses import dataclass + +@dataclass +class F: + op: str + instr: str + type : str + +template = """ +define {type} @test_{fun}({type} %a, {type} %b) {{ + ; CHECK-LABEL: test_{fun} + ; CHECK: {instr} aed0, {{{{aed[01]}}}}, {{{{aed[01]}}}} + %r = {op} {type} %a, %b + ret {type} %r +}} +""" + +def v2s(typ): + return typ.strip('<>').replace(' ','') + +for f in FIXTURES: + f = F(*f) + f.fun = f.op + v2s(f.type) + print(template.format(**f.__dict__)) + +for f in BITWISE_OPS: + op, instr = f + for typ in REG_TYPES: + fun = op + v2s(typ) + print(template.format(op=op, instr=instr,fun=fun,type=typ)) + +cmp_template = """ +define {vtype} @test_sel_{fun}({ctype} %a, {ctype} %b, {vtype} %t, {vtype} %f) {{ + ; CHECK-LABEL: test_sel_{fun} + ; CHECK-DAG: ae_movda32 {{{{aed[0-9]+}}}}, 
{{{{a[0-9]+}}}} + ; CHECK-DAG: ae_movda32 {{{{aed[0-9]+}}}}, {{{{a[0-9]+}}}} + ; CHECK: {cmp_inst} {{{{b[0-9]+}}}}, {{{{aed[0-9]+}}}}, {{{{aed[0-9]+}}}} + ; CHECK: {mov_inst} {{{{aed[0-9]+}}}}, {{{{aed[0-9]+}}}}, {{{{b[0-9]+}}}} + %cmp = icmp {cmp_bc} {ctype} %a, %b + %cond = select i1 %cmp, {vtype} %f, {vtype} %t + ret {vtype} %cond +}} +""" + +CMP_FIXTURES = [ + ('eq','ae_eq64', 'ae_movt64'), + ('ne','ae_eq64', 'ae_movf64'), + ('ugt','ae_le64', 'ae_movf64'), + ('uge','ae_lt64', 'ae_movf64'), + ('ult','ae_lt64', 'ae_movt64'), + ('ule','ae_le64', 'ae_movt64'), + ('sgt','ae_le64', 'ae_movf64'), + ('sge','ae_lt64', 'ae_movf64'), + ('slt','ae_lt64', 'ae_movt64'), + ('sle','ae_le64', 'ae_movt64'), +] + +SCALARS = "i32 i16 i8".split() + +for cmp_bc, cmp_inst, mov_inst in CMP_FIXTURES: + for ctype in SCALARS: + for vtype in REG_TYPES: + fun = '_'.join((cmp_bc, ctype, v2s(vtype))) + print(cmp_template.format(**locals())) diff --git a/llvm/test/CodeGen/Xtensa/xtensa-vector-ops.ll b/llvm/test/CodeGen/Xtensa/xtensa-vector-ops.ll new file mode 100644 index 0000000000000..1e4df95919905 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/xtensa-vector-ops.ll @@ -0,0 +1,329 @@ +; RUN: llc -mtriple=xtensa -mcpu=cnl %s -o - | FileCheck %s + +define i32 @test_2xi32toi32(<2 x i32> %a) { + ; CHECK-LABEL: test_2xi32toi32 + ; CHECK: ae_movad32.l a2, aed0 + %r = extractelement <2 x i32> %a, i32 0 + ret i32 %r +} + +define <2 x i32> @test_i32to2xi32(i32 %a) { + ; CHECK-LABEL: test_i32to2xi32 + ; CHECK: ae_movda32x2 aed0, a2, a2 + %vecinit = insertelement <2 x i32> undef, i32 %a, i64 0 + %vecinit1 = shufflevector <2 x i32> %vecinit, <2 x i32> poison, <2 x i32> zeroinitializer + ret <2 x i32> %vecinit1 +} + +define void @test_store_2xi32(i32 %a, <2 x i32> %v) { + ; CHECK-LABEL: test_store_2xi32 + ; CHECK: ae_s32x2.i aed0, a2, 0 + %p = inttoptr i32 %a to ptr + store <2 x i32> %v, ptr %p, align 8 + ret void +} + +define void @test_store_1xi64(i32 %a, <1 x i64> %v) { + ; CHECK-LABEL: 
test_store_1xi64 + %p = inttoptr i32 %a to ptr + ; CHECK: ae_s64.i aed0, a2, 0 + store <1 x i64> %v, ptr %p, align 8 + ret void +} + +define <1 x i64> @test_build_1xi64(i64 %v) { + ; CHECK-LABEL: test_build_1xi64 + ; CHECK: ae_movda32x2 aed0, a3, a2 + %vec = insertelement <1 x i64> undef, i64 %v, i64 0 + ret <1 x i64> %vec +} + +define void @test_store_4xi16(i32 %a, <4 x i16> %v) { + ; CHECK-LABEL: test_store_4xi16 + ; CHECK: ae_s16x4.i aed0, a2, 0 + %p = inttoptr i32 %a to ptr + store <4 x i16> %v, ptr %p, align 8 + ret void +} + +define <2 x i32> @test_load_2xi32(i32 %a) { + ; CHECK-LABEL: test_load_2xi32 + ; CHECK: ae_l32x2.i aed0, a2, 0 + %p = inttoptr i32 %a to ptr + %v = load <2 x i32>, ptr %p, align 8 + ret <2 x i32> %v +} + +define <1 x i64> @test_load_1xi64(i32 %a) { + ; CHECK-LABEL: test_load_1xi64 + ; CHECK: ae_l64.i aed0, a2, 0 + %p = inttoptr i32 %a to ptr + %v = load <1 x i64>, ptr %p, align 8 + ret <1 x i64> %v +} + +define <4 x i16> @test_load_4xi16(i32 %a) { + ; CHECK-LABEL: test_load_4xi16 + ; CHECK: ae_l16x4.i aed0, a2, 0 + %p = inttoptr i32 %a to ptr + %v = load <4 x i16>, ptr %p, align 8 + ret <4 x i16> %v +} + +define void @test_build_store_1xi32(i32 %a, i32 %v) { + ; CHECK-LABEL: test_build_store_1xi32 + ; CHECK: ae_movda32 [[A:aed[0-9]+]], a3 + %vec = insertelement <1 x i32> undef, i32 %v, i64 0 + %p = inttoptr i32 %a to ptr + ; CHECK: ae_s32.l.i [[A]], a2, 0 + store <1 x i32> %vec, ptr %p, align 8 + ret void +} + +define i32 @test_load_extract_1xi32(i32 %a) { + ; CHECK-LABEL: test_load_extract_1xi32 + %p = inttoptr i32 %a to ptr + ; CHECK: ae_l32.i [[A:aed[0-9]+]], a2, 0 + %vec = load <1 x i32>, ptr %p, align 8 + ; CHECK: ae_movad32.l a2, [[A]] + %r = extractelement <1 x i32> %vec, i32 0 + ret i32 %r +} + +define <4 x i16> @test_build_4xi16_2(i16 %a, i16 %b) { + ; CHECK-LABEL: test_build_4xi16_2 + ; CHECK: ae_movda16x2 aed0, a2, a3 + %vecinit = insertelement <4 x i16> undef, i16 %a, i64 0 + %vecinit1 = insertelement <4 x i16> %vecinit, i16 
%b, i64 1 + %vecinit2 = insertelement <4 x i16> %vecinit1, i16 %a, i64 2 + %vecinit3 = insertelement <4 x i16> %vecinit2, i16 %b, i64 3 + ret <4 x i16> %vecinit3 +} + +define <4 x i16> @test_build_4xi16_1(i16 %a) { + ; CHECK-LABEL: test_build_4xi16_1 + ; CHECK: ae_movda16 aed0, a2 + %vecinit = insertelement <4 x i16> undef, i16 %a, i64 0 + %vecinit1 = shufflevector <4 x i16> %vecinit, <4 x i16> poison, <4 x i32> zeroinitializer + ret <4 x i16> %vecinit1 +} + +define i32 @test_extract(<2 x i32> %v2i, <1 x i32> %v1i, <4 x i16> %v4s, <1 x i64> %v1l) { + ; CHECK-LABEL: test_extract + ; CHECK-DAG: ae_movad32.h {{a[0-9]+}}, aed0 + %v2i0 = extractelement <2 x i32> %v2i, i64 0 + ; CHECK-DAG: ae_movad32.l {{a[0-9]+}}, aed0 + %v2i1 = extractelement <2 x i32> %v2i, i64 1 + %sum1 = add i32 %v2i0, %v2i1 + ; CHECK-DAG: ae_movad32.l {{a[0-9]+}}, aed1 + %v1i0 = extractelement <1 x i32> %v1i, i64 0 + %sum2 = add i32 %sum1, %v1i0 + ; CHECK-DAG: ae_movad16.0 {{a[0-9]+}}, aed2 + %v4s0 = extractelement <4 x i16> %v4s, i64 0 + %v4s0i = zext i16 %v4s0 to i32 + %sum3 = add i32 %v4s0i, %sum2 + ; CHECK-DAG: ae_movad16.1 {{a[0-9]+}}, aed2 + %v4s1 = extractelement <4 x i16> %v4s, i64 1 + %v4s1i = zext i16 %v4s1 to i32 + %sum4 = add i32 %v4s1i, %sum3 + ; CHECK-DAG: ae_movad16.2 {{a[0-9]+}}, aed2 + %v4s2 = extractelement <4 x i16> %v4s, i64 2 + %v4s2i = zext i16 %v4s2 to i32 + %sum5 = add i32 %v4s2i, %sum4 + ; CHECK-DAG: ae_movad16.3 {{a[0-9]+}}, aed2 + %v4s3 = extractelement <4 x i16> %v4s, i64 3 + %v4s3i = zext i16 %v4s3 to i32 + %sum6 = add i32 %v4s3i, %sum5 + ; CHECK-DAG: ae_movad32.l {{a[0-9]+}}, aed3 + %v1l0 = extractelement <1 x i64> %v1l, i64 0 + %v1l0l = trunc i64 %v1l0 to i32 + %sum7 = add i32 %v1l0l, %sum6 + + ret i32 %sum7 +} + +define <1 x i32> @test_extract_subvec_1x32(<2 x i32> %v) { + ; CHECK-LABEL: test_extract_subvec_1x32 + ; CHECK: ae_movad32.l {{a[0-9]+}}, aed0 + ; CHECK: ae_movda32 aed0, {{a[0-9]+}} + %shuffle = shufflevector <2 x i32> %v, <2 x i32> poison, <1 x i32> 
zeroinitializer + ret <1 x i32> %shuffle +} + + +define <4 x i16> @rlshift4(<4 x i16> %a, i16 signext %b) { + ; CHECK-LABEL: rlshift4: + ; CHECK: ssr {{a[0-9]+}} + %v = insertelement <4 x i16> undef, i16 %b, i64 0 + %sh_prom = shufflevector <4 x i16> %v, <4 x i16> poison, <4 x i32> zeroinitializer + %shr = lshr <4 x i16> %a, %sh_prom + ret <4 x i16> %shr +} + + +define <4 x i16> @rlshift4_imm(<4 x i16> %a) { + ; CHECK-LABEL: rlshift4_imm: + ; CHECK: srli {{a[0-9]+}}, {{a[0-9]+}}, 1 + %shr = lshr <4 x i16> %a, + ret <4 x i16> %shr +} + + +define <2 x i32> @rlshift2(<2 x i32> %a, i32 %b) { + ; CHECK-LABEL: rlshift2: + ; CHECK: ssr {{a[0-9]+}} + %splat.splatinsert = insertelement <2 x i32> poison, i32 %b, i64 0 + %splat.splat = shufflevector <2 x i32> %splat.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer + %shr = lshr <2 x i32> %a, %splat.splat + ret <2 x i32> %shr +} + + +define <2 x i32> @rlshift2_imm(<2 x i32> %a) { + ; CHECK-LABEL: rlshift2_imm: + ; CHECK: srli {{a[0-9]+}}, {{a[0-9]+}}, 1 + %shr = lshr <2 x i32> %a, + ret <2 x i32> %shr +} + +define <1 x i64> @rlshift1(<1 x i64> %a, i32 %b) { + ; CHECK-LABEL: rlshift1: + ; CHECK: ssr {{a[0-9]+}} + ; CHECK: src {{a[0-9]+}} + ; CHECK: srl {{a[0-9]+}} + %splat.splatinsert = insertelement <1 x i32> poison, i32 %b, i64 0 + %sh_prom = zext <1 x i32> %splat.splatinsert to <1 x i64> + %shr = lshr <1 x i64> %a, %sh_prom + ret <1 x i64> %shr +} + +define <1 x i64> @rlshift1_imm(<1 x i64> %a) { + ; CHECK-LABEL: rlshift1_imm: + ; CHECK: ssr {{a[0-9]+}} + ; CHECK: src {{a[0-9]+}} + ; CHECK: srl {{a[0-9]+}} + %shr = lshr <1 x i64> %a, + ret <1 x i64> %shr +} + +define <4 x i16> @rashift4(<4 x i16> %a, i16 signext %b) { + ; CHECK-LABEL: rashift4: + ; CHECK: ssr {{a[0-9]+}} + ; CHECK: sra {{a[0-9]+}} + %v = insertelement <4 x i16> undef, i16 %b, i64 0 + %sh_prom = shufflevector <4 x i16> %v, <4 x i16> poison, <4 x i32> zeroinitializer + %shr = ashr <4 x i16> %a, %sh_prom + ret <4 x i16> %shr +} + +define <4 x i16> 
@rashift4_imm(<4 x i16> %a) { + ; CHECK-LABEL: rashift4_imm: + ; CHECK: srai {{a[0-9]+}}, {{a[0-9]+}}, 1 + %shr = ashr <4 x i16> %a, + ret <4 x i16> %shr +} + +define <2 x i32> @rashift2(<2 x i32> %a, i32 %b) { + ; CHECK-LABEL: rashift2: + ; CHECK: ssr {{a[0-9]+}} + ; CHECK: sra {{a[0-9]+}} + %splat.splatinsert = insertelement <2 x i32> poison, i32 %b, i64 0 + %splat.splat = shufflevector <2 x i32> %splat.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer + %shr = ashr <2 x i32> %a, %splat.splat + ret <2 x i32> %shr +} + + +define <2 x i32> @rashift2_imm(<2 x i32> %a) { + ; CHECK-LABEL: rashift2_imm: + ; CHECK: srai {{a[0-9]+}}, {{a[0-9]+}}, 1 + %shr = ashr <2 x i32> %a, + ret <2 x i32> %shr +} + + +define <1 x i64> @rashift1(<1 x i64> %a, i64 %b) { + ; CHECK-LABEL: rashift1: + ; CHECK: ssr {{a[0-9]+}} + ; CHECK: sra {{a[0-9]+}} + %splat.splatinsert = insertelement <1 x i64> poison, i64 %b, i64 0 + %shr = ashr <1 x i64> %a, %splat.splatinsert + ret <1 x i64> %shr +} + + +define <1 x i64> @rashift1_imm(<1 x i64> %a) { + ; CHECK-LABEL: rashift1_imm: + ; CHECK: ssr {{a[0-9]+}} + ; CHECK: sra {{a[0-9]+}} + %shr = ashr <1 x i64> %a, + ret <1 x i64> %shr +} + + +define <4 x i16> @lshift4(<4 x i16> %a, i16 signext %b) { + ; CHECK-LABEL: lshift4: + ; CHECK: ssl {{a[0-9]+}} + ; CHECK: sll {{a[0-9]+}} + %v = insertelement <4 x i16> undef, i16 %b, i64 0 + %sh_prom = shufflevector <4 x i16> %v, <4 x i16> poison, <4 x i32> zeroinitializer + %shl = shl <4 x i16> %a, %sh_prom + ret <4 x i16> %shl +} + + +define <4 x i16> @lshift4_imm(<4 x i16> %a) { + ; CHECK-LABEL: lshift4_imm: + ; CHECK: slli {{a[0-9]+}}, {{a[0-9]+}}, 1 + %shl = shl <4 x i16> %a, + ret <4 x i16> %shl +} + + +define <2 x i32> @lshift2(<2 x i32> %a, i32 %b) { + ; CHECK-LABEL: lshift2: + ; CHECK: ssl {{a[0-9]+}} + ; CHECK: sll {{a[0-9]+}} + %splat.splatinsert = insertelement <2 x i32> poison, i32 %b, i64 0 + %splat.splat = shufflevector <2 x i32> %splat.splatinsert, <2 x i32> poison, <2 x i32> 
zeroinitializer + %shl = shl <2 x i32> %a, %splat.splat + ret <2 x i32> %shl +} + + +define <2 x i32> @lshift2_imm(<2 x i32> %a) { + ; CHECK-LABEL: lshift2_imm: + ; CHECK: slli {{a[0-9]+}}, {{a[0-9]+}}, 1 + %shl = shl <2 x i32> %a, + ret <2 x i32> %shl +} + + +define <1 x i64> @lshift1(<1 x i64> %a, i64 %b) { + ; CHECK-LABEL: lshift1: + ; CHECK: ssl {{a[0-9]+}} + ; CHECK: sll {{a[0-9]+}} + %splat.splatinsert = insertelement <1 x i64> poison, i64 %b, i64 0 + %shl = shl <1 x i64> %a, %splat.splatinsert + ret <1 x i64> %shl +} + + +define <1 x i64> @lshift1_imm(<1 x i64> %a) { + ; CHECK-LABEL: lshift1_imm: + ; CHECK: ssl {{a[0-9]+}} + ; CHECK: sll {{a[0-9]+}} + %shl = shl <1 x i64> %a, + ret <1 x i64> %shl +} + +define void @test_valign_load_store(i32 %p1, i32 %p2) { + ; CHECK-LABEL: test_valign_load_store: + %ptr1 = inttoptr i32 %p1 to ptr + %ptr2 = inttoptr i32 %p2 to ptr + ; CHECK: ae_lalign64.i [[V:u[0-3]]], {{a[0-9]+}}, 0 + %v = load <8 x i8>, ptr %ptr1, align 8 + ; CHECK: ae_salign64.i [[V]], {{a[0-9]+}}, 0 + store <8 x i8> %v, ptr %ptr2, align 8 + ret void +} From 0c811dd8b2a0200de9a1ed2d1ac8f70e0536b1dd Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 1 Oct 2024 01:41:20 +0300 Subject: [PATCH 203/289] [Xtensa] Fix HIFI dsp gen operation test. 
--- llvm/test/CodeGen/Xtensa/xtensa-gen-ops.ll | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/llvm/test/CodeGen/Xtensa/xtensa-gen-ops.ll b/llvm/test/CodeGen/Xtensa/xtensa-gen-ops.ll index fd1fa8dcf06d1..f2987a7e35a32 100644 --- a/llvm/test/CodeGen/Xtensa/xtensa-gen-ops.ll +++ b/llvm/test/CodeGen/Xtensa/xtensa-gen-ops.ll @@ -72,14 +72,14 @@ define {vtype} @test_sel_{fun}({ctype} %a, {ctype} %b, {vtype} %t, {vtype} %f) { CMP_FIXTURES = [ ('eq','ae_eq64', 'ae_movt64'), ('ne','ae_eq64', 'ae_movf64'), - ('ugt','ae_le64', 'ae_movf64'), + ('ugt','ae_lt64', 'ae_movf64'), ('uge','ae_lt64', 'ae_movf64'), ('ult','ae_lt64', 'ae_movt64'), - ('ule','ae_le64', 'ae_movt64'), - ('sgt','ae_le64', 'ae_movf64'), + ('ule','ae_lt64', 'ae_movt64'), + ('sgt','ae_lt64', 'ae_movf64'), ('sge','ae_lt64', 'ae_movf64'), ('slt','ae_lt64', 'ae_movt64'), - ('sle','ae_le64', 'ae_movt64'), + ('sle','ae_lt64', 'ae_movt64'), ] SCALARS = "i32 i16 i8".split() From f8bbc5f3e2355a0259b848859f78b33cdbdd178d Mon Sep 17 00:00:00 2001 From: Maciej Czekaj Date: Thu, 29 Jun 2023 13:51:02 +0000 Subject: [PATCH 204/289] [Xtensa] Support bit vectors in BRegFixupPass --- .../lib/Target/Xtensa/XtensaBRegFixupPass.cpp | 49 +++++++++++++++++-- 1 file changed, 45 insertions(+), 4 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaBRegFixupPass.cpp b/llvm/lib/Target/Xtensa/XtensaBRegFixupPass.cpp index e5da98500b57b..44f451be59328 100644 --- a/llvm/lib/Target/Xtensa/XtensaBRegFixupPass.cpp +++ b/llvm/lib/Target/Xtensa/XtensaBRegFixupPass.cpp @@ -92,10 +92,26 @@ bool XtensaBRegFixup::VisitInstruction( const XtensaInstrInfo &TII = *static_cast(MF->getSubtarget().getInstrInfo()); unsigned Opcode = MI->getOpcode(); - unsigned RegBase = Xtensa::B0; switch (Opcode) { + case Xtensa::MOVBA2_P2: case Xtensa::MOVBA_P2: { + + unsigned RegBase; + unsigned Arity; + + switch (Opcode) { + case Xtensa::MOVBA_P2: + RegBase = Xtensa::B0; + Arity = 1; + break; + case Xtensa::MOVBA2_P2: + RegBase = 
Xtensa::B0_B1; + Arity = 2; + break; + default: + llvm_unreachable("Unknown MOVBA opcode"); + } /* MOVBA_P2 Breg, Dst1, Dst2, Src | @@ -107,13 +123,16 @@ bool XtensaBRegFixup::VisitInstruction( OR Dst2, Dst2, Dst1 WSR BREG, Dst2 */ + // TODO: Mask SRC, e.g. by EXTUI MachineOperand Breg = MI->getOperand(0); MachineOperand Dst1 = MI->getOperand(1); MachineOperand Dst2 = MI->getOperand(2); MachineOperand Src = MI->getOperand(3); DebugLoc DL = MI->getDebugLoc(); unsigned RegNo = Breg.getReg().id() - RegBase; - int64_t Mask = 0xffff & (~(1 << RegNo)); + + int64_t BaseMask = (1 << Arity) - 1; + int64_t Mask = 0xffff & (~(BaseMask << (RegNo * Arity))); MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(Xtensa::RSR)).add(Dst1).addReg(Xtensa::BREG); @@ -139,6 +158,27 @@ bool XtensaBRegFixup::VisitInstruction( return true; } break; case Xtensa::EXTUI_BR_P: { + case Xtensa::EXTUI_BR2_P: + case Xtensa::EXTUI_BR4_P: + unsigned RegBase; + unsigned Arity; + + switch (Opcode) { + case Xtensa::EXTUI_BR_P: + RegBase = Xtensa::B0; + Arity = 1; + break; + case Xtensa::EXTUI_BR2_P: + RegBase = Xtensa::B0_B1; + Arity = 2; + break; + case Xtensa::EXTUI_BR4_P: + RegBase = Xtensa::B0_B1_B2_B3; + Arity = 4; + break; + default: + llvm_unreachable("Unknown EXTUI opcode"); + } MachineOperand Breg = MI->getOperand(2); DebugLoc dl = MI->getDebugLoc(); @@ -149,8 +189,8 @@ bool XtensaBRegFixup::VisitInstruction( MIB.add(MI->getOperand(0)); MIB.add(MI->getOperand(1)); unsigned RegNo = Breg.getReg().id() - RegBase; - MIB.addImm(RegNo); - MIB.addImm(1); + MIB.addImm(RegNo * Arity); + MIB.addImm(Arity); LLVM_DEBUG(dbgs() << " Fixed EXTUI: " << *MIB); MBB.erase_instr(MI); @@ -160,6 +200,7 @@ bool XtensaBRegFixup::VisitInstruction( case Xtensa::SLLI_BR_P: { + unsigned RegBase = Xtensa::B0; MachineOperand Breg = MI->getOperand(2); unsigned RegNo = Breg.getReg().id() - RegBase; if (RegNo != 0) { From 92476b1d2ccb89dfca8d4786dafc62c37d92108c Mon Sep 17 00:00:00 2001 From: Maciej Czekaj Date: Thu, 29 
Jun 2023 13:29:23 +0000 Subject: [PATCH 205/289] [Xtensa] Add codegen tests for bit vectors --- .../CodeGen/Xtensa/xtensa-xtbool-convert.ll | 16 +++++++++++ .../CodeGen/Xtensa/xtensa-xtbool-spill.ll | 27 +++++++++++++++++-- 2 files changed, 41 insertions(+), 2 deletions(-) diff --git a/llvm/test/CodeGen/Xtensa/xtensa-xtbool-convert.ll b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-convert.ll index c2428cead69d7..17eb2b5fefee5 100644 --- a/llvm/test/CodeGen/Xtensa/xtensa-xtbool-convert.ll +++ b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-convert.ll @@ -19,3 +19,19 @@ define i32 @test_xtbool_zext(<1 x i1> %b) { %int = zext i1 %bit to i32 ret i32 %int } + + +define <2 x i1> @test_xtbool2_build(i32 %a, i32 %b) { + ; CHECK-LABEL: test_xtbool2_build: + ; CHECK: slli {{a[0-9]+}}, {{a[0-9]+}}, 1 + ; CHECK: or {{a[0-9]+}}, {{a[0-9]+}}, {{a[0-9]+}} + ; CHECK: rsr [[BREG:a[0-9]+]], br + ; CHECK: and [[AND:a[0-9]+]], {{a[0-9]+}}, {{a[0-9]+}} + ; CHECK: or [[OR:a[0-9]+]], [[AND]], {{a[0-9]+}} + ; CHECK: wsr [[OR]], br + %tobool = icmp ne i32 %a, 0 + %vecinit = insertelement <2 x i1> undef, i1 %tobool, i64 0 + %tobool1 = icmp ne i32 %b, 0 + %vecinit2 = insertelement <2 x i1> %vecinit, i1 %tobool1, i64 1 + ret <2 x i1> %vecinit2 +} diff --git a/llvm/test/CodeGen/Xtensa/xtensa-xtbool-spill.ll b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-spill.ll index c1fb9f2087041..3381657d294b9 100644 --- a/llvm/test/CodeGen/Xtensa/xtensa-xtbool-spill.ll +++ b/llvm/test/CodeGen/Xtensa/xtensa-xtbool-spill.ll @@ -32,8 +32,7 @@ define <1 x i1> @test_xtbool_load(i32 %addr) { } define void @test_xtbool_store(i32 %addr, <1 x i1> %b) { -entry: - ; CHECK-LABEL: test_xtbool_store + ; CHECK-LABEL: test_xtbool_store: ; CHECK: rsr [[BREG:a[0-9]+]], br ; CHECK: extui [[DST:a[0-9]+]], [[BREG]], 0, 1 ; CHECK: s8i [[DST]], {{a[0-9]+}}, {{[0-9]+}} @@ -42,3 +41,27 @@ entry: store <8 x i1> %insertvec, ptr %ptr, align 1 ret void } +define <2 x i1> @test_xtbool2_load(i32 %addr) { + ; CHECK-LABEL: test_xtbool2_load: + ; CHECK: l8ui 
{{a[0-9]+}} + ; CHECK: rsr [[BREG:a[0-9]+]], br + ; CHECK: and [[AND:a[0-9]+]], {{a[0-9]+}}, [[BREG]] + ; CHECK: or [[OR:a[0-9]+]], [[AND]], {{a[0-9]+}} + ; CHECK: wsr [[OR]], br + %ptr = inttoptr i32 %addr to ptr + %load_bits = load <8 x i1>, ptr %ptr, align 1 + %extractvec = shufflevector <8 x i1> %load_bits, <8 x i1> poison,<2 x i32> + ret <2 x i1> %extractvec +} + +; Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn writeonly +define void @test_xtbool2_store(i32 %p, <2 x i1> %v) { + ; CHECK-LABEL: test_xtbool2_store: + ; CHECK: rsr [[BREG:a[0-9]+]], br + ; CHECK: extui [[DST:a[0-9]+]], [[BREG]], 0, 2 + ; CHECK: s8i [[DST]], {{a[0-9]+}}, {{[0-9]+}} + %ptr = inttoptr i32 %p to ptr + %insertvec = shufflevector <2 x i1> %v, <2 x i1> poison, <8 x i32> + store <8 x i1> %insertvec, ptr %ptr, align 1 + ret void +} From 8c04316ad61f259321507211cf50ba936316bd86 Mon Sep 17 00:00:00 2001 From: Maciej Czekaj Date: Thu, 29 Jun 2023 13:52:19 +0000 Subject: [PATCH 206/289] [Xtensa] Remove unsupported intrinsic xt_conjc_s --- llvm/include/llvm/IR/IntrinsicsXtensa.td | 3 --- 1 file changed, 3 deletions(-) diff --git a/llvm/include/llvm/IR/IntrinsicsXtensa.td b/llvm/include/llvm/IR/IntrinsicsXtensa.td index e805a02f62455..3ac44dba439fb 100644 --- a/llvm/include/llvm/IR/IntrinsicsXtensa.td +++ b/llvm/include/llvm/IR/IntrinsicsXtensa.td @@ -312,9 +312,6 @@ def int_xtensa_xt_addexpm_s: ClangBuiltin<"__builtin_xtensa_xt_addexpm_s">, def int_xtensa_xt_ceil_s: ClangBuiltin<"__builtin_xtensa_xt_ceil_s">, Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty], [ImmArg>, IntrNoMem]>; -def int_xtensa_xt_conjc_s: ClangBuiltin<"__builtin_xtensa_xt_conjc_s">, - Intrinsic<[llvm_v2f32_ty], [llvm_v2f32_ty], [IntrNoMem]>; - def int_xtensa_xt_div0_s: ClangBuiltin<"__builtin_xtensa_xt_div0_s">, Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>; From 0a6ee3d7efd51a147e4bbcf6f45d432c30541fc3 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 1 Oct 2024 01:58:31
+0300 Subject: [PATCH 207/289] [Xtensa] Add HIFI3 intrinsic definitions to clang --- clang/include/clang/Basic/BuiltinsXtensa.def | 2 - .../clang/Basic/BuiltinsXtensaHIFI.def | 2617 +++++++++++++++++ clang/include/clang/Basic/TargetBuiltins.h | 10 +- clang/lib/Basic/Targets/Xtensa.cpp | 2 + 4 files changed, 2625 insertions(+), 6 deletions(-) create mode 100644 clang/include/clang/Basic/BuiltinsXtensaHIFI.def diff --git a/clang/include/clang/Basic/BuiltinsXtensa.def b/clang/include/clang/Basic/BuiltinsXtensa.def index a09762568defd..8c0fcc17133e6 100644 --- a/clang/include/clang/Basic/BuiltinsXtensa.def +++ b/clang/include/clang/Basic/BuiltinsXtensa.def @@ -285,5 +285,3 @@ BUILTIN(__builtin_xtensa_xt_wur_fsr, "vi", "n") // generated code #include "clang/Basic/BuiltinsXtensaESP32S3.def" - -#undef BUILTIN diff --git a/clang/include/clang/Basic/BuiltinsXtensaHIFI.def b/clang/include/clang/Basic/BuiltinsXtensaHIFI.def new file mode 100644 index 0000000000000..d0ac10aeab39b --- /dev/null +++ b/clang/include/clang/Basic/BuiltinsXtensaHIFI.def @@ -0,0 +1,2617 @@ +//===-- BuiltinsXtensaHIFI.def - Xtensa HIFI Builtin function database ----*- C++ -*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the builtin function database for Xtensa HIFI extension. Users of +// this file must define the BUILTIN macro to make use of this information. 
+// +//===----------------------------------------------------------------------===// + +// The format of this database matches clang/Basic/Builtins.def.// ae_int16x4 __builtin_xtensa_ae_abs16s(ae_int16x4 ae_arth_v1) + +// ae_int16x4 __builtin_xtensa_ae_abs16s(ae_int16x4 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_abs16s, "V4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_abs24s(ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_abs24s, "V2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_abs32(ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_abs32, "V2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_abs32s(ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_abs32s, "V2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_abs64(ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_abs64, "V1LLiV1LLi", "n") + +// ae_int64 __builtin_xtensa_ae_abs64s(ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_abs64s, "V1LLiV1LLi", "n") + +// ae_int16x4 __builtin_xtensa_ae_add16(ae_int16x4 ae_arth_v0,ae_int16x4 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_add16, "V4sV4sV4s", "n") + +// ae_int16x4 __builtin_xtensa_ae_add16s(ae_int16x4 ae_arth_v0,ae_int16x4 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_add16s, "V4sV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_add24s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_add24s, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_add32(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_add32, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_add32_hl_lh(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_add32_hl_lh, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_add32s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_add32s, "V2iV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_add64(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_add64, "V1LLiV1LLiV1LLi", "n") + +// ae_int64 __builtin_xtensa_ae_add64s(ae_int64 
ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_add64s, "V1LLiV1LLiV1LLi", "n") + +// int __builtin_xtensa_ae_addbrba32(int art,int ars) +BUILTIN(__builtin_xtensa_ae_addbrba32, "iii", "n") + +// ae_int32x2 __builtin_xtensa_ae_addsub32(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_addsub32, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_addsub32s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_addsub32s, "V2iV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_and(ae_int64 ae_dr_to_dr_v0,ae_int64 ae_dr_to_dr_v1) +BUILTIN(__builtin_xtensa_ae_and, "V1LLiV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_cvt32x2f16_10(ae_int16x4 ae_to_dr_v0) +BUILTIN(__builtin_xtensa_ae_cvt32x2f16_10, "V2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_cvt32x2f16_32(ae_int16x4 ae_to_dr_v0) +BUILTIN(__builtin_xtensa_ae_cvt32x2f16_32, "V2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_cvt48a32(int ars) +BUILTIN(__builtin_xtensa_ae_cvt48a32, "V1LLii", "n") + +// ae_int64 __builtin_xtensa_ae_cvt64a32(int ars) +BUILTIN(__builtin_xtensa_ae_cvt64a32, "V1LLii", "n") + +// ae_int64 __builtin_xtensa_ae_cvt64f32_h(ae_int32x2 ae_dr_to_dr_v0) +BUILTIN(__builtin_xtensa_ae_cvt64f32_h, "V1LLiV2i", "n") + +// int __builtin_xtensa_ae_cvta32f24s_h(ae_int32x2 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_cvta32f24s_h, "iV2i", "n") + +// int __builtin_xtensa_ae_cvta32f24s_l(ae_int32x2 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_cvta32f24s_l, "iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_cvtq56a32s(int ars) +BUILTIN(__builtin_xtensa_ae_cvtq56a32s, "V1LLii", "n") + +// ae_int64 __builtin_xtensa_ae_cvtq56p32s_h(ae_int32x2 ae_dr_to_dr_v0) +BUILTIN(__builtin_xtensa_ae_cvtq56p32s_h, "V1LLiV2i", "n") + +// ae_int64 __builtin_xtensa_ae_cvtq56p32s_l(ae_int32x2 ae_dr_to_dr_v0) +BUILTIN(__builtin_xtensa_ae_cvtq56p32s_l, "V1LLiV2i", "n") + +// void __builtin_xtensa_ae_db(const short** ars,int art) +BUILTIN(__builtin_xtensa_ae_db, "vsC**i", 
"n") + +// void __builtin_xtensa_ae_db_ic(const short** ars,int art) +BUILTIN(__builtin_xtensa_ae_db_ic, "vsC**i", "n") + +// void __builtin_xtensa_ae_db_ip(const short** ars,int art) +BUILTIN(__builtin_xtensa_ae_db_ip, "vsC**i", "n") + +// void __builtin_xtensa_ae_dbi(const short** ars,immediate ae_ohba) +BUILTIN(__builtin_xtensa_ae_dbi, "vsC**i", "n") + +// void __builtin_xtensa_ae_dbi_ic(const short** ars,immediate ae_ohba) +BUILTIN(__builtin_xtensa_ae_dbi_ic, "vsC**i", "n") + +// void __builtin_xtensa_ae_dbi_ip(const short** ars,immediate ae_ohba) +BUILTIN(__builtin_xtensa_ae_dbi_ip, "vsC**i", "n") + +// void __builtin_xtensa_ae_div64d32_h(ae_int64* ae_arth_v,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_div64d32_h, "vV1LLi*V2i", "n") + +// void __builtin_xtensa_ae_div64d32_l(ae_int64* ae_arth_v,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_div64d32_l, "vV1LLi*V2i", "n") + +// xtbool4 __builtin_xtensa_ae_eq16(ae_int16x4 ae_cmpp_v0,ae_int16x4 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_eq16, "V4bV4sV4s", "n") + +// xtbool2 __builtin_xtensa_ae_eq32(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_eq32, "V2bV2iV2i", "n") + +// xtbool __builtin_xtensa_ae_eq64(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_eq64, "V1bV1LLiV1LLi", "n") + +// ae_int16x4 __builtin_xtensa_ae_l16_i(const ae_int16* ars,immediate ae_immls16) +BUILTIN(__builtin_xtensa_ae_l16_i, "V4sV1sC*i", "n") + +// void __builtin_xtensa_ae_l16_ip(ae_int16x4* ae_ls_v,const ae_int16** ars,immediate ae_immls16) +BUILTIN(__builtin_xtensa_ae_l16_ip, "vV4s*V1sC**i", "n") + +// ae_int16x4 __builtin_xtensa_ae_l16_x(const ae_int16* ars,int art) +BUILTIN(__builtin_xtensa_ae_l16_x, "V4sV1sC*i", "n") + +// void __builtin_xtensa_ae_l16_xc(ae_int16x4* ae_ls_v,const ae_int16** ars,int art) +BUILTIN(__builtin_xtensa_ae_l16_xc, "vV4s*V1sC**i", "n") + +// void __builtin_xtensa_ae_l16_xp(ae_int16x4* ae_ls_v,const ae_int16** ars,int art) 
+BUILTIN(__builtin_xtensa_ae_l16_xp, "vV4s*V1sC**i", "n") + +// ae_int32x2 __builtin_xtensa_ae_l16m_i(const ae_int16* ars,immediate ae_immls16) +BUILTIN(__builtin_xtensa_ae_l16m_i, "V2iV1sC*i", "n") + +// void __builtin_xtensa_ae_l16m_iu(ae_int32x2* ae_ls_v,const ae_int16** ars,immediate ae_immls16) +BUILTIN(__builtin_xtensa_ae_l16m_iu, "vV2i*V1sC**i", "n") + +// ae_int32x2 __builtin_xtensa_ae_l16m_x(const ae_int16* ars,int art) +BUILTIN(__builtin_xtensa_ae_l16m_x, "V2iV1sC*i", "n") + +// void __builtin_xtensa_ae_l16m_xc(ae_int32x2* ae_ls_v,const ae_int16** ars,int art) +BUILTIN(__builtin_xtensa_ae_l16m_xc, "vV2i*V1sC**i", "n") + +// void __builtin_xtensa_ae_l16m_xu(ae_int32x2* ae_ls_v,const ae_int16** ars,int art) +BUILTIN(__builtin_xtensa_ae_l16m_xu, "vV2i*V1sC**i", "n") + +// ae_int32x2 __builtin_xtensa_ae_l16x2m_i(const ae_int16x2* ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_l16x2m_i, "V2iV2sC*i", "n") + +// void __builtin_xtensa_ae_l16x2m_iu(ae_int32x2* ae_ls_v,const ae_int16x2** ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_l16x2m_iu, "vV2i*V2sC**i", "n") + +// ae_int32x2 __builtin_xtensa_ae_l16x2m_x(const ae_int16x2* ars,int art) +BUILTIN(__builtin_xtensa_ae_l16x2m_x, "V2iV2sC*i", "n") + +// void __builtin_xtensa_ae_l16x2m_xc(ae_int32x2* ae_ls_v,const ae_int16x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_l16x2m_xc, "vV2i*V2sC**i", "n") + +// void __builtin_xtensa_ae_l16x2m_xu(ae_int32x2* ae_ls_v,const ae_int16x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_l16x2m_xu, "vV2i*V2sC**i", "n") + +// ae_int16x4 __builtin_xtensa_ae_l16x4_i(const ae_int16x4* ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_l16x4_i, "V4sV4sC*i", "n") + +// void __builtin_xtensa_ae_l16x4_ip(ae_int16x4* ae_ls_v,const ae_int16x4** ars,immediate ae_immls64pos) +BUILTIN(__builtin_xtensa_ae_l16x4_ip, "vV4s*V4sC**i", "n") + +// void __builtin_xtensa_ae_l16x4_ric(ae_int16x4* ae_ls_v,const ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_l16x4_ric, "vV4s*V4sC**", "n") + 
+// void __builtin_xtensa_ae_l16x4_rip(ae_int16x4* ae_ls_v,const ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_l16x4_rip, "vV4s*V4sC**", "n") + +// ae_int16x4 __builtin_xtensa_ae_l16x4_x(const ae_int16x4* ars,int art) +BUILTIN(__builtin_xtensa_ae_l16x4_x, "V4sV4sC*i", "n") + +// void __builtin_xtensa_ae_l16x4_xc(ae_int16x4* ae_ls_v,const ae_int16x4** ars,int art) +BUILTIN(__builtin_xtensa_ae_l16x4_xc, "vV4s*V4sC**i", "n") + +// void __builtin_xtensa_ae_l16x4_xp(ae_int16x4* ae_ls_v,const ae_int16x4** ars,int art) +BUILTIN(__builtin_xtensa_ae_l16x4_xp, "vV4s*V4sC**i", "n") + +// ae_int32x2 __builtin_xtensa_ae_l32_i(const ae_int32* ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_l32_i, "V2iV1iC*i", "n") + +// void __builtin_xtensa_ae_l32_ip(ae_int32x2* ae_ls_v,const ae_int32** ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_l32_ip, "vV2i*V1iC**i", "n") + +// ae_int32x2 __builtin_xtensa_ae_l32_x(const ae_int32* ars,int art) +BUILTIN(__builtin_xtensa_ae_l32_x, "V2iV1iC*i", "n") + +// void __builtin_xtensa_ae_l32_xc(ae_int32x2* ae_ls_v,const ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_l32_xc, "vV2i*V1iC**i", "n") + +// void __builtin_xtensa_ae_l32_xp(ae_int32x2* ae_ls_v,const ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_l32_xp, "vV2i*V1iC**i", "n") + +// ae_int32x2 __builtin_xtensa_ae_l32f24_i(const ae_int32* ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_l32f24_i, "V2iV1iC*i", "n") + +// void __builtin_xtensa_ae_l32f24_ip(ae_int32x2* ae_ls_v,const ae_int32** ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_l32f24_ip, "vV2i*V1iC**i", "n") + +// ae_int32x2 __builtin_xtensa_ae_l32f24_x(const ae_int32* ars,int art) +BUILTIN(__builtin_xtensa_ae_l32f24_x, "V2iV1iC*i", "n") + +// void __builtin_xtensa_ae_l32f24_xc(ae_int32x2* ae_ls_v,const ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_l32f24_xc, "vV2i*V1iC**i", "n") + +// void __builtin_xtensa_ae_l32f24_xp(ae_int32x2* ae_ls_v,const ae_int32** ars,int art) 
+BUILTIN(__builtin_xtensa_ae_l32f24_xp, "vV2i*V1iC**i", "n") + +// ae_int64 __builtin_xtensa_ae_l32m_i(const ae_int32* ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_l32m_i, "V1LLiV1iC*i", "n") + +// void __builtin_xtensa_ae_l32m_iu(ae_int64* ae_ls_v,const ae_int32** ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_l32m_iu, "vV1LLi*V1iC**i", "n") + +// ae_int64 __builtin_xtensa_ae_l32m_x(const ae_int32* ars,int art) +BUILTIN(__builtin_xtensa_ae_l32m_x, "V1LLiV1iC*i", "n") + +// void __builtin_xtensa_ae_l32m_xc(ae_int64* ae_ls_v,const ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_l32m_xc, "vV1LLi*V1iC**i", "n") + +// void __builtin_xtensa_ae_l32m_xu(ae_int64* ae_ls_v,const ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_l32m_xu, "vV1LLi*V1iC**i", "n") + +// ae_int32x2 __builtin_xtensa_ae_l32x2_i(const ae_int32x2* ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_l32x2_i, "V2iV2iC*i", "n") + +// void __builtin_xtensa_ae_l32x2_ip(ae_int32x2* ae_ls_v,const ae_int32x2** ars,immediate ae_immls64pos) +BUILTIN(__builtin_xtensa_ae_l32x2_ip, "vV2i*V2iC**i", "n") + +// void __builtin_xtensa_ae_l32x2_ric(ae_int32x2* ae_ls_v,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_l32x2_ric, "vV2i*V2iC**", "n") + +// void __builtin_xtensa_ae_l32x2_rip(ae_int32x2* ae_ls_v,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_l32x2_rip, "vV2i*V2iC**", "n") + +// ae_int32x2 __builtin_xtensa_ae_l32x2_x(const ae_int32x2* ars,int art) +BUILTIN(__builtin_xtensa_ae_l32x2_x, "V2iV2iC*i", "n") + +// void __builtin_xtensa_ae_l32x2_xc(ae_int32x2* ae_ls_v,const ae_int32x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_l32x2_xc, "vV2i*V2iC**i", "n") + +// void __builtin_xtensa_ae_l32x2_xp(ae_int32x2* ae_ls_v,const ae_int32x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_l32x2_xp, "vV2i*V2iC**i", "n") + +// ae_int32x2 __builtin_xtensa_ae_l32x2f24_i(const ae_int32x2* ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_l32x2f24_i, "V2iV2iC*i", "n") + +// void 
__builtin_xtensa_ae_l32x2f24_ip(ae_int32x2* ae_ls_v,const ae_int32x2** ars,immediate ae_immls64pos) +BUILTIN(__builtin_xtensa_ae_l32x2f24_ip, "vV2i*V2iC**i", "n") + +// void __builtin_xtensa_ae_l32x2f24_ric(ae_int32x2* ae_ls_v,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_l32x2f24_ric, "vV2i*V2iC**", "n") + +// void __builtin_xtensa_ae_l32x2f24_rip(ae_int32x2* ae_ls_v,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_l32x2f24_rip, "vV2i*V2iC**", "n") + +// ae_int32x2 __builtin_xtensa_ae_l32x2f24_x(const ae_int32x2* ars,int art) +BUILTIN(__builtin_xtensa_ae_l32x2f24_x, "V2iV2iC*i", "n") + +// void __builtin_xtensa_ae_l32x2f24_xc(ae_int32x2* ae_ls_v,const ae_int32x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_l32x2f24_xc, "vV2i*V2iC**i", "n") + +// void __builtin_xtensa_ae_l32x2f24_xp(ae_int32x2* ae_ls_v,const ae_int32x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_l32x2f24_xp, "vV2i*V2iC**i", "n") + +// ae_int64 __builtin_xtensa_ae_l64_i(const ae_int64* ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_l64_i, "V1LLiV1LLiC*i", "n") + +// void __builtin_xtensa_ae_l64_ip(ae_int64* ae_ls_v,const ae_int64** ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_l64_ip, "vV1LLi*V1LLiC**i", "n") + +// ae_int64 __builtin_xtensa_ae_l64_x(const ae_int64* ars,int art) +BUILTIN(__builtin_xtensa_ae_l64_x, "V1LLiV1LLiC*i", "n") + +// void __builtin_xtensa_ae_l64_xc(ae_int64* ae_ls_v,const ae_int64** ars,int art) +BUILTIN(__builtin_xtensa_ae_l64_xc, "vV1LLi*V1LLiC**i", "n") + +// void __builtin_xtensa_ae_l64_xp(ae_int64* ae_ls_v,const ae_int64** ars,int art) +BUILTIN(__builtin_xtensa_ae_l64_xp, "vV1LLi*V1LLiC**i", "n") + +// void __builtin_xtensa_ae_la16x4_ic(ae_int16x4* ae_ls_av,ae_valign* ae_ls_uu,const ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_la16x4_ic, "vV4s*V8Uc*V4sC**", "n") + +// void __builtin_xtensa_ae_la16x4_ip(ae_int16x4* ae_ls_av,ae_valign* ae_ls_uu,const ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_la16x4_ip, "vV4s*V8Uc*V4sC**", "n") + +// void 
__builtin_xtensa_ae_la16x4_ric(ae_int16x4* ae_ls_av,ae_valign* ae_ls_uu,const ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_la16x4_ric, "vV4s*V8Uc*V4sC**", "n") + +// void __builtin_xtensa_ae_la16x4_rip(ae_int16x4* ae_ls_av,ae_valign* ae_ls_uu,const ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_la16x4_rip, "vV4s*V8Uc*V4sC**", "n") + +// void __builtin_xtensa_ae_la16x4neg_pc(ae_valign* ae_ls_uu,const ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_la16x4neg_pc, "vV8Uc*V4sC**", "n") + +// void __builtin_xtensa_ae_la16x4pos_pc(ae_valign* ae_ls_uu,const ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_la16x4pos_pc, "vV8Uc*V4sC**", "n") + +// void __builtin_xtensa_ae_la24_ic(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24_ic, "vV2i*V8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24_ip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24_ip, "vV2i*V8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24_ric(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24_ric, "vV2i*V8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24_rip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24_rip, "vV2i*V8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24neg_pc(ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24neg_pc, "vV8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24pos_pc(ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24pos_pc, "vV8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24x2_ic(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24x2_ic, "vV2i*V8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24x2_ip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24x2_ip, "vV2i*V8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24x2_ric(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) 
+BUILTIN(__builtin_xtensa_ae_la24x2_ric, "vV2i*V8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24x2_rip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24x2_rip, "vV2i*V8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24x2neg_pc(ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24x2neg_pc, "vV8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la24x2pos_pc(ae_valign* ae_ls_uu,const void** ars) +BUILTIN(__builtin_xtensa_ae_la24x2pos_pc, "vV8Uc*vC**", "n") + +// void __builtin_xtensa_ae_la32x2_ic(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_la32x2_ic, "vV2i*V8Uc*V2iC**", "n") + +// void __builtin_xtensa_ae_la32x2_ip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_la32x2_ip, "vV2i*V8Uc*V2iC**", "n") + +// void __builtin_xtensa_ae_la32x2_ric(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_la32x2_ric, "vV2i*V8Uc*V2iC**", "n") + +// void __builtin_xtensa_ae_la32x2_rip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_la32x2_rip, "vV2i*V8Uc*V2iC**", "n") + +// void __builtin_xtensa_ae_la32x2f24_ic(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_la32x2f24_ic, "vV2i*V8Uc*V2iC**", "n") + +// void __builtin_xtensa_ae_la32x2f24_ip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_la32x2f24_ip, "vV2i*V8Uc*V2iC**", "n") + +// void __builtin_xtensa_ae_la32x2f24_ric(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_la32x2f24_ric, "vV2i*V8Uc*V2iC**", "n") + +// void __builtin_xtensa_ae_la32x2f24_rip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_la32x2f24_rip, "vV2i*V8Uc*V2iC**", "n") + +// void __builtin_xtensa_ae_la32x2neg_pc(ae_valign* ae_ls_uu,const ae_int32x2** 
ars) +BUILTIN(__builtin_xtensa_ae_la32x2neg_pc, "vV8Uc*V2iC**", "n") + +// void __builtin_xtensa_ae_la32x2pos_pc(ae_valign* ae_ls_uu,const ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_la32x2pos_pc, "vV8Uc*V2iC**", "n") + +// ae_valign __builtin_xtensa_ae_la64_pp(const void* ars) +BUILTIN(__builtin_xtensa_ae_la64_pp, "V8UcvC*", "n") + +// ae_valign __builtin_xtensa_ae_lalign64_i(const ae_valign* ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_lalign64_i, "V8UcV8UcC*i", "n") + +// int __builtin_xtensa_ae_lb(int art) +BUILTIN(__builtin_xtensa_ae_lb, "ii", "n") + +// int __builtin_xtensa_ae_lbi(immediate ae_ohba) +BUILTIN(__builtin_xtensa_ae_lbi, "ii", "n") + +// int __builtin_xtensa_ae_lbk(int ars,int art) +BUILTIN(__builtin_xtensa_ae_lbk, "iii", "n") + +// int __builtin_xtensa_ae_lbki(int ars,immediate ae_ohba) +BUILTIN(__builtin_xtensa_ae_lbki, "iii", "n") + +// int __builtin_xtensa_ae_lbs(int art) +BUILTIN(__builtin_xtensa_ae_lbs, "ii", "n") + +// int __builtin_xtensa_ae_lbsi(immediate ae_ohba) +BUILTIN(__builtin_xtensa_ae_lbsi, "ii", "n") + +// xtbool4 __builtin_xtensa_ae_le16(ae_int16x4 ae_cmpp_v0,ae_int16x4 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_le16, "V4bV4sV4s", "n") + +// xtbool2 __builtin_xtensa_ae_le32(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_le32, "V2bV2iV2i", "n") + +// xtbool __builtin_xtensa_ae_le64(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_le64, "V1bV1LLiV1LLi", "n") + +// xtbool4 __builtin_xtensa_ae_lt16(ae_int16x4 ae_cmpp_v0,ae_int16x4 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_lt16, "V4bV4sV4s", "n") + +// xtbool2 __builtin_xtensa_ae_lt32(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_lt32, "V2bV2iV2i", "n") + +// xtbool __builtin_xtensa_ae_lt64(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_lt64, "V1bV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_max32(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) 
+BUILTIN(__builtin_xtensa_ae_max32, "V2iV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_max64(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_max64, "V1LLiV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_maxabs32s(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_maxabs32s, "V2iV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_maxabs64s(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_maxabs64s, "V1LLiV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_min32(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_min32, "V2iV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_min64(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_min64, "V1LLiV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_minabs32s(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_minabs32s, "V2iV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_minabs64s(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) +BUILTIN(__builtin_xtensa_ae_minabs64s, "V1LLiV1LLiV1LLi", "n") + +// ae_int64 __builtin_xtensa_ae_mov(ae_int64 ae_to_dr_v0) +BUILTIN(__builtin_xtensa_ae_mov, "V1LLiV1LLi", "n") + +// int __builtin_xtensa_ae_movad16_0(ae_int16x4 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_movad16_0, "iV4s", "n") + +// int __builtin_xtensa_ae_movad16_1(ae_int16x4 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_movad16_1, "iV4s", "n") + +// int __builtin_xtensa_ae_movad16_2(ae_int16x4 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_movad16_2, "iV4s", "n") + +// int __builtin_xtensa_ae_movad16_3(ae_int16x4 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_movad16_3, "iV4s", "n") + +// int __builtin_xtensa_ae_movad32_h(ae_int32x2 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_movad32_h, "iV2i", "n") + +// int __builtin_xtensa_ae_movad32_l(ae_int32x2 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_movad32_l, "iV2i", "n") + +// ae_valign __builtin_xtensa_ae_movalign(ae_valign ae_uu_v) 
+BUILTIN(__builtin_xtensa_ae_movalign, "V8UcV8Uc", "n") + +// ae_int16x4 __builtin_xtensa_ae_movda16(int ars) +BUILTIN(__builtin_xtensa_ae_movda16, "V4si", "n") + +// ae_int16x4 __builtin_xtensa_ae_movda16x2(int ars,int art) +BUILTIN(__builtin_xtensa_ae_movda16x2, "V4sii", "n") + +// ae_int32 __builtin_xtensa_ae_movda32(int ars) +BUILTIN(__builtin_xtensa_ae_movda32, "V1ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_movda32x2(int ars,int art) +BUILTIN(__builtin_xtensa_ae_movda32x2, "V2iii", "n") + +// void __builtin_xtensa_ae_movf16x4(ae_int16x4* ae_cmov_v,ae_int16x4 ae_cmov_v0,xtbool4 bt4) +BUILTIN(__builtin_xtensa_ae_movf16x4, "vV4s*V4sV4b", "n") + +// void __builtin_xtensa_ae_movf32x2(ae_int32x2* ae_cmov_v,ae_int32x2 ae_cmov_v0,xtbool2 bt2) +BUILTIN(__builtin_xtensa_ae_movf32x2, "vV2i*V2iV2b", "n") + +// void __builtin_xtensa_ae_movf64(ae_int64* ae_cmov_v,ae_int64 ae_cmov_v0,xtbool bt) +BUILTIN(__builtin_xtensa_ae_movf64, "vV1LLi*V1LLiV1b", "n") + +// ae_int32x2 __builtin_xtensa_ae_movi(immediate movi_imm) +BUILTIN(__builtin_xtensa_ae_movi, "V2ii", "n") + +// void __builtin_xtensa_ae_movt16x4(ae_int16x4* ae_cmov_v,ae_int16x4 ae_cmov_v0,xtbool4 bt4) +BUILTIN(__builtin_xtensa_ae_movt16x4, "vV4s*V4sV4b", "n") + +// void __builtin_xtensa_ae_movt32x2(ae_int32x2* ae_cmov_v,ae_int32x2 ae_cmov_v0,xtbool2 bt2) +BUILTIN(__builtin_xtensa_ae_movt32x2, "vV2i*V2iV2b", "n") + +// void __builtin_xtensa_ae_movt64(ae_int64* ae_cmov_v,ae_int64 ae_cmov_v0,xtbool bt) +BUILTIN(__builtin_xtensa_ae_movt64, "vV1LLi*V1LLiV1b", "n") + +// void __builtin_xtensa_ae_mul16x4(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) +BUILTIN(__builtin_xtensa_ae_mul16x4, "vV2i*V2i*V4sV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32_hh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32_hh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mul32_lh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 
opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32_lh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mul32_ll(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mul32_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mul32_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mul32u_ll(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32u_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_h0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_h0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_h0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_h0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_h1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_h1, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_h1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_h1_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_h2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_h2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_h2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_h2_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_h3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_h3, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_h3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_h3_s2, "V1LLiV2iV4s", "n") + +// ae_int64 
__builtin_xtensa_ae_mul32x16_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_l0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_l0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_l1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_l1, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_l1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_l1_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_l2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_l2_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_l3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_l3, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mul32x16_l3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mul32x16_l3_s2, "V1LLiV2iV4s", "n") + +// void __builtin_xtensa_ae_mula16x4(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) +BUILTIN(__builtin_xtensa_ae_mula16x4, "vV2i*V2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mula32_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32_hh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mula32_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32_lh, "vV1LLi*V2iV2i", "n") + +// void 
__builtin_xtensa_ae_mula32_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mula32_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mula32_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mula32u_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32u_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mula32x16_h0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_h0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_h0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_h0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_h1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_h1, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_h1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_h1_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_h2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_h2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_h2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_h2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_h3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) 
+BUILTIN(__builtin_xtensa_ae_mula32x16_h3, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_h3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_h3_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_l1, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_l1_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_l2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_l3, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mula32x16_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mula32x16_l3_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaad24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) 
+BUILTIN(__builtin_xtensa_ae_mulaad24_hh_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaad24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaad24_hh_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaad24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaad24_hl_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaad24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaad24_hl_lh_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaad32x16_h0_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaad32x16_h0_l1, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaad32x16_h0_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaad32x16_h0_l1_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaad32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaad32x16_h1_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaad32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaad32x16_h1_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaad32x16_h2_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaad32x16_h2_l3, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaad32x16_h2_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaad32x16_h2_l3_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaad32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 
opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaad32x16_h3_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaad32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaad32x16_h3_l2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaafd16ss_11_00(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd16ss_11_00, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaafd16ss_11_00_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd16ss_11_00_s2, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaafd16ss_13_02(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd16ss_13_02, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaafd16ss_13_02_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd16ss_13_02_s2, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaafd16ss_33_22(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd16ss_33_22, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaafd16ss_33_22_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd16ss_33_22_s2, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaafd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd24_hh_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaafd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd24_hh_ll_s2, "vV1LLi*V2iV2i", "n") + +// void 
__builtin_xtensa_ae_mulaafd24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd24_hl_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaafd24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd24_hl_lh_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaafd32x16_h0_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd32x16_h0_l1, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaafd32x16_h0_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd32x16_h0_l1_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaafd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd32x16_h1_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaafd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd32x16_h1_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaafd32x16_h2_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd32x16_h2_l3, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaafd32x16_h2_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd32x16_h2_l3_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaafd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaafd32x16_h3_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaafd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) 
+BUILTIN(__builtin_xtensa_ae_mulaafd32x16_h3_l2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulac24(ae_int32x2* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulac24, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulac32x16_h(ae_int32x2* opnd_ae_sem_mul_x4_q0,ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) +BUILTIN(__builtin_xtensa_ae_mulac32x16_h, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulac32x16_l(ae_int32x2* opnd_ae_sem_mul_x4_q0,ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) +BUILTIN(__builtin_xtensa_ae_mulac32x16_l, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_00(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_00, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_00_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_00_s2, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_10(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_10, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_11(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_11, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_20(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_20, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_21(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_21, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_22(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_22, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_30(ae_int32x2* ae_mul_q0,ae_int16x4 
ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_30, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_31(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_31, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_32(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_32, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16ss_33(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulaf16ss_33, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf16x4ss(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) +BUILTIN(__builtin_xtensa_ae_mulaf16x4ss, "vV2i*V2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulaf32r_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32r_hh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaf32r_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32r_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaf32r_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32r_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaf32r_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32r_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaf32s_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32s_hh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaf32s_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 
opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32s_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaf32s_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32s_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaf32s_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32s_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaf32x16_h0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_h0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_h0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_h0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_h1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_h1, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_h1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_h1_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_h2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_h2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_h2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_h2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_h3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_h3, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_h3_s2(ae_int64* 
ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_h3_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_l1, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_l1_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_l2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_l3, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf32x16_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf32x16_l3_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulaf48q32sp16s_l(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf48q32sp16s_l, "vV1LLi*V1LLiV2i", "n") + +// 
void __builtin_xtensa_ae_mulaf48q32sp16s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf48q32sp16s_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulaf48q32sp16u_l(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulaf48q32sp16u_l, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulaf48q32sp16u_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaf48q32sp16u_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulafc24ra(ae_int32x2* opnd_ae_sem_mul_x4_q0,ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int32x2 opnd_ae_sem_mul_x4_d1) +BUILTIN(__builtin_xtensa_ae_mulafc24ra, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulafc32x16ras_h(ae_int32x2* opnd_ae_sem_mul_x4_q0,ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) +BUILTIN(__builtin_xtensa_ae_mulafc32x16ras_h, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulafc32x16ras_l(ae_int32x2* opnd_ae_sem_mul_x4_q0,ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) +BUILTIN(__builtin_xtensa_ae_mulafc32x16ras_l, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulafd24x2_fir_h(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int32x2 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulafd24x2_fir_h, "vV1LLi*V1LLi*V2iV2iV2i", "n") + +// void __builtin_xtensa_ae_mulafd24x2_fir_l(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int32x2 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulafd24x2_fir_l, "vV1LLi*V1LLi*V2iV2iV2i", "n") + +// void __builtin_xtensa_ae_mulafd32x16x2_fir_hh(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulafd32x16x2_fir_hh, "vV1LLi*V1LLi*V2iV2iV4s", "n") + +// void 
__builtin_xtensa_ae_mulafd32x16x2_fir_hl(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulafd32x16x2_fir_hl, "vV1LLi*V1LLi*V2iV2iV4s", "n") + +// void __builtin_xtensa_ae_mulafd32x16x2_fir_lh(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulafd32x16x2_fir_lh, "vV1LLi*V1LLi*V2iV2iV4s", "n") + +// void __builtin_xtensa_ae_mulafd32x16x2_fir_ll(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulafd32x16x2_fir_ll, "vV1LLi*V1LLi*V2iV2iV4s", "n") + +// void __builtin_xtensa_ae_mulafp24x2r(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulafp24x2r, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulafp24x2r_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulafp24x2r_s2, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulafp24x2ra(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulafp24x2ra, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulafp24x2ra_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulafp24x2ra_s2, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulafp32x16x2ras_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulafp32x16x2ras_h, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulafp32x16x2ras_h_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulafp32x16x2ras_h_s2, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulafp32x16x2ras_l(ae_int32x2* 
opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulafp32x16x2ras_l, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulafp32x16x2ras_l_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulafp32x16x2ras_l_s2, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulafp32x16x2rs_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulafp32x16x2rs_h, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulafp32x16x2rs_h_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulafp32x16x2rs_h_s2, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulafp32x16x2rs_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulafp32x16x2rs_l, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulafp32x16x2rs_l_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulafp32x16x2rs_l_s2, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulafp32x2ras(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulafp32x2ras, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulafp32x2rs(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulafp32x2rs, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulafq32sp24s_h_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulafq32sp24s_h_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulafq32sp24s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) 
+BUILTIN(__builtin_xtensa_ae_mulafq32sp24s_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulap24x2(ae_int32x2* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulap24x2, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulap24x2_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulap24x2_s2, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulap32x16x2_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulap32x16x2_h, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulap32x16x2_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulap32x16x2_l, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulap32x2(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulap32x2, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulaq32sp16s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaq32sp16s_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulaq32sp16u_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulaq32sp16u_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mularfq32sp24s_h_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mularfq32sp24s_h_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mularfq32sp24s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mularfq32sp24s_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulas32f48p16s_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 
opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulas32f48p16s_hh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulas32f48p16s_hh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulas32f48p16s_hh_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulas32f48p16s_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulas32f48p16s_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulas32f48p16s_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulas32f48p16s_lh_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulas32f48p16s_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulas32f48p16s_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulas32f48p16s_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulas32f48p16s_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulasd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulasd24_hh_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulasd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulasd24_hh_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulasd24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulasd24_hl_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulasd24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulasd24_hl_lh_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulasd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 
opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulasd32x16_h1_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulasd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulasd32x16_h1_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulasd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulasd32x16_h3_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulasd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulasd32x16_h3_l2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulasfd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulasfd24_hh_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulasfd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulasfd24_hh_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulasfd24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulasfd24_hl_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulasfd24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulasfd24_hl_lh_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulasfd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulasfd32x16_h1_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulasfd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulasfd32x16_h1_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulasfd32x16_h3_l2(ae_int64* 
opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulasfd32x16_h3_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulasfd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulasfd32x16_h3_l2_s2, "vV1LLi*V2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulc24(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulc24, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulc32x16_h(ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) +BUILTIN(__builtin_xtensa_ae_mulc32x16_h, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulc32x16_l(ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) +BUILTIN(__builtin_xtensa_ae_mulc32x16_l, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_00(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_00, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_00_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_00_s2, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_10(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_10, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_11(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_11, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_20(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_20, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_21(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_21, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_22(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_22, "V2iV4sV4s", "n") + +// ae_int32x2 
__builtin_xtensa_ae_mulf16ss_30(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_30, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_31(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_31, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_32(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_32, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulf16ss_33(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulf16ss_33, "V2iV4sV4s", "n") + +// void __builtin_xtensa_ae_mulf16x4ss(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) +BUILTIN(__builtin_xtensa_ae_mulf16x4ss, "vV2i*V2i*V4sV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32r_hh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32r_hh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32r_lh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32r_lh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32r_ll(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32r_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32r_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf32r_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32s_hh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32s_hh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32s_lh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32s_lh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32s_ll(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) 
+BUILTIN(__builtin_xtensa_ae_mulf32s_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32s_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf32s_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_h0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_h0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_h0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_h0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_h1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_h1, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_h1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_h1_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_h2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_h2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_h2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_h2_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_h3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_h3, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_h3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_h3_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_l0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_l0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 
__builtin_xtensa_ae_mulf32x16_l1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_l1, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_l1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_l1_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_l2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_l2_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_l3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_l3, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf32x16_l3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf32x16_l3_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulf48q32sp16s_l(ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf48q32sp16s_l, "V1LLiV1LLiV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf48q32sp16s_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf48q32sp16s_l_s2, "V1LLiV1LLiV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf48q32sp16u_l(ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulf48q32sp16u_l, "V1LLiV1LLiV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulf48q32sp16u_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulf48q32sp16u_l_s2, "V1LLiV1LLiV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfc24ra(ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int32x2 opnd_ae_sem_mul_x4_d1) +BUILTIN(__builtin_xtensa_ae_mulfc24ra, "V2iV2iV2i", "n") + +// ae_int32x2 
__builtin_xtensa_ae_mulfc32x16ras_h(ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) +BUILTIN(__builtin_xtensa_ae_mulfc32x16ras_h, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfc32x16ras_l(ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) +BUILTIN(__builtin_xtensa_ae_mulfc32x16ras_l, "V2iV2iV4s", "n") + +// void __builtin_xtensa_ae_mulfd24x2_fir_h(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int32x2 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulfd24x2_fir_h, "vV1LLi*V1LLi*V2iV2iV2i", "n") + +// void __builtin_xtensa_ae_mulfd24x2_fir_l(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int32x2 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulfd24x2_fir_l, "vV1LLi*V1LLi*V2iV2iV2i", "n") + +// void __builtin_xtensa_ae_mulfd32x16x2_fir_hh(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulfd32x16x2_fir_hh, "vV1LLi*V1LLi*V2iV2iV4s", "n") + +// void __builtin_xtensa_ae_mulfd32x16x2_fir_hl(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulfd32x16x2_fir_hl, "vV1LLi*V1LLi*V2iV2iV4s", "n") + +// void __builtin_xtensa_ae_mulfd32x16x2_fir_lh(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulfd32x16x2_fir_lh, "vV1LLi*V1LLi*V2iV2iV4s", "n") + +// void __builtin_xtensa_ae_mulfd32x16x2_fir_ll(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) +BUILTIN(__builtin_xtensa_ae_mulfd32x16x2_fir_ll, "vV1LLi*V1LLi*V2iV2iV4s", "n") + +// ae_int16x4 __builtin_xtensa_ae_mulfp16x4ras(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulfp16x4ras, "V4sV4sV4s", "n") + +// ae_int16x4 __builtin_xtensa_ae_mulfp16x4s(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) 
+BUILTIN(__builtin_xtensa_ae_mulfp16x4s, "V4sV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp24x2r(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulfp24x2r, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp24x2r_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulfp24x2r_s2, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp24x2ra(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulfp24x2ra, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp24x2ra_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulfp24x2ra_s2, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp32x16x2ras_h(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulfp32x16x2ras_h, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp32x16x2ras_h_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulfp32x16x2ras_h_s2, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp32x16x2ras_l(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulfp32x16x2ras_l, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp32x16x2ras_l_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulfp32x16x2ras_l_s2, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp32x16x2rs_h(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulfp32x16x2rs_h, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp32x16x2rs_h_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulfp32x16x2rs_h_s2, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp32x16x2rs_l(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) 
+BUILTIN(__builtin_xtensa_ae_mulfp32x16x2rs_l, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp32x16x2rs_l_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulfp32x16x2rs_l_s2, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp32x2ras(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulfp32x2ras, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulfp32x2rs(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulfp32x2rs, "V2iV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulfq32sp24s_h_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulfq32sp24s_h_s2, "V1LLiV1LLiV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulfq32sp24s_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulfq32sp24s_l_s2, "V1LLiV1LLiV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulp24x2(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulp24x2, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulp24x2_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulp24x2_s2, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulp32x16x2_h(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulp32x16x2_h, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulp32x16x2_l(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulp32x16x2_l, "V2iV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulp32x2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulp32x2, "V2iV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulq32sp16s_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulq32sp16s_l_s2, "V1LLiV1LLiV2i", "n") + +// ae_int64 
__builtin_xtensa_ae_mulq32sp16u_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulq32sp16u_l_s2, "V1LLiV1LLiV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulrfq32sp24s_h_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulrfq32sp24s_h_s2, "V1LLiV1LLiV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulrfq32sp24s_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulrfq32sp24s_l_s2, "V1LLiV1LLiV2i", "n") + +// void __builtin_xtensa_ae_muls16x4(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) +BUILTIN(__builtin_xtensa_ae_muls16x4, "vV2i*V2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_muls32_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32_hh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_muls32_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_muls32_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32_ll, "vV1LLi*V2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_muls32f48p16s_hh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32f48p16s_hh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_muls32f48p16s_hh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32f48p16s_hh_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_muls32f48p16s_lh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32f48p16s_lh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_muls32f48p16s_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) 
+BUILTIN(__builtin_xtensa_ae_muls32f48p16s_lh_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_muls32f48p16s_ll(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32f48p16s_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_muls32f48p16s_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32f48p16s_ll_s2, "V1LLiV2iV2i", "n") + +// void __builtin_xtensa_ae_muls32u_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32u_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_muls32x16_h0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_h0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_h0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_h0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_h1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_h1, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_h1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_h1_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_h2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_h2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_h2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_h2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_h3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 
opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_h3, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_h3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_h3_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_l1, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_l1_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_l2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_l3, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_muls32x16_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_muls32x16_l3_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsad24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 
ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsad24_hh_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsad24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsad24_hh_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsad32x16_h1_l0(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsad32x16_h1_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsad32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsad32x16_h1_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsad32x16_h3_l2(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsad32x16_h3_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsad32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsad32x16_h3_l2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsafd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsafd24_hh_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsafd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsafd24_hh_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsafd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsafd32x16_h1_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsafd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsafd32x16_h1_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsafd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) 
+BUILTIN(__builtin_xtensa_ae_mulsafd32x16_h3_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsafd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsafd32x16_h3_l2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_00(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_00, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_00_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_00_s2, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_10(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_10, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_11(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_11, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_20(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_20, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_21(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_21, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_22(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_22, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_30(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_30, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_31(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_31, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_32(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) 
+BUILTIN(__builtin_xtensa_ae_mulsf16ss_32, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16ss_33(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsf16ss_33, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf16x4ss(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) +BUILTIN(__builtin_xtensa_ae_mulsf16x4ss, "vV2i*V2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulsf32r_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32r_hh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsf32r_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32r_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsf32r_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32r_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsf32r_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32r_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsf32s_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32s_hh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsf32s_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32s_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsf32s_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32s_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsf32x16_h0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 
opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_h0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_h0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_h0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_h1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_h1, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_h1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_h1_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_h2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_h2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_h2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_h2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_h3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_h3, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_h3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_h3_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_l0_s2, "vV1LLi*V2iV4s", "n") + +// void 
__builtin_xtensa_ae_mulsf32x16_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_l1, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_l1_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_l2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_l3, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf32x16_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf32x16_l3_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsf48q32sp16s_l(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf48q32sp16s_l, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulsf48q32sp16s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsf48q32sp16s_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulsf48q32sp16u_l(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsf48q32sp16u_l, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulsf48q32sp16u_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) 
+BUILTIN(__builtin_xtensa_ae_mulsf48q32sp16u_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulsfp24x2r(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp24x2r, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsfp24x2r_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp24x2r_s2, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsfp24x2ra(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp24x2ra, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsfp24x2ra_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp24x2ra_s2, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsfp32x16x2ras_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp32x16x2ras_h, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsfp32x16x2ras_h_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp32x16x2ras_h_s2, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsfp32x16x2ras_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp32x16x2ras_l, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsfp32x16x2ras_l_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp32x16x2ras_l_s2, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsfp32x16x2rs_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp32x16x2rs_h, "vV2i*V2iV4s", "n") + +// void 
__builtin_xtensa_ae_mulsfp32x16x2rs_h_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp32x16x2rs_h_s2, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsfp32x16x2rs_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp32x16x2rs_l, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsfp32x16x2rs_l_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp32x16x2rs_l_s2, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsfp32x2ras(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp32x2ras, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsfp32x2rs(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsfp32x2rs, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsfq32sp24s_h_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsfq32sp24s_h_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulsfq32sp24s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsfq32sp24s_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulsp24x2(ae_int32x2* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulsp24x2, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsp24x2_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsp24x2_s2, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsp32x16x2_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsp32x16x2_h, "vV2i*V2iV4s", "n") 
+ +// void __builtin_xtensa_ae_mulsp32x16x2_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsp32x16x2_l, "vV2i*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulsp32x2(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulsp32x2, "vV2i*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulsq32sp16s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsq32sp16s_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulsq32sp16u_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsq32sp16u_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulsrfq32sp24s_h_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsrfq32sp24s_h_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulsrfq32sp24s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulsrfq32sp24s_l_s2, "vV1LLi*V1LLiV2i", "n") + +// void __builtin_xtensa_ae_mulss32f48p16s_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulss32f48p16s_hh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulss32f48p16s_hh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulss32f48p16s_hh_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulss32f48p16s_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulss32f48p16s_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulss32f48p16s_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) 
+BUILTIN(__builtin_xtensa_ae_mulss32f48p16s_lh_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulss32f48p16s_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulss32f48p16s_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulss32f48p16s_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulss32f48p16s_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulssd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulssd24_hh_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulssd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssd24_hh_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulssd24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulssd24_hl_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulssd24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssd24_hl_lh_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulssd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulssd32x16_h1_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulssd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssd32x16_h1_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulssd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulssd32x16_h3_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulssd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 
ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssd32x16_h3_l2_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulssfd16ss_11_00(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd16ss_11_00, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulssfd16ss_11_00_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd16ss_11_00_s2, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulssfd16ss_13_02(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd16ss_13_02, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulssfd16ss_13_02_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd16ss_13_02_s2, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulssfd16ss_33_22(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd16ss_33_22, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulssfd16ss_33_22_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd16ss_33_22_s2, "vV2i*V4sV4s", "n") + +// void __builtin_xtensa_ae_mulssfd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd24_hh_ll, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulssfd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd24_hh_ll_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulssfd24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd24_hl_lh, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulssfd24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 
ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd24_hl_lh_s2, "vV1LLi*V2iV2i", "n") + +// void __builtin_xtensa_ae_mulssfd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd32x16_h1_l0, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulssfd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd32x16_h1_l0_s2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulssfd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd32x16_h3_l2, "vV1LLi*V2iV4s", "n") + +// void __builtin_xtensa_ae_mulssfd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulssfd32x16_h3_l2_s2, "vV1LLi*V2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad24_hh_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad24_hh_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad24_hl_lh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad24_hl_lh_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad32x16_h0_l1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad32x16_h0_l1, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad32x16_h0_l1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad32x16_h0_l1_s2, "V1LLiV2iV4s", 
"n") + +// ae_int64 __builtin_xtensa_ae_mulzaad32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad32x16_h1_l0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad32x16_h1_l0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad32x16_h2_l3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad32x16_h2_l3, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad32x16_h2_l3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad32x16_h2_l3_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad32x16_h3_l2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaad32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaad32x16_h3_l2_s2, "V1LLiV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzaafd16ss_11_00(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd16ss_11_00, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzaafd16ss_11_00_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd16ss_11_00_s2, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzaafd16ss_13_02(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd16ss_13_02, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzaafd16ss_13_02_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd16ss_13_02_s2, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzaafd16ss_33_22(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 
opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd16ss_33_22, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzaafd16ss_33_22_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd16ss_33_22_s2, "V2iV4sV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd24_hh_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd24_hh_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd24_hl_lh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd24_hl_lh_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd32x16_h0_l1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd32x16_h0_l1, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd32x16_h0_l1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd32x16_h0_l1_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd32x16_h1_l0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd32x16_h1_l0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd32x16_h2_l3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd32x16_h2_l3, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd32x16_h2_l3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 
ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd32x16_h2_l3_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd32x16_h3_l2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzaafd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzaafd32x16_h3_l2_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzasd24_hh_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzasd24_hh_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasd24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzasd24_hl_lh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasd24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzasd24_hl_lh_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzasd32x16_h1_l0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzasd32x16_h1_l0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzasd32x16_h3_l2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzasd32x16_h3_l2_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasfd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) 
+BUILTIN(__builtin_xtensa_ae_mulzasfd24_hh_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasfd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzasfd24_hh_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasfd24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzasfd24_hl_lh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasfd24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzasfd24_hl_lh_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasfd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzasfd32x16_h1_l0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasfd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzasfd32x16_h1_l0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasfd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzasfd32x16_h3_l2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzasfd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzasfd32x16_h3_l2_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsad24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzsad24_hh_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsad24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzsad24_hh_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsad32x16_h1_l0(ae_int32x2 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzsad32x16_h1_l0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsad32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzsad32x16_h1_l0_s2, 
"V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsad32x16_h3_l2(ae_int32x2 ae_mul_d0,ae_int16x4 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzsad32x16_h3_l2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsad32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzsad32x16_h3_l2_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsafd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzsafd24_hh_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsafd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzsafd24_hh_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsafd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzsafd32x16_h1_l0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsafd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzsafd32x16_h1_l0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsafd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzsafd32x16_h3_l2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzsafd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzsafd32x16_h3_l2_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzssd24_hh_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzssd24_hh_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssd24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzssd24_hl_lh, "V1LLiV2iV2i", "n") + +// ae_int64 
__builtin_xtensa_ae_mulzssd24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzssd24_hl_lh_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzssd32x16_h1_l0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzssd32x16_h1_l0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzssd32x16_h3_l2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzssd32x16_h3_l2_s2, "V1LLiV2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzssfd16ss_11_00(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd16ss_11_00, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzssfd16ss_11_00_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd16ss_11_00_s2, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzssfd16ss_13_02(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd16ss_13_02, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzssfd16ss_13_02_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd16ss_13_02_s2, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzssfd16ss_33_22(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd16ss_33_22, "V2iV4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_mulzssfd16ss_33_22_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) 
+BUILTIN(__builtin_xtensa_ae_mulzssfd16ss_33_22_s2, "V2iV4sV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssfd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd24_hh_ll, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssfd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd24_hh_ll_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssfd24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd24_hl_lh, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssfd24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd24_hl_lh_s2, "V1LLiV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssfd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd32x16_h1_l0, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssfd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd32x16_h1_l0_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssfd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd32x16_h3_l2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_mulzssfd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) +BUILTIN(__builtin_xtensa_ae_mulzssfd32x16_h3_l2_s2, "V1LLiV2iV4s", "n") + +// ae_int64 __builtin_xtensa_ae_nand(ae_int64 ae_dr_to_dr_v0,ae_int64 ae_dr_to_dr_v1) +BUILTIN(__builtin_xtensa_ae_nand, "V1LLiV1LLiV1LLi", "n") + +// ae_int16x4 __builtin_xtensa_ae_neg16s(ae_int16x4 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_neg16s, "V4sV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_neg24s(ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_neg24s, "V2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_neg32(ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_neg32, "V2iV2i", 
"n") + +// ae_int32x2 __builtin_xtensa_ae_neg32s(ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_neg32s, "V2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_neg64(ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_neg64, "V1LLiV1LLi", "n") + +// ae_int64 __builtin_xtensa_ae_neg64s(ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_neg64s, "V1LLiV1LLi", "n") + +// int __builtin_xtensa_ae_nsa64(ae_int64 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_nsa64, "iV1LLi", "n") + +// int __builtin_xtensa_ae_nsaz16_0(ae_int16x4 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_nsaz16_0, "iV4s", "n") + +// int __builtin_xtensa_ae_nsaz32_l(ae_int32x2 ae_dr_to_ar_v0) +BUILTIN(__builtin_xtensa_ae_nsaz32_l, "iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_or(ae_int64 ae_dr_to_dr_v0,ae_int64 ae_dr_to_dr_v1) +BUILTIN(__builtin_xtensa_ae_or, "V1LLiV1LLiV1LLi", "n") + +// void __builtin_xtensa_ae_pksr24(ae_int32x2* ae_pks_d,ae_int64 ae_pks_s,immediate ae_imm2) +BUILTIN(__builtin_xtensa_ae_pksr24, "vV2i*V1LLii", "n") + +// void __builtin_xtensa_ae_pksr32(ae_int32x2* ae_pks_d,ae_int64 ae_pks_s,immediate ae_imm2) +BUILTIN(__builtin_xtensa_ae_pksr32, "vV2i*V1LLii", "n") + +// ae_int16x4 __builtin_xtensa_ae_round16x4f32sasym(ae_int32x2 ae_arth_v1,ae_int32x2 ae_arth_v0) +BUILTIN(__builtin_xtensa_ae_round16x4f32sasym, "V4sV2iV2i", "n") + +// ae_int16x4 __builtin_xtensa_ae_round16x4f32ssym(ae_int32x2 ae_arth_v1,ae_int32x2 ae_arth_v0) +BUILTIN(__builtin_xtensa_ae_round16x4f32ssym, "V4sV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_round24x2f48sasym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_round24x2f48sasym, "V2iV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_round24x2f48ssym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_round24x2f48ssym, "V2iV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_round32x2f48sasym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_round32x2f48sasym, "V2iV1LLiV1LLi", "n") + +// 
ae_int32x2 __builtin_xtensa_ae_round32x2f48ssym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_round32x2f48ssym, "V2iV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_round32x2f64sasym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_round32x2f64sasym, "V2iV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_round32x2f64ssym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_round32x2f64ssym, "V2iV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_roundsp16f24asym(ae_int32x2 ae_arth_v0) +BUILTIN(__builtin_xtensa_ae_roundsp16f24asym, "V2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_roundsp16f24sym(ae_int32x2 ae_arth_v0) +BUILTIN(__builtin_xtensa_ae_roundsp16f24sym, "V2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_roundsp16q48x2asym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_roundsp16q48x2asym, "V2iV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_roundsp16q48x2sym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_roundsp16q48x2sym, "V2iV1LLiV1LLi", "n") + +// ae_int64 __builtin_xtensa_ae_roundsq32f48asym(ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_roundsq32f48asym, "V1LLiV1LLi", "n") + +// ae_int64 __builtin_xtensa_ae_roundsq32f48sym(ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_roundsq32f48sym, "V1LLiV1LLi", "n") + +// void __builtin_xtensa_ae_s16_0_i(ae_int16x4 ae_ls_v,ae_int16* ars,immediate ae_immls16) +BUILTIN(__builtin_xtensa_ae_s16_0_i, "vV4sV1s*i", "n") + +// void __builtin_xtensa_ae_s16_0_ip(ae_int16x4 ae_ls_v,ae_int16** ars,immediate ae_immls16) +BUILTIN(__builtin_xtensa_ae_s16_0_ip, "vV4sV1s**i", "n") + +// void __builtin_xtensa_ae_s16_0_x(ae_int16x4 ae_ls_v,ae_int16* ars,int art) +BUILTIN(__builtin_xtensa_ae_s16_0_x, "vV4sV1s*i", "n") + +// void __builtin_xtensa_ae_s16_0_xc(ae_int16x4 ae_ls_v,ae_int16** ars,int art) +BUILTIN(__builtin_xtensa_ae_s16_0_xc, "vV4sV1s**i", "n") + +// void 
__builtin_xtensa_ae_s16_0_xp(ae_int16x4 ae_ls_v,ae_int16** ars,int art) +BUILTIN(__builtin_xtensa_ae_s16_0_xp, "vV4sV1s**i", "n") + +// void __builtin_xtensa_ae_s16m_l_i(ae_int32x2 ae_ls_v,ae_int16* ars,immediate ae_immls16) +BUILTIN(__builtin_xtensa_ae_s16m_l_i, "vV2iV1s*i", "n") + +// void __builtin_xtensa_ae_s16m_l_iu(ae_int32x2 ae_ls_v,ae_int16** ars,immediate ae_immls16) +BUILTIN(__builtin_xtensa_ae_s16m_l_iu, "vV2iV1s**i", "n") + +// void __builtin_xtensa_ae_s16m_l_x(ae_int32x2 ae_ls_v,ae_int16* ars,int art) +BUILTIN(__builtin_xtensa_ae_s16m_l_x, "vV2iV1s*i", "n") + +// void __builtin_xtensa_ae_s16m_l_xc(ae_int32x2 ae_ls_v,ae_int16** ars,int art) +BUILTIN(__builtin_xtensa_ae_s16m_l_xc, "vV2iV1s**i", "n") + +// void __builtin_xtensa_ae_s16m_l_xu(ae_int32x2 ae_ls_v,ae_int16** ars,int art) +BUILTIN(__builtin_xtensa_ae_s16m_l_xu, "vV2iV1s**i", "n") + +// void __builtin_xtensa_ae_s16x2m_i(ae_int32x2 ae_ls_v,ae_int16x2* ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s16x2m_i, "vV2iV2s*i", "n") + +// void __builtin_xtensa_ae_s16x2m_iu(ae_int32x2 ae_ls_v,ae_int16x2** ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s16x2m_iu, "vV2iV2s**i", "n") + +// void __builtin_xtensa_ae_s16x2m_x(ae_int32x2 ae_ls_v,ae_int16x2* ars,int art) +BUILTIN(__builtin_xtensa_ae_s16x2m_x, "vV2iV2s*i", "n") + +// void __builtin_xtensa_ae_s16x2m_xc(ae_int32x2 ae_ls_v,ae_int16x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_s16x2m_xc, "vV2iV2s**i", "n") + +// void __builtin_xtensa_ae_s16x2m_xu(ae_int32x2 ae_ls_v,ae_int16x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_s16x2m_xu, "vV2iV2s**i", "n") + +// void __builtin_xtensa_ae_s16x4_i(ae_int16x4 ae_ls_v,ae_int16x4* ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_s16x4_i, "vV4sV4s*i", "n") + +// void __builtin_xtensa_ae_s16x4_ip(ae_int16x4 ae_ls_v,ae_int16x4** ars,immediate ae_immls64pos) +BUILTIN(__builtin_xtensa_ae_s16x4_ip, "vV4sV4s**i", "n") + +// void __builtin_xtensa_ae_s16x4_ric(ae_int16x4 ae_ls_v,ae_int16x4** ars) 
+BUILTIN(__builtin_xtensa_ae_s16x4_ric, "vV4sV4s**", "n") + +// void __builtin_xtensa_ae_s16x4_rip(ae_int16x4 ae_ls_v,ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_s16x4_rip, "vV4sV4s**", "n") + +// void __builtin_xtensa_ae_s16x4_x(ae_int16x4 ae_ls_v,ae_int16x4* ars,int art) +BUILTIN(__builtin_xtensa_ae_s16x4_x, "vV4sV4s*i", "n") + +// void __builtin_xtensa_ae_s16x4_xc(ae_int16x4 ae_ls_v,ae_int16x4** ars,int art) +BUILTIN(__builtin_xtensa_ae_s16x4_xc, "vV4sV4s**i", "n") + +// void __builtin_xtensa_ae_s16x4_xp(ae_int16x4 ae_ls_v,ae_int16x4** ars,int art) +BUILTIN(__builtin_xtensa_ae_s16x4_xp, "vV4sV4s**i", "n") + +// void __builtin_xtensa_ae_s24ra64s_i(ae_int64 ae_ls_v1,ae_int32* ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s24ra64s_i, "vV1LLiV1i*i", "n") + +// void __builtin_xtensa_ae_s24ra64s_ip(ae_int64 ae_ls_v1,ae_int32** ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s24ra64s_ip, "vV1LLiV1i**i", "n") + +// void __builtin_xtensa_ae_s24ra64s_x(ae_int64 ae_ls_v1,ae_int32* ars,int art) +BUILTIN(__builtin_xtensa_ae_s24ra64s_x, "vV1LLiV1i*i", "n") + +// void __builtin_xtensa_ae_s24ra64s_xc(ae_int64 ae_ls_v1,ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_s24ra64s_xc, "vV1LLiV1i**i", "n") + +// void __builtin_xtensa_ae_s24ra64s_xp(ae_int64 ae_ls_v1,ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_s24ra64s_xp, "vV1LLiV1i**i", "n") + +// void __builtin_xtensa_ae_s24x2ra64s_ip(ae_int64 ae_ls_v2,ae_int64 ae_ls_v1,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_s24x2ra64s_ip, "vV1LLiV1LLiV2i**", "n") + +// void __builtin_xtensa_ae_s32_l_i(ae_int32x2 ae_ls_v,ae_int32* ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s32_l_i, "vV2iV1i*i", "n") + +// void __builtin_xtensa_ae_s32_l_ip(ae_int32x2 ae_ls_v,ae_int32** ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s32_l_ip, "vV2iV1i**i", "n") + +// void __builtin_xtensa_ae_s32_l_x(ae_int32x2 ae_ls_v,ae_int32* ars,int art) +BUILTIN(__builtin_xtensa_ae_s32_l_x, "vV2iV1i*i", "n") + +// void 
__builtin_xtensa_ae_s32_l_xc(ae_int32x2 ae_ls_v,ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32_l_xc, "vV2iV1i**i", "n") + +// void __builtin_xtensa_ae_s32_l_xp(ae_int32x2 ae_ls_v,ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32_l_xp, "vV2iV1i**i", "n") + +// void __builtin_xtensa_ae_s32f24_l_i(ae_int32x2 ae_ls_v,ae_int32* ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s32f24_l_i, "vV2iV1i*i", "n") + +// void __builtin_xtensa_ae_s32f24_l_ip(ae_int32x2 ae_ls_v,ae_int32** ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s32f24_l_ip, "vV2iV1i**i", "n") + +// void __builtin_xtensa_ae_s32f24_l_x(ae_int32x2 ae_ls_v,ae_int32* ars,int art) +BUILTIN(__builtin_xtensa_ae_s32f24_l_x, "vV2iV1i*i", "n") + +// void __builtin_xtensa_ae_s32f24_l_xc(ae_int32x2 ae_ls_v,ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32f24_l_xc, "vV2iV1i**i", "n") + +// void __builtin_xtensa_ae_s32f24_l_xp(ae_int32x2 ae_ls_v,ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32f24_l_xp, "vV2iV1i**i", "n") + +// void __builtin_xtensa_ae_s32m_i(ae_int64 ae_ls_v,ae_int32* ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s32m_i, "vV1LLiV1i*i", "n") + +// void __builtin_xtensa_ae_s32m_iu(ae_int64 ae_ls_v,ae_int32** ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s32m_iu, "vV1LLiV1i**i", "n") + +// void __builtin_xtensa_ae_s32m_x(ae_int64 ae_ls_v,ae_int32* ars,int art) +BUILTIN(__builtin_xtensa_ae_s32m_x, "vV1LLiV1i*i", "n") + +// void __builtin_xtensa_ae_s32m_xc(ae_int64 ae_ls_v,ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32m_xc, "vV1LLiV1i**i", "n") + +// void __builtin_xtensa_ae_s32m_xu(ae_int64 ae_ls_v,ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32m_xu, "vV1LLiV1i**i", "n") + +// void __builtin_xtensa_ae_s32ra64s_i(ae_int64 ae_ls_v1,ae_int32* ars,immediate ae_immls32) +BUILTIN(__builtin_xtensa_ae_s32ra64s_i, "vV1LLiV1i*i", "n") + +// void __builtin_xtensa_ae_s32ra64s_ip(ae_int64 ae_ls_v1,ae_int32** ars,immediate ae_immls32) 
+BUILTIN(__builtin_xtensa_ae_s32ra64s_ip, "vV1LLiV1i**i", "n") + +// void __builtin_xtensa_ae_s32ra64s_x(ae_int64 ae_ls_v1,ae_int32* ars,int art) +BUILTIN(__builtin_xtensa_ae_s32ra64s_x, "vV1LLiV1i*i", "n") + +// void __builtin_xtensa_ae_s32ra64s_xc(ae_int64 ae_ls_v1,ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32ra64s_xc, "vV1LLiV1i**i", "n") + +// void __builtin_xtensa_ae_s32ra64s_xp(ae_int64 ae_ls_v1,ae_int32** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32ra64s_xp, "vV1LLiV1i**i", "n") + +// void __builtin_xtensa_ae_s32x2_i(ae_int32x2 ae_ls_v,ae_int32x2* ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_s32x2_i, "vV2iV2i*i", "n") + +// void __builtin_xtensa_ae_s32x2_ip(ae_int32x2 ae_ls_v,ae_int32x2** ars,immediate ae_immls64pos) +BUILTIN(__builtin_xtensa_ae_s32x2_ip, "vV2iV2i**i", "n") + +// void __builtin_xtensa_ae_s32x2_ric(ae_int32x2 ae_ls_v,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_s32x2_ric, "vV2iV2i**", "n") + +// void __builtin_xtensa_ae_s32x2_rip(ae_int32x2 ae_ls_v,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_s32x2_rip, "vV2iV2i**", "n") + +// void __builtin_xtensa_ae_s32x2_x(ae_int32x2 ae_ls_v,ae_int32x2* ars,int art) +BUILTIN(__builtin_xtensa_ae_s32x2_x, "vV2iV2i*i", "n") + +// void __builtin_xtensa_ae_s32x2_xc(ae_int32x2 ae_ls_v,ae_int32x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32x2_xc, "vV2iV2i**i", "n") + +// void __builtin_xtensa_ae_s32x2_xp(ae_int32x2 ae_ls_v,ae_int32x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32x2_xp, "vV2iV2i**i", "n") + +// void __builtin_xtensa_ae_s32x2f24_i(ae_int32x2 ae_ls_v,ae_int32x2* ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_s32x2f24_i, "vV2iV2i*i", "n") + +// void __builtin_xtensa_ae_s32x2f24_ip(ae_int32x2 ae_ls_v,ae_int32x2** ars,immediate ae_immls64pos) +BUILTIN(__builtin_xtensa_ae_s32x2f24_ip, "vV2iV2i**i", "n") + +// void __builtin_xtensa_ae_s32x2f24_ric(ae_int32x2 ae_ls_v,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_s32x2f24_ric, "vV2iV2i**", "n") + +// void 
__builtin_xtensa_ae_s32x2f24_rip(ae_int32x2 ae_ls_v,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_s32x2f24_rip, "vV2iV2i**", "n") + +// void __builtin_xtensa_ae_s32x2f24_x(ae_int32x2 ae_ls_v,ae_int32x2* ars,int art) +BUILTIN(__builtin_xtensa_ae_s32x2f24_x, "vV2iV2i*i", "n") + +// void __builtin_xtensa_ae_s32x2f24_xc(ae_int32x2 ae_ls_v,ae_int32x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32x2f24_xc, "vV2iV2i**i", "n") + +// void __builtin_xtensa_ae_s32x2f24_xp(ae_int32x2 ae_ls_v,ae_int32x2** ars,int art) +BUILTIN(__builtin_xtensa_ae_s32x2f24_xp, "vV2iV2i**i", "n") + +// void __builtin_xtensa_ae_s32x2ra64s_ip(ae_int64 ae_ls_v2,ae_int64 ae_ls_v1,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_s32x2ra64s_ip, "vV1LLiV1LLiV2i**", "n") + +// void __builtin_xtensa_ae_s64_i(ae_int64 ae_ls_v,ae_int64* ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_s64_i, "vV1LLiV1LLi*i", "n") + +// void __builtin_xtensa_ae_s64_ip(ae_int64 ae_ls_v,ae_int64** ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_s64_ip, "vV1LLiV1LLi**i", "n") + +// void __builtin_xtensa_ae_s64_x(ae_int64 ae_ls_v,ae_int64* ars,int art) +BUILTIN(__builtin_xtensa_ae_s64_x, "vV1LLiV1LLi*i", "n") + +// void __builtin_xtensa_ae_s64_xc(ae_int64 ae_ls_v,ae_int64** ars,int art) +BUILTIN(__builtin_xtensa_ae_s64_xc, "vV1LLiV1LLi**i", "n") + +// void __builtin_xtensa_ae_s64_xp(ae_int64 ae_ls_v,ae_int64** ars,int art) +BUILTIN(__builtin_xtensa_ae_s64_xp, "vV1LLiV1LLi**i", "n") + +// void __builtin_xtensa_ae_sa16x4_ic(ae_int16x4 ae_ls_v,ae_valign* ae_ls_su,ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_sa16x4_ic, "vV4sV8Uc*V4s**", "n") + +// void __builtin_xtensa_ae_sa16x4_ip(ae_int16x4 ae_ls_v,ae_valign* ae_ls_su,ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_sa16x4_ip, "vV4sV8Uc*V4s**", "n") + +// void __builtin_xtensa_ae_sa16x4_ric(ae_int16x4 ae_ls_v,ae_valign* ae_ls_su,ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_sa16x4_ric, "vV4sV8Uc*V4s**", "n") + +// void __builtin_xtensa_ae_sa16x4_rip(ae_int16x4 
ae_ls_v,ae_valign* ae_ls_su,ae_int16x4** ars) +BUILTIN(__builtin_xtensa_ae_sa16x4_rip, "vV4sV8Uc*V4s**", "n") + +// void __builtin_xtensa_ae_sa24_l_ic(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) +BUILTIN(__builtin_xtensa_ae_sa24_l_ic, "vV2iV8Uc*v**", "n") + +// void __builtin_xtensa_ae_sa24_l_ip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) +BUILTIN(__builtin_xtensa_ae_sa24_l_ip, "vV2iV8Uc*v**", "n") + +// void __builtin_xtensa_ae_sa24_l_ric(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) +BUILTIN(__builtin_xtensa_ae_sa24_l_ric, "vV2iV8Uc*v**", "n") + +// void __builtin_xtensa_ae_sa24_l_rip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) +BUILTIN(__builtin_xtensa_ae_sa24_l_rip, "vV2iV8Uc*v**", "n") + +// void __builtin_xtensa_ae_sa24x2_ic(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) +BUILTIN(__builtin_xtensa_ae_sa24x2_ic, "vV2iV8Uc*v**", "n") + +// void __builtin_xtensa_ae_sa24x2_ip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) +BUILTIN(__builtin_xtensa_ae_sa24x2_ip, "vV2iV8Uc*v**", "n") + +// void __builtin_xtensa_ae_sa24x2_ric(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) +BUILTIN(__builtin_xtensa_ae_sa24x2_ric, "vV2iV8Uc*v**", "n") + +// void __builtin_xtensa_ae_sa24x2_rip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) +BUILTIN(__builtin_xtensa_ae_sa24x2_rip, "vV2iV8Uc*v**", "n") + +// void __builtin_xtensa_ae_sa32x2_ic(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_sa32x2_ic, "vV2iV8Uc*V2i**", "n") + +// void __builtin_xtensa_ae_sa32x2_ip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_sa32x2_ip, "vV2iV8Uc*V2i**", "n") + +// void __builtin_xtensa_ae_sa32x2_ric(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_sa32x2_ric, "vV2iV8Uc*V2i**", "n") + +// void __builtin_xtensa_ae_sa32x2_rip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_sa32x2_rip, "vV2iV8Uc*V2i**", "n") + +// 
void __builtin_xtensa_ae_sa32x2f24_ic(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_sa32x2f24_ic, "vV2iV8Uc*V2i**", "n") + +// void __builtin_xtensa_ae_sa32x2f24_ip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_sa32x2f24_ip, "vV2iV8Uc*V2i**", "n") + +// void __builtin_xtensa_ae_sa32x2f24_ric(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_sa32x2f24_ric, "vV2iV8Uc*V2i**", "n") + +// void __builtin_xtensa_ae_sa32x2f24_rip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) +BUILTIN(__builtin_xtensa_ae_sa32x2f24_rip, "vV2iV8Uc*V2i**", "n") + +// void __builtin_xtensa_ae_sa64neg_fp(ae_valign* ae_ls_su,void* ars) +BUILTIN(__builtin_xtensa_ae_sa64neg_fp, "vV8Uc*v*", "n") + +// void __builtin_xtensa_ae_sa64pos_fp(ae_valign* ae_ls_su,void* ars) +BUILTIN(__builtin_xtensa_ae_sa64pos_fp, "vV8Uc*v*", "n") + +// void __builtin_xtensa_ae_salign64_i(ae_valign ae_ls_su,ae_valign* ars,immediate ae_immls64) +BUILTIN(__builtin_xtensa_ae_salign64_i, "vV8UcV8Uc*i", "n") + +// ae_int16x4 __builtin_xtensa_ae_sat16x4(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_sat16x4, "V4sV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_sat24s(ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_sat24s, "V2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_sat48s(ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_sat48s, "V1LLiV1LLi", "n") + +// ae_int64 __builtin_xtensa_ae_satq56s(ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_satq56s, "V1LLiV1LLi", "n") + +// void __builtin_xtensa_ae_sb(short** ars,int art) +BUILTIN(__builtin_xtensa_ae_sb, "vs**i", "n") + +// void __builtin_xtensa_ae_sb_ic(short** ars,int art) +BUILTIN(__builtin_xtensa_ae_sb_ic, "vs**i", "n") + +// void __builtin_xtensa_ae_sb_ip(short** ars,int art) +BUILTIN(__builtin_xtensa_ae_sb_ip, "vs**i", "n") + +// void __builtin_xtensa_ae_sbf(short** ars) +BUILTIN(__builtin_xtensa_ae_sbf, "vs**", 
"n") + +// void __builtin_xtensa_ae_sbf_ic(short** ars) +BUILTIN(__builtin_xtensa_ae_sbf_ic, "vs**", "n") + +// void __builtin_xtensa_ae_sbf_ip(short** ars) +BUILTIN(__builtin_xtensa_ae_sbf_ip, "vs**", "n") + +// void __builtin_xtensa_ae_sbi(short** ars,int art,immediate ae_ohba2) +BUILTIN(__builtin_xtensa_ae_sbi, "vs**ii", "n") + +// void __builtin_xtensa_ae_sbi_ic(short** ars,int art,immediate ae_ohba2) +BUILTIN(__builtin_xtensa_ae_sbi_ic, "vs**ii", "n") + +// void __builtin_xtensa_ae_sbi_ip(short** ars,int art,immediate ae_ohba2) +BUILTIN(__builtin_xtensa_ae_sbi_ip, "vs**ii", "n") + +// ae_int16x4 __builtin_xtensa_ae_sel16i(ae_int16x4 ae_dr_to_dr_v0,ae_int16x4 ae_dr_to_dr_v1,immediate ae_selimm) +BUILTIN(__builtin_xtensa_ae_sel16i, "V4sV4sV4si", "n") + +// ae_int16x4 __builtin_xtensa_ae_sel16i_n(ae_int16x4 ae_dr_to_dr_v0,ae_int16x4 ae_dr_to_dr_v1,immediate ae_selimm_N) +BUILTIN(__builtin_xtensa_ae_sel16i_n, "V4sV4sV4si", "n") + +// ae_int32x2 __builtin_xtensa_ae_sext32(ae_int32x2 ae_dr_to_dr_v0,immediate ae_opnd_tp7) +BUILTIN(__builtin_xtensa_ae_sext32, "V2iV2ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_sext32x2d16_10(ae_int16x4 ae_to_dr_v0) +BUILTIN(__builtin_xtensa_ae_sext32x2d16_10, "V2iV4s", "n") + +// ae_int32x2 __builtin_xtensa_ae_sext32x2d16_32(ae_int16x4 ae_to_dr_v0) +BUILTIN(__builtin_xtensa_ae_sext32x2d16_32, "V2iV4s", "n") + +// int __builtin_xtensa_ae_sha32(int ars) +BUILTIN(__builtin_xtensa_ae_sha32, "ii", "n") + +// ae_int16x4 __builtin_xtensa_ae_shortswap(ae_int16x4 ae_to_dr_v0) +BUILTIN(__builtin_xtensa_ae_shortswap, "V4sV4s", "n") + +// ae_int16x4 __builtin_xtensa_ae_slaa16s(ae_int16x4 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_slaa16s, "V4sV4si", "n") + +// ae_int32x2 __builtin_xtensa_ae_slaa32(ae_int32x2 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_slaa32, "V2iV2ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_slaa32s(ae_int32x2 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_slaa32s, "V2iV2ii", "n") + +// ae_int64 
__builtin_xtensa_ae_slaa64(ae_int64 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_slaa64, "V1LLiV1LLii", "n") + +// ae_int64 __builtin_xtensa_ae_slaa64s(ae_int64 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_slaa64s, "V1LLiV1LLii", "n") + +// ae_int64 __builtin_xtensa_ae_slaaq56(ae_int64 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_slaaq56, "V1LLiV1LLii", "n") + +// ae_int16x4 __builtin_xtensa_ae_slai16s(ae_int16x4 ae_shift_d0,immediate ae_osa16) +BUILTIN(__builtin_xtensa_ae_slai16s, "V4sV4si", "n") + +// ae_int32x2 __builtin_xtensa_ae_slai24(ae_int32x2 ae_shift_d0,immediate ae_osa32) +BUILTIN(__builtin_xtensa_ae_slai24, "V2iV2ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_slai24s(ae_int32x2 ae_shift_d0,immediate ae_osa32) +BUILTIN(__builtin_xtensa_ae_slai24s, "V2iV2ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_slai32(ae_int32x2 ae_shift_d0,immediate ae_osa32) +BUILTIN(__builtin_xtensa_ae_slai32, "V2iV2ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_slai32s(ae_int32x2 ae_shift_d0,immediate ae_osa32) +BUILTIN(__builtin_xtensa_ae_slai32s, "V2iV2ii", "n") + +// ae_int64 __builtin_xtensa_ae_slai64(ae_int64 ae_shift_d0,immediate ae_osa64) +BUILTIN(__builtin_xtensa_ae_slai64, "V1LLiV1LLii", "n") + +// ae_int64 __builtin_xtensa_ae_slai64s(ae_int64 ae_shift_d0,immediate ae_osa64) +BUILTIN(__builtin_xtensa_ae_slai64s, "V1LLiV1LLii", "n") + +// ae_int64 __builtin_xtensa_ae_slaisq56s(ae_int64 ae_shift_d0,immediate ae_osa64) +BUILTIN(__builtin_xtensa_ae_slaisq56s, "V1LLiV1LLii", "n") + +// ae_int32x2 __builtin_xtensa_ae_slas24(ae_int32x2 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_slas24, "V2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_slas24s(ae_int32x2 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_slas24s, "V2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_slas32(ae_int32x2 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_slas32, "V2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_slas32s(ae_int32x2 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_slas32s, "V2iV2i", "n") + +// 
ae_int64 __builtin_xtensa_ae_slas64(ae_int64 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_slas64, "V1LLiV1LLi", "n") + +// ae_int64 __builtin_xtensa_ae_slas64s(ae_int64 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_slas64s, "V1LLiV1LLi", "n") + +// ae_int64 __builtin_xtensa_ae_slasq56(ae_int64 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_slasq56, "V1LLiV1LLi", "n") + +// ae_int64 __builtin_xtensa_ae_slassq56s(ae_int64 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_slassq56s, "V1LLiV1LLi", "n") + +// ae_int64 __builtin_xtensa_ae_sra64_32(ae_int32x2 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_sra64_32, "V1LLiV2ii", "n") + +// ae_int16x4 __builtin_xtensa_ae_sraa16rs(ae_int16x4 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_sraa16rs, "V4sV4si", "n") + +// ae_int16x4 __builtin_xtensa_ae_sraa16s(ae_int16x4 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_sraa16s, "V4sV4si", "n") + +// ae_int32x2 __builtin_xtensa_ae_sraa32(ae_int32x2 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_sraa32, "V2iV2ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_sraa32rs(ae_int32x2 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_sraa32rs, "V2iV2ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_sraa32s(ae_int32x2 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_sraa32s, "V2iV2ii", "n") + +// ae_int64 __builtin_xtensa_ae_sraa64(ae_int64 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_sraa64, "V1LLiV1LLii", "n") + +// ae_int16x4 __builtin_xtensa_ae_srai16(ae_int16x4 ae_shift_d0,immediate ae_osa16) +BUILTIN(__builtin_xtensa_ae_srai16, "V4sV4si", "n") + +// ae_int16x4 __builtin_xtensa_ae_srai16r(ae_int16x4 ae_shift_d0,immediate ae_osa16) +BUILTIN(__builtin_xtensa_ae_srai16r, "V4sV4si", "n") + +// ae_int32x2 __builtin_xtensa_ae_srai24(ae_int32x2 ae_shift_d0,immediate ae_osa32) +BUILTIN(__builtin_xtensa_ae_srai24, "V2iV2ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_srai32(ae_int32x2 ae_shift_d0,immediate ae_osa32) +BUILTIN(__builtin_xtensa_ae_srai32, "V2iV2ii", "n") + +// ae_int32x2 
__builtin_xtensa_ae_srai32r(ae_int32x2 ae_shift_d0,immediate ae_osa32) +BUILTIN(__builtin_xtensa_ae_srai32r, "V2iV2ii", "n") + +// ae_int64 __builtin_xtensa_ae_srai64(ae_int64 ae_shift_d0,immediate ae_osa64) +BUILTIN(__builtin_xtensa_ae_srai64, "V1LLiV1LLii", "n") + +// ae_int32x2 __builtin_xtensa_ae_sras24(ae_int32x2 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_sras24, "V2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_sras32(ae_int32x2 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_sras32, "V2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_sras64(ae_int64 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_sras64, "V1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_srla32(ae_int32x2 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_srla32, "V2iV2ii", "n") + +// ae_int64 __builtin_xtensa_ae_srla64(ae_int64 ae_shift_d0,int ars) +BUILTIN(__builtin_xtensa_ae_srla64, "V1LLiV1LLii", "n") + +// ae_int32x2 __builtin_xtensa_ae_srli24(ae_int32x2 ae_shift_d0,immediate ae_osa32) +BUILTIN(__builtin_xtensa_ae_srli24, "V2iV2ii", "n") + +// ae_int32x2 __builtin_xtensa_ae_srli32(ae_int32x2 ae_shift_d0,immediate ae_osa32) +BUILTIN(__builtin_xtensa_ae_srli32, "V2iV2ii", "n") + +// ae_int64 __builtin_xtensa_ae_srli64(ae_int64 ae_shift_d0,immediate ae_osa64) +BUILTIN(__builtin_xtensa_ae_srli64, "V1LLiV1LLii", "n") + +// ae_int32x2 __builtin_xtensa_ae_srls24(ae_int32x2 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_srls24, "V2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_srls32(ae_int32x2 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_srls32, "V2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_srls64(ae_int64 ae_shift_d0) +BUILTIN(__builtin_xtensa_ae_srls64, "V1LLiV1LLi", "n") + +// ae_int16x4 __builtin_xtensa_ae_sub16(ae_int16x4 ae_arth_v0,ae_int16x4 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_sub16, "V4sV4sV4s", "n") + +// ae_int16x4 __builtin_xtensa_ae_sub16s(ae_int16x4 ae_arth_v0,ae_int16x4 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_sub16s, "V4sV4sV4s", "n") + +// ae_int32x2 
__builtin_xtensa_ae_sub24s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_sub24s, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_sub32(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_sub32, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_sub32s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_sub32s, "V2iV2iV2i", "n") + +// ae_int64 __builtin_xtensa_ae_sub64(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_sub64, "V1LLiV1LLiV1LLi", "n") + +// ae_int64 __builtin_xtensa_ae_sub64s(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_sub64s, "V1LLiV1LLiV1LLi", "n") + +// ae_int32x2 __builtin_xtensa_ae_subadd32(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_subadd32, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_subadd32s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) +BUILTIN(__builtin_xtensa_ae_subadd32s, "V2iV2iV2i", "n") + +// ae_int32x2 __builtin_xtensa_ae_trunca32f64s_l(ae_int32x2 ae_shift_d0,ae_int64 ae_shift_sd,int ars) +BUILTIN(__builtin_xtensa_ae_trunca32f64s_l, "V2iV2iV1LLii", "n") + +// ae_int32x2 __builtin_xtensa_ae_trunca32x2f64s(ae_int64 ae_shift_d0,ae_int64 ae_shift_sd,int ars) +BUILTIN(__builtin_xtensa_ae_trunca32x2f64s, "V2iV1LLiV1LLii", "n") + +// ae_int32x2 __builtin_xtensa_ae_trunci32f64s_l(ae_int32x2 ae_shift_d0,ae_int64 ae_shift_sd,immediate ae_osa16) +BUILTIN(__builtin_xtensa_ae_trunci32f64s_l, "V2iV2iV1LLii", "n") + +// ae_int32x2 __builtin_xtensa_ae_trunci32x2f64s(ae_int64 ae_shift_d0,ae_int64 ae_shift_sd,immediate ae_osa16) +BUILTIN(__builtin_xtensa_ae_trunci32x2f64s, "V2iV1LLiV1LLii", "n") + +// void __builtin_xtensa_ae_vldl16c(const short** ars) +BUILTIN(__builtin_xtensa_ae_vldl16c, "vsC**", "n") + +// void __builtin_xtensa_ae_vldl16c_ic(const short** ars) +BUILTIN(__builtin_xtensa_ae_vldl16c_ic, "vsC**", "n") + +// void __builtin_xtensa_ae_vldl16c_ip(const short** ars) 
+BUILTIN(__builtin_xtensa_ae_vldl16c_ip, "vsC**", "n") + +// void __builtin_xtensa_ae_vldl16t(xtbool* br,int* art,const short* ars) +BUILTIN(__builtin_xtensa_ae_vldl16t, "vV1b*i*sC*", "n") + +// void __builtin_xtensa_ae_vldl32t(xtbool* br,int* art,const int* ars) +BUILTIN(__builtin_xtensa_ae_vldl32t, "vV1b*i*iC*", "n") + +// void __builtin_xtensa_ae_vldsht(int art) +BUILTIN(__builtin_xtensa_ae_vldsht, "vi", "n") + +// void __builtin_xtensa_ae_vlel16t(xtbool* br,int* art,const short* ars) +BUILTIN(__builtin_xtensa_ae_vlel16t, "vV1b*i*sC*", "n") + +// void __builtin_xtensa_ae_vlel32t(xtbool* br,int* art,const int* ars) +BUILTIN(__builtin_xtensa_ae_vlel32t, "vV1b*i*iC*", "n") + +// void __builtin_xtensa_ae_vles16c(short** ars) +BUILTIN(__builtin_xtensa_ae_vles16c, "vs**", "n") + +// void __builtin_xtensa_ae_vles16c_ic(short** ars) +BUILTIN(__builtin_xtensa_ae_vles16c_ic, "vs**", "n") + +// void __builtin_xtensa_ae_vles16c_ip(short** ars) +BUILTIN(__builtin_xtensa_ae_vles16c_ip, "vs**", "n") + +// ae_int64 __builtin_xtensa_ae_xor(ae_int64 ae_dr_to_dr_v0,ae_int64 ae_dr_to_dr_v1) +BUILTIN(__builtin_xtensa_ae_xor, "V1LLiV1LLiV1LLi", "n") + +// ae_valign __builtin_xtensa_ae_zalign64() +BUILTIN(__builtin_xtensa_ae_zalign64, "V8Uc", "n") + +// int __builtin_xtensa_rur_ae_bithead() +BUILTIN(__builtin_xtensa_rur_ae_bithead, "i", "n") + +// int __builtin_xtensa_rur_ae_bitptr() +BUILTIN(__builtin_xtensa_rur_ae_bitptr, "i", "n") + +// int __builtin_xtensa_rur_ae_bitsused() +BUILTIN(__builtin_xtensa_rur_ae_bitsused, "i", "n") + +// int __builtin_xtensa_rur_ae_cbegin0() +BUILTIN(__builtin_xtensa_rur_ae_cbegin0, "i", "n") + +// int __builtin_xtensa_rur_ae_cend0() +BUILTIN(__builtin_xtensa_rur_ae_cend0, "i", "n") + +// int __builtin_xtensa_rur_ae_cw_sd_no() +BUILTIN(__builtin_xtensa_rur_ae_cw_sd_no, "i", "n") + +// int __builtin_xtensa_rur_ae_cwrap() +BUILTIN(__builtin_xtensa_rur_ae_cwrap, "i", "n") + +// int __builtin_xtensa_rur_ae_first_ts() 
+BUILTIN(__builtin_xtensa_rur_ae_first_ts, "i", "n") + +// int __builtin_xtensa_rur_ae_nextoffset() +BUILTIN(__builtin_xtensa_rur_ae_nextoffset, "i", "n") + +// int __builtin_xtensa_rur_ae_overflow() +BUILTIN(__builtin_xtensa_rur_ae_overflow, "i", "n") + +// int __builtin_xtensa_rur_ae_ovf_sar() +BUILTIN(__builtin_xtensa_rur_ae_ovf_sar, "i", "n") + +// int __builtin_xtensa_rur_ae_sar() +BUILTIN(__builtin_xtensa_rur_ae_sar, "i", "n") + +// int __builtin_xtensa_rur_ae_searchdone() +BUILTIN(__builtin_xtensa_rur_ae_searchdone, "i", "n") + +// int __builtin_xtensa_rur_ae_tablesize() +BUILTIN(__builtin_xtensa_rur_ae_tablesize, "i", "n") + +// int __builtin_xtensa_rur_ae_ts_fts_bu_bp() +BUILTIN(__builtin_xtensa_rur_ae_ts_fts_bu_bp, "i", "n") + +// void __builtin_xtensa_wur_ae_bithead(int art) +BUILTIN(__builtin_xtensa_wur_ae_bithead, "vi", "n") + +// void __builtin_xtensa_wur_ae_bitptr(int art) +BUILTIN(__builtin_xtensa_wur_ae_bitptr, "vi", "n") + +// void __builtin_xtensa_wur_ae_bitsused(int art) +BUILTIN(__builtin_xtensa_wur_ae_bitsused, "vi", "n") + +// void __builtin_xtensa_wur_ae_cbegin0(int art) +BUILTIN(__builtin_xtensa_wur_ae_cbegin0, "vi", "n") + +// void __builtin_xtensa_wur_ae_cend0(int art) +BUILTIN(__builtin_xtensa_wur_ae_cend0, "vi", "n") + +// void __builtin_xtensa_wur_ae_cw_sd_no(int art) +BUILTIN(__builtin_xtensa_wur_ae_cw_sd_no, "vi", "n") + +// void __builtin_xtensa_wur_ae_cwrap(int art) +BUILTIN(__builtin_xtensa_wur_ae_cwrap, "vi", "n") + +// void __builtin_xtensa_wur_ae_first_ts(int art) +BUILTIN(__builtin_xtensa_wur_ae_first_ts, "vi", "n") + +// void __builtin_xtensa_wur_ae_nextoffset(int art) +BUILTIN(__builtin_xtensa_wur_ae_nextoffset, "vi", "n") + +// void __builtin_xtensa_wur_ae_overflow(int art) +BUILTIN(__builtin_xtensa_wur_ae_overflow, "vi", "n") + +// void __builtin_xtensa_wur_ae_ovf_sar(int art) +BUILTIN(__builtin_xtensa_wur_ae_ovf_sar, "vi", "n") + +// void __builtin_xtensa_wur_ae_sar(int art) +BUILTIN(__builtin_xtensa_wur_ae_sar, "vi", 
"n") + +// void __builtin_xtensa_wur_ae_searchdone(int art) +BUILTIN(__builtin_xtensa_wur_ae_searchdone, "vi", "n") + +// void __builtin_xtensa_wur_ae_tablesize(int art) +BUILTIN(__builtin_xtensa_wur_ae_tablesize, "vi", "n") + +// void __builtin_xtensa_wur_ae_ts_fts_bu_bp(int art) +BUILTIN(__builtin_xtensa_wur_ae_ts_fts_bu_bp, "vi", "n") + +#undef BUILTIN diff --git a/clang/include/clang/Basic/TargetBuiltins.h b/clang/include/clang/Basic/TargetBuiltins.h index e357667b0a9de..55ee7715e02a7 100644 --- a/clang/include/clang/Basic/TargetBuiltins.h +++ b/clang/include/clang/Basic/TargetBuiltins.h @@ -368,12 +368,14 @@ namespace clang { /// Xtensa builtins namespace Xtensa { - enum { - LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1, + enum { + LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1, #define BUILTIN(ID, TYPE, ATTRS) BI##ID, #include "clang/Basic/BuiltinsXtensa.def" - LastTSBuiltin - }; +#include "clang/Basic/BuiltinsXtensaHIFI.def" +#undef BUILTIN + LastTSBuiltin + }; } // namespace Xtensa static constexpr uint64_t LargestBuiltinID = std::max( diff --git a/clang/lib/Basic/Targets/Xtensa.cpp b/clang/lib/Basic/Targets/Xtensa.cpp index cded885966c89..7b5346062bccb 100644 --- a/clang/lib/Basic/Targets/Xtensa.cpp +++ b/clang/lib/Basic/Targets/Xtensa.cpp @@ -24,6 +24,8 @@ static constexpr Builtin::Info BuiltinInfo[] = { #define BUILTIN(ID, TYPE, ATTRS) \ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES}, #include "clang/Basic/BuiltinsXtensa.def" +#include "clang/Basic/BuiltinsXtensaHIFI.def" +#undef BUILTIN }; ArrayRef XtensaTargetInfo::getTargetBuiltins() const { From 6fff22cd82d1c3270a0b7f5d85351057f6c843c3 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 1 Oct 2024 02:02:00 +0300 Subject: [PATCH 208/289] [Xtensa] Add constant checks for HIFI3 intrinsics --- clang/include/clang/Basic/XtensaSemaCheck.inc | 215 ++++++++++++++++++ clang/lib/Sema/SemaXtensa.cpp | 1 + 2 files changed, 216 insertions(+) create mode 100644 
clang/include/clang/Basic/XtensaSemaCheck.inc diff --git a/clang/include/clang/Basic/XtensaSemaCheck.inc b/clang/include/clang/Basic/XtensaSemaCheck.inc new file mode 100644 index 0000000000000..983014004209c --- /dev/null +++ b/clang/include/clang/Basic/XtensaSemaCheck.inc @@ -0,0 +1,215 @@ +//===-- XtensaSemaCheck.inc - Clang semantic checks for Xtensa arch ----*- C++ +//-*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +case Xtensa::BI__builtin_xtensa_ae_dbi: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 1, 16); +case Xtensa::BI__builtin_xtensa_ae_dbi_ic: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 1, 16); +case Xtensa::BI__builtin_xtensa_ae_dbi_ip: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 1, 16); +case Xtensa::BI__builtin_xtensa_ae_l16_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, -16, 14) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 1, 2); +case Xtensa::BI__builtin_xtensa_ae_l16_ip: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -16, 14) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 2); +case Xtensa::BI__builtin_xtensa_ae_l16m_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, -16, 14) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 1, 2); +case Xtensa::BI__builtin_xtensa_ae_l16m_iu: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -16, 14) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 2); +case Xtensa::BI__builtin_xtensa_ae_l16x2m_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, -32, 28) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 1, 4); +case Xtensa::BI__builtin_xtensa_ae_l16x2m_iu: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 4); +case 
Xtensa::BI__builtin_xtensa_ae_l16x4_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, -64, 56) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 1, 8); +case Xtensa::BI__builtin_xtensa_ae_l16x4_ip: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 56) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_l32_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, -32, 28) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 1, 4); +case Xtensa::BI__builtin_xtensa_ae_l32_ip: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_l32f24_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, -32, 28) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 1, 4); +case Xtensa::BI__builtin_xtensa_ae_l32f24_ip: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_l32m_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, -32, 28) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 1, 4); +case Xtensa::BI__builtin_xtensa_ae_l32m_iu: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_l32x2_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, -64, 56) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 1, 8); +case Xtensa::BI__builtin_xtensa_ae_l32x2_ip: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 56) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_l32x2f24_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, -64, 56) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 1, 8); +case Xtensa::BI__builtin_xtensa_ae_l32x2f24_ip: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 56) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_l64_i: +return 
SemaRef.BuiltinConstantArgRange(TheCall, 1, -64, 56) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 1, 8); +case Xtensa::BI__builtin_xtensa_ae_l64_ip: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -64, 56) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_lalign64_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, -64, 56) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 1, 8); +case Xtensa::BI__builtin_xtensa_ae_lbi: +return SemaRef.BuiltinConstantArgRange(TheCall, 0, 1, 16); +case Xtensa::BI__builtin_xtensa_ae_lbki: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 1, 16); +case Xtensa::BI__builtin_xtensa_ae_lbsi: +return SemaRef.BuiltinConstantArgRange(TheCall, 0, 1, 16); +case Xtensa::BI__builtin_xtensa_ae_movi: +return SemaRef.BuiltinConstantArgRange(TheCall, 0, -16, 47); +case Xtensa::BI__builtin_xtensa_ae_pksr24: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3); +case Xtensa::BI__builtin_xtensa_ae_pksr32: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3); +case Xtensa::BI__builtin_xtensa_ae_s16_0_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -16, 14) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 2); +case Xtensa::BI__builtin_xtensa_ae_s16_0_ip: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -16, 14) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 2); +case Xtensa::BI__builtin_xtensa_ae_s16m_l_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -16, 14) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 2); +case Xtensa::BI__builtin_xtensa_ae_s16m_l_iu: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -16, 14) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 2); +case Xtensa::BI__builtin_xtensa_ae_s16x2m_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s16x2m_iu: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -32, 28) || + 
SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s16x4_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -64, 56) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_s16x4_ip: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 56) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_s24ra64s_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s24ra64s_ip: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s32_l_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s32_l_ip: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s32f24_l_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s32f24_l_ip: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s32m_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s32m_iu: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s32ra64s_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -32, 28) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s32ra64s_ip: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -32, 28) || + 
SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 4); +case Xtensa::BI__builtin_xtensa_ae_s32x2_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -64, 56) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_s32x2_ip: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 56) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_s32x2f24_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -64, 56) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_s32x2f24_ip: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 56) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_s64_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -64, 56) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_s64_ip: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -64, 56) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_salign64_i: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, -64, 56) || + SemaRef.BuiltinConstantArgMultiple(TheCall, 2, 8); +case Xtensa::BI__builtin_xtensa_ae_sbi: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, 1, 16); +case Xtensa::BI__builtin_xtensa_ae_sbi_ic: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, 1, 16); +case Xtensa::BI__builtin_xtensa_ae_sbi_ip: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, 1, 16); +case Xtensa::BI__builtin_xtensa_ae_sel16i: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 15); +case Xtensa::BI__builtin_xtensa_ae_sel16i_n: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3); +case Xtensa::BI__builtin_xtensa_ae_sext32: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 7, 22); +case Xtensa::BI__builtin_xtensa_ae_slai16s: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15); +case Xtensa::BI__builtin_xtensa_ae_slai24: +return 
SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31); +case Xtensa::BI__builtin_xtensa_ae_slai24s: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31); +case Xtensa::BI__builtin_xtensa_ae_slai32: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31); +case Xtensa::BI__builtin_xtensa_ae_slai32s: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31); +case Xtensa::BI__builtin_xtensa_ae_slai64: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 63); +case Xtensa::BI__builtin_xtensa_ae_slai64s: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 63); +case Xtensa::BI__builtin_xtensa_ae_slaisq56s: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 63); +case Xtensa::BI__builtin_xtensa_ae_srai16: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15); +case Xtensa::BI__builtin_xtensa_ae_srai16r: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15); +case Xtensa::BI__builtin_xtensa_ae_srai24: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31); +case Xtensa::BI__builtin_xtensa_ae_srai32: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31); +case Xtensa::BI__builtin_xtensa_ae_srai32r: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31); +case Xtensa::BI__builtin_xtensa_ae_srai64: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 63); +case Xtensa::BI__builtin_xtensa_ae_srli24: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31); +case Xtensa::BI__builtin_xtensa_ae_srli32: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31); +case Xtensa::BI__builtin_xtensa_ae_srli64: +return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 63); +case Xtensa::BI__builtin_xtensa_ae_trunci32f64s_l: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 15); +case Xtensa::BI__builtin_xtensa_ae_trunci32x2f64s: +return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 15); diff --git a/clang/lib/Sema/SemaXtensa.cpp b/clang/lib/Sema/SemaXtensa.cpp index b3cae0adbfe31..10f51339a1f51 100644 --- a/clang/lib/Sema/SemaXtensa.cpp 
+++ b/clang/lib/Sema/SemaXtensa.cpp @@ -24,6 +24,7 @@ bool SemaXtensa::CheckXtensaBuiltinFunctionCall(const TargetInfo &TI, unsigned i = 0, l = 0, u = 0; switch (BuiltinID) { +#include "clang/Basic/XtensaSemaCheck.inc" default: return false; case Xtensa::BI__builtin_xtensa_mul_ad_ll: From 4431686ea391914a3560050db59be8bee978526b Mon Sep 17 00:00:00 2001 From: Maciej Czekaj Date: Thu, 29 Jun 2023 14:01:42 +0000 Subject: [PATCH 209/289] [Xtensa] Support HIFI3 vectors in LLVM calls --- clang/lib/CodeGen/Targets/Xtensa.cpp | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/clang/lib/CodeGen/Targets/Xtensa.cpp b/clang/lib/CodeGen/Targets/Xtensa.cpp index 0c132816f670d..82692d49be4e7 100644 --- a/clang/lib/CodeGen/Targets/Xtensa.cpp +++ b/clang/lib/CodeGen/Targets/Xtensa.cpp @@ -104,7 +104,16 @@ ABIArgInfo XtensaABIInfo::classifyArgumentType(QualType Ty, llvm::FixedVectorType::get(llvm::Type::getInt1Ty(getVMContext()), 1); return ABIArgInfo::getDirect(ResType); } - + // Vector arguments + if (getTarget().hasFeature("hifi3") && Ty->isVectorType() && (Size <= 64)) { + const VectorType *VT = Ty->getAs(); + QualType EltTy = VT->getElementType(); + unsigned EltSize = getContext().getTypeSize(EltTy); + if (EltSize == 8) // VAlign + return ABIArgInfo::getDirect( + llvm::IntegerType::get(getVMContext(), Size)); + return ABIArgInfo::getDirectInReg(); + } // Aggregates which are <= 6*32 will be passed in registers if possible, // so coerce to integers. 
if ((Size <= (MaxNumArgGPRs * 32)) && (!MustUseStack)) { @@ -246,7 +255,6 @@ class XtensaTargetCodeGenInfo : public TargetCodeGenInfo { }; } // namespace - std::unique_ptr CodeGen::createXtensaTargetCodeGenInfo(CodeGenModule &CGM) { return std::make_unique(CGM.getTypes()); From 32c14d6eeb82833affacd94232e05f6fe866fbee Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 26 Mar 2024 16:47:48 +0300 Subject: [PATCH 210/289] [Xtensa] Add HIFI3 intrinsics to Clang codegen --- clang/include/clang/Basic/XtensaBuiltins.inc | 1743 ++ clang/lib/CodeGen/CGBuiltin.cpp | 216 +- clang/lib/CodeGen/CodeGenFunction.h | 2 + .../CodeGen/Xtensa/xtensa-hifi-intrinsics.c | 21408 ++++++++++++++++ 4 files changed, 23335 insertions(+), 34 deletions(-) create mode 100644 clang/include/clang/Basic/XtensaBuiltins.inc create mode 100644 clang/test/CodeGen/Xtensa/xtensa-hifi-intrinsics.c diff --git a/clang/include/clang/Basic/XtensaBuiltins.inc b/clang/include/clang/Basic/XtensaBuiltins.inc new file mode 100644 index 0000000000000..1231d992c36c4 --- /dev/null +++ b/clang/include/clang/Basic/XtensaBuiltins.inc @@ -0,0 +1,1743 @@ +//===-- XtensaBuiltins.inc - Clang intrinsic database for Xtensa arch ----*- C++ +//-*-==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +case Xtensa::BI__builtin_xtensa_ae_abs16s: +return {Intrinsic::xtensa_ae_abs16s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_abs24s: +return {Intrinsic::xtensa_ae_abs24s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_abs32: +return {Intrinsic::xtensa_ae_abs32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_abs32s: +return {Intrinsic::xtensa_ae_abs32s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_abs64: +return {Intrinsic::xtensa_ae_abs64, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_abs64s: +return {Intrinsic::xtensa_ae_abs64s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_add16: +return {Intrinsic::xtensa_ae_add16, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_add16s: +return {Intrinsic::xtensa_ae_add16s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_add24s: +return {Intrinsic::xtensa_ae_add24s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_add32: +return {Intrinsic::xtensa_ae_add32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_add32_hl_lh: +return {Intrinsic::xtensa_ae_add32_hl_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_add32s: +return {Intrinsic::xtensa_ae_add32s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_add64: +return {Intrinsic::xtensa_ae_add64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_add64s: +return {Intrinsic::xtensa_ae_add64s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_addbrba32: +return {Intrinsic::xtensa_ae_addbrba32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_addsub32: +return {Intrinsic::xtensa_ae_addsub32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_addsub32s: +return {Intrinsic::xtensa_ae_addsub32s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_and: +return {Intrinsic::xtensa_ae_and, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_cvt32x2f16_10: +return {Intrinsic::xtensa_ae_cvt32x2f16_10, 1, 0x20001}; +case 
Xtensa::BI__builtin_xtensa_ae_cvt32x2f16_32: +return {Intrinsic::xtensa_ae_cvt32x2f16_32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_cvt48a32: +return {Intrinsic::xtensa_ae_cvt48a32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_cvt64a32: +return {Intrinsic::xtensa_ae_cvt64a32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_cvt64f32_h: +return {Intrinsic::xtensa_ae_cvt64f32_h, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_cvta32f24s_h: +return {Intrinsic::xtensa_ae_cvta32f24s_h, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_cvta32f24s_l: +return {Intrinsic::xtensa_ae_cvta32f24s_l, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_cvtq56a32s: +return {Intrinsic::xtensa_ae_cvtq56a32s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_cvtq56p32s_h: +return {Intrinsic::xtensa_ae_cvtq56p32s_h, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_cvtq56p32s_l: +return {Intrinsic::xtensa_ae_cvtq56p32s_l, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_db: +return {Intrinsic::xtensa_ae_db, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_db_ic: +return {Intrinsic::xtensa_ae_db_ic, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_db_ip: +return {Intrinsic::xtensa_ae_db_ip, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_dbi: +return {Intrinsic::xtensa_ae_dbi, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_dbi_ic: +return {Intrinsic::xtensa_ae_dbi_ic, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_dbi_ip: +return {Intrinsic::xtensa_ae_dbi_ip, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_div64d32_h: +return {Intrinsic::xtensa_ae_div64d32_h, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_div64d32_l: +return {Intrinsic::xtensa_ae_div64d32_l, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_eq16: +return {Intrinsic::xtensa_ae_eq16, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_eq32: +return {Intrinsic::xtensa_ae_eq32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_eq64: +return {Intrinsic::xtensa_ae_eq64, 1, 0x60001}; +case 
Xtensa::BI__builtin_xtensa_ae_l16_i: +return {Intrinsic::xtensa_ae_l16_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l16_ip: +return {Intrinsic::xtensa_ae_l16_ip, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16_x: +return {Intrinsic::xtensa_ae_l16_x, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l16_xc: +return {Intrinsic::xtensa_ae_l16_xc, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16_xp: +return {Intrinsic::xtensa_ae_l16_xp, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16m_i: +return {Intrinsic::xtensa_ae_l16m_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l16m_iu: +return {Intrinsic::xtensa_ae_l16m_iu, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16m_x: +return {Intrinsic::xtensa_ae_l16m_x, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l16m_xc: +return {Intrinsic::xtensa_ae_l16m_xc, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16m_xu: +return {Intrinsic::xtensa_ae_l16m_xu, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16x2m_i: +return {Intrinsic::xtensa_ae_l16x2m_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l16x2m_iu: +return {Intrinsic::xtensa_ae_l16x2m_iu, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16x2m_x: +return {Intrinsic::xtensa_ae_l16x2m_x, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l16x2m_xc: +return {Intrinsic::xtensa_ae_l16x2m_xc, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16x2m_xu: +return {Intrinsic::xtensa_ae_l16x2m_xu, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16x4_i: +return {Intrinsic::xtensa_ae_l16x4_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l16x4_ip: +return {Intrinsic::xtensa_ae_l16x4_ip, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16x4_ric: +return {Intrinsic::xtensa_ae_l16x4_ric, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_l16x4_rip: +return {Intrinsic::xtensa_ae_l16x4_rip, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_l16x4_x: +return {Intrinsic::xtensa_ae_l16x4_x, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l16x4_xc: +return 
{Intrinsic::xtensa_ae_l16x4_xc, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l16x4_xp: +return {Intrinsic::xtensa_ae_l16x4_xp, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32_i: +return {Intrinsic::xtensa_ae_l32_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l32_ip: +return {Intrinsic::xtensa_ae_l32_ip, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32_x: +return {Intrinsic::xtensa_ae_l32_x, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l32_xc: +return {Intrinsic::xtensa_ae_l32_xc, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32_xp: +return {Intrinsic::xtensa_ae_l32_xp, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32f24_i: +return {Intrinsic::xtensa_ae_l32f24_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l32f24_ip: +return {Intrinsic::xtensa_ae_l32f24_ip, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32f24_x: +return {Intrinsic::xtensa_ae_l32f24_x, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l32f24_xc: +return {Intrinsic::xtensa_ae_l32f24_xc, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32f24_xp: +return {Intrinsic::xtensa_ae_l32f24_xp, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32m_i: +return {Intrinsic::xtensa_ae_l32m_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l32m_iu: +return {Intrinsic::xtensa_ae_l32m_iu, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32m_x: +return {Intrinsic::xtensa_ae_l32m_x, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l32m_xc: +return {Intrinsic::xtensa_ae_l32m_xc, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32m_xu: +return {Intrinsic::xtensa_ae_l32m_xu, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32x2_i: +return {Intrinsic::xtensa_ae_l32x2_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l32x2_ip: +return {Intrinsic::xtensa_ae_l32x2_ip, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32x2_ric: +return {Intrinsic::xtensa_ae_l32x2_ric, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_l32x2_rip: +return {Intrinsic::xtensa_ae_l32x2_rip, 0, 0x201}; +case 
Xtensa::BI__builtin_xtensa_ae_l32x2_x: +return {Intrinsic::xtensa_ae_l32x2_x, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l32x2_xc: +return {Intrinsic::xtensa_ae_l32x2_xc, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32x2_xp: +return {Intrinsic::xtensa_ae_l32x2_xp, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32x2f24_i: +return {Intrinsic::xtensa_ae_l32x2f24_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l32x2f24_ip: +return {Intrinsic::xtensa_ae_l32x2f24_ip, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32x2f24_ric: +return {Intrinsic::xtensa_ae_l32x2f24_ric, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_l32x2f24_rip: +return {Intrinsic::xtensa_ae_l32x2f24_rip, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_l32x2f24_x: +return {Intrinsic::xtensa_ae_l32x2f24_x, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l32x2f24_xc: +return {Intrinsic::xtensa_ae_l32x2f24_xc, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l32x2f24_xp: +return {Intrinsic::xtensa_ae_l32x2f24_xp, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l64_i: +return {Intrinsic::xtensa_ae_l64_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l64_ip: +return {Intrinsic::xtensa_ae_l64_ip, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l64_x: +return {Intrinsic::xtensa_ae_l64_x, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_l64_xc: +return {Intrinsic::xtensa_ae_l64_xc, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_l64_xp: +return {Intrinsic::xtensa_ae_l64_xp, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_la16x4_ic: +return {Intrinsic::xtensa_ae_la16x4_ic, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la16x4_ip: +return {Intrinsic::xtensa_ae_la16x4_ip, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la16x4_ric: +return {Intrinsic::xtensa_ae_la16x4_ric, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la16x4_rip: +return {Intrinsic::xtensa_ae_la16x4_rip, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la16x4neg_pc: +return {Intrinsic::xtensa_ae_la16x4neg_pc, 0, 0x201}; 
+case Xtensa::BI__builtin_xtensa_ae_la16x4pos_pc: +return {Intrinsic::xtensa_ae_la16x4pos_pc, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_la24_ic: +return {Intrinsic::xtensa_ae_la24_ic, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la24_ip: +return {Intrinsic::xtensa_ae_la24_ip, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la24_ric: +return {Intrinsic::xtensa_ae_la24_ric, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la24_rip: +return {Intrinsic::xtensa_ae_la24_rip, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la24neg_pc: +return {Intrinsic::xtensa_ae_la24neg_pc, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_la24pos_pc: +return {Intrinsic::xtensa_ae_la24pos_pc, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_la24x2_ic: +return {Intrinsic::xtensa_ae_la24x2_ic, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la24x2_ip: +return {Intrinsic::xtensa_ae_la24x2_ip, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la24x2_ric: +return {Intrinsic::xtensa_ae_la24x2_ric, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la24x2_rip: +return {Intrinsic::xtensa_ae_la24x2_rip, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la24x2neg_pc: +return {Intrinsic::xtensa_ae_la24x2neg_pc, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_la24x2pos_pc: +return {Intrinsic::xtensa_ae_la24x2pos_pc, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_la32x2_ic: +return {Intrinsic::xtensa_ae_la32x2_ic, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la32x2_ip: +return {Intrinsic::xtensa_ae_la32x2_ip, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la32x2_ric: +return {Intrinsic::xtensa_ae_la32x2_ric, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la32x2_rip: +return {Intrinsic::xtensa_ae_la32x2_rip, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la32x2f24_ic: +return {Intrinsic::xtensa_ae_la32x2f24_ic, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la32x2f24_ip: +return {Intrinsic::xtensa_ae_la32x2f24_ip, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la32x2f24_ric: +return 
{Intrinsic::xtensa_ae_la32x2f24_ric, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la32x2f24_rip: +return {Intrinsic::xtensa_ae_la32x2f24_rip, 0, 0x601}; +case Xtensa::BI__builtin_xtensa_ae_la32x2neg_pc: +return {Intrinsic::xtensa_ae_la32x2neg_pc, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_la32x2pos_pc: +return {Intrinsic::xtensa_ae_la32x2pos_pc, 0, 0x201}; +case Xtensa::BI__builtin_xtensa_ae_la64_pp: +return {Intrinsic::xtensa_ae_la64_pp, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_lalign64_i: +return {Intrinsic::xtensa_ae_lalign64_i, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_lb: +return {Intrinsic::xtensa_ae_lb, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_lbi: +return {Intrinsic::xtensa_ae_lbi, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_lbk: +return {Intrinsic::xtensa_ae_lbk, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_lbki: +return {Intrinsic::xtensa_ae_lbki, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_lbs: +return {Intrinsic::xtensa_ae_lbs, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_lbsi: +return {Intrinsic::xtensa_ae_lbsi, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_le16: +return {Intrinsic::xtensa_ae_le16, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_le32: +return {Intrinsic::xtensa_ae_le32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_le64: +return {Intrinsic::xtensa_ae_le64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_lt16: +return {Intrinsic::xtensa_ae_lt16, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_lt32: +return {Intrinsic::xtensa_ae_lt32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_lt64: +return {Intrinsic::xtensa_ae_lt64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_max32: +return {Intrinsic::xtensa_ae_max32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_max64: +return {Intrinsic::xtensa_ae_max64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_maxabs32s: +return {Intrinsic::xtensa_ae_maxabs32s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_maxabs64s: +return 
{Intrinsic::xtensa_ae_maxabs64s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_min32: +return {Intrinsic::xtensa_ae_min32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_min64: +return {Intrinsic::xtensa_ae_min64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_minabs32s: +return {Intrinsic::xtensa_ae_minabs32s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_minabs64s: +return {Intrinsic::xtensa_ae_minabs64s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mov: +return {Intrinsic::xtensa_ae_mov, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movad16_0: +return {Intrinsic::xtensa_ae_movad16_0, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movad16_1: +return {Intrinsic::xtensa_ae_movad16_1, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movad16_2: +return {Intrinsic::xtensa_ae_movad16_2, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movad16_3: +return {Intrinsic::xtensa_ae_movad16_3, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movad32_h: +return {Intrinsic::xtensa_ae_movad32_h, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movad32_l: +return {Intrinsic::xtensa_ae_movad32_l, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movalign: +return {Intrinsic::xtensa_ae_movalign, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movda16: +return {Intrinsic::xtensa_ae_movda16, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movda16x2: +return {Intrinsic::xtensa_ae_movda16x2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_movda32: +return {Intrinsic::xtensa_ae_movda32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movda32x2: +return {Intrinsic::xtensa_ae_movda32x2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_movf16x4: +return {Intrinsic::xtensa_ae_movf16x4, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_movf32x2: +return {Intrinsic::xtensa_ae_movf32x2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_movf64: +return {Intrinsic::xtensa_ae_movf64, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_movi: +return {Intrinsic::xtensa_ae_movi, 1, 
0x20001}; +case Xtensa::BI__builtin_xtensa_ae_movt16x4: +return {Intrinsic::xtensa_ae_movt16x4, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_movt32x2: +return {Intrinsic::xtensa_ae_movt32x2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_movt64: +return {Intrinsic::xtensa_ae_movt64, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mul16x4: +return {Intrinsic::xtensa_ae_mul16x4, 0, 0xc0003}; +case Xtensa::BI__builtin_xtensa_ae_mul32_hh: +return {Intrinsic::xtensa_ae_mul32_hh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32_lh: +return {Intrinsic::xtensa_ae_mul32_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32_ll: +return {Intrinsic::xtensa_ae_mul32_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32_ll_s2: +return {Intrinsic::xtensa_ae_mul32_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32u_ll: +return {Intrinsic::xtensa_ae_mul32u_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_h0: +return {Intrinsic::xtensa_ae_mul32x16_h0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_h0_s2: +return {Intrinsic::xtensa_ae_mul32x16_h0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_h1: +return {Intrinsic::xtensa_ae_mul32x16_h1, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_h1_s2: +return {Intrinsic::xtensa_ae_mul32x16_h1_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_h2: +return {Intrinsic::xtensa_ae_mul32x16_h2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_h2_s2: +return {Intrinsic::xtensa_ae_mul32x16_h2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_h3: +return {Intrinsic::xtensa_ae_mul32x16_h3, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_h3_s2: +return {Intrinsic::xtensa_ae_mul32x16_h3_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_l0: +return {Intrinsic::xtensa_ae_mul32x16_l0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_l0_s2: +return {Intrinsic::xtensa_ae_mul32x16_l0_s2, 1, 0x60001}; +case 
Xtensa::BI__builtin_xtensa_ae_mul32x16_l1: +return {Intrinsic::xtensa_ae_mul32x16_l1, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_l1_s2: +return {Intrinsic::xtensa_ae_mul32x16_l1_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_l2: +return {Intrinsic::xtensa_ae_mul32x16_l2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_l2_s2: +return {Intrinsic::xtensa_ae_mul32x16_l2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_l3: +return {Intrinsic::xtensa_ae_mul32x16_l3, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mul32x16_l3_s2: +return {Intrinsic::xtensa_ae_mul32x16_l3_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mula16x4: +return {Intrinsic::xtensa_ae_mula16x4, 0, 0xc0300}; +case Xtensa::BI__builtin_xtensa_ae_mula32_hh: +return {Intrinsic::xtensa_ae_mula32_hh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32_lh: +return {Intrinsic::xtensa_ae_mula32_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32_ll: +return {Intrinsic::xtensa_ae_mula32_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32_ll_s2: +return {Intrinsic::xtensa_ae_mula32_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32u_ll: +return {Intrinsic::xtensa_ae_mula32u_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_h0: +return {Intrinsic::xtensa_ae_mula32x16_h0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_h0_s2: +return {Intrinsic::xtensa_ae_mula32x16_h0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_h1: +return {Intrinsic::xtensa_ae_mula32x16_h1, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_h1_s2: +return {Intrinsic::xtensa_ae_mula32x16_h1_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_h2: +return {Intrinsic::xtensa_ae_mula32x16_h2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_h2_s2: +return {Intrinsic::xtensa_ae_mula32x16_h2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_h3: +return 
{Intrinsic::xtensa_ae_mula32x16_h3, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_h3_s2: +return {Intrinsic::xtensa_ae_mula32x16_h3_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_l0: +return {Intrinsic::xtensa_ae_mula32x16_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_l0_s2: +return {Intrinsic::xtensa_ae_mula32x16_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_l1: +return {Intrinsic::xtensa_ae_mula32x16_l1, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_l1_s2: +return {Intrinsic::xtensa_ae_mula32x16_l1_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_l2: +return {Intrinsic::xtensa_ae_mula32x16_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_l2_s2: +return {Intrinsic::xtensa_ae_mula32x16_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_l3: +return {Intrinsic::xtensa_ae_mula32x16_l3, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mula32x16_l3_s2: +return {Intrinsic::xtensa_ae_mula32x16_l3_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad24_hh_ll: +return {Intrinsic::xtensa_ae_mulaad24_hh_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulaad24_hh_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad24_hl_lh: +return {Intrinsic::xtensa_ae_mulaad24_hl_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulaad24_hl_lh_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad32x16_h0_l1: +return {Intrinsic::xtensa_ae_mulaad32x16_h0_l1, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad32x16_h0_l1_s2: +return {Intrinsic::xtensa_ae_mulaad32x16_h0_l1_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulaad32x16_h1_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulaad32x16_h1_l0_s2, 0, 0x60100}; +case 
Xtensa::BI__builtin_xtensa_ae_mulaad32x16_h2_l3: +return {Intrinsic::xtensa_ae_mulaad32x16_h2_l3, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad32x16_h2_l3_s2: +return {Intrinsic::xtensa_ae_mulaad32x16_h2_l3_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulaad32x16_h3_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaad32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulaad32x16_h3_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd16ss_11_00: +return {Intrinsic::xtensa_ae_mulaafd16ss_11_00, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd16ss_11_00_s2: +return {Intrinsic::xtensa_ae_mulaafd16ss_11_00_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd16ss_13_02: +return {Intrinsic::xtensa_ae_mulaafd16ss_13_02, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd16ss_13_02_s2: +return {Intrinsic::xtensa_ae_mulaafd16ss_13_02_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd16ss_33_22: +return {Intrinsic::xtensa_ae_mulaafd16ss_33_22, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd16ss_33_22_s2: +return {Intrinsic::xtensa_ae_mulaafd16ss_33_22_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd24_hh_ll: +return {Intrinsic::xtensa_ae_mulaafd24_hh_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulaafd24_hh_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd24_hl_lh: +return {Intrinsic::xtensa_ae_mulaafd24_hl_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulaafd24_hl_lh_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd32x16_h0_l1: +return {Intrinsic::xtensa_ae_mulaafd32x16_h0_l1, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd32x16_h0_l1_s2: +return {Intrinsic::xtensa_ae_mulaafd32x16_h0_l1_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd32x16_h1_l0: +return 
{Intrinsic::xtensa_ae_mulaafd32x16_h1_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulaafd32x16_h1_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd32x16_h2_l3: +return {Intrinsic::xtensa_ae_mulaafd32x16_h2_l3, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd32x16_h2_l3_s2: +return {Intrinsic::xtensa_ae_mulaafd32x16_h2_l3_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulaafd32x16_h3_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaafd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulaafd32x16_h3_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulac24: +return {Intrinsic::xtensa_ae_mulac24, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulac32x16_h: +return {Intrinsic::xtensa_ae_mulac32x16_h, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulac32x16_l: +return {Intrinsic::xtensa_ae_mulac32x16_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_00: +return {Intrinsic::xtensa_ae_mulaf16ss_00, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_00_s2: +return {Intrinsic::xtensa_ae_mulaf16ss_00_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_10: +return {Intrinsic::xtensa_ae_mulaf16ss_10, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_11: +return {Intrinsic::xtensa_ae_mulaf16ss_11, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_20: +return {Intrinsic::xtensa_ae_mulaf16ss_20, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_21: +return {Intrinsic::xtensa_ae_mulaf16ss_21, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_22: +return {Intrinsic::xtensa_ae_mulaf16ss_22, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_30: +return {Intrinsic::xtensa_ae_mulaf16ss_30, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_31: +return {Intrinsic::xtensa_ae_mulaf16ss_31, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_32: 
+return {Intrinsic::xtensa_ae_mulaf16ss_32, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16ss_33: +return {Intrinsic::xtensa_ae_mulaf16ss_33, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf16x4ss: +return {Intrinsic::xtensa_ae_mulaf16x4ss, 0, 0xc0300}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32r_hh: +return {Intrinsic::xtensa_ae_mulaf32r_hh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32r_lh: +return {Intrinsic::xtensa_ae_mulaf32r_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32r_ll: +return {Intrinsic::xtensa_ae_mulaf32r_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32r_ll_s2: +return {Intrinsic::xtensa_ae_mulaf32r_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32s_hh: +return {Intrinsic::xtensa_ae_mulaf32s_hh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32s_lh: +return {Intrinsic::xtensa_ae_mulaf32s_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32s_ll: +return {Intrinsic::xtensa_ae_mulaf32s_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32s_ll_s2: +return {Intrinsic::xtensa_ae_mulaf32s_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_h0: +return {Intrinsic::xtensa_ae_mulaf32x16_h0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_h0_s2: +return {Intrinsic::xtensa_ae_mulaf32x16_h0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_h1: +return {Intrinsic::xtensa_ae_mulaf32x16_h1, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_h1_s2: +return {Intrinsic::xtensa_ae_mulaf32x16_h1_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_h2: +return {Intrinsic::xtensa_ae_mulaf32x16_h2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_h2_s2: +return {Intrinsic::xtensa_ae_mulaf32x16_h2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_h3: +return {Intrinsic::xtensa_ae_mulaf32x16_h3, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_h3_s2: +return 
{Intrinsic::xtensa_ae_mulaf32x16_h3_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_l0: +return {Intrinsic::xtensa_ae_mulaf32x16_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_l0_s2: +return {Intrinsic::xtensa_ae_mulaf32x16_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_l1: +return {Intrinsic::xtensa_ae_mulaf32x16_l1, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_l1_s2: +return {Intrinsic::xtensa_ae_mulaf32x16_l1_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_l2: +return {Intrinsic::xtensa_ae_mulaf32x16_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_l2_s2: +return {Intrinsic::xtensa_ae_mulaf32x16_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_l3: +return {Intrinsic::xtensa_ae_mulaf32x16_l3, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf32x16_l3_s2: +return {Intrinsic::xtensa_ae_mulaf32x16_l3_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf48q32sp16s_l: +return {Intrinsic::xtensa_ae_mulaf48q32sp16s_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf48q32sp16s_l_s2: +return {Intrinsic::xtensa_ae_mulaf48q32sp16s_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf48q32sp16u_l: +return {Intrinsic::xtensa_ae_mulaf48q32sp16u_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaf48q32sp16u_l_s2: +return {Intrinsic::xtensa_ae_mulaf48q32sp16u_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafc24ra: +return {Intrinsic::xtensa_ae_mulafc24ra, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafc32x16ras_h: +return {Intrinsic::xtensa_ae_mulafc32x16ras_h, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafc32x16ras_l: +return {Intrinsic::xtensa_ae_mulafc32x16ras_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafd24x2_fir_h: +return {Intrinsic::xtensa_ae_mulafd24x2_fir_h, 0, 0x1c0300}; +case Xtensa::BI__builtin_xtensa_ae_mulafd24x2_fir_l: +return {Intrinsic::xtensa_ae_mulafd24x2_fir_l, 0, 0x1c0300}; +case 
Xtensa::BI__builtin_xtensa_ae_mulafd32x16x2_fir_hh: +return {Intrinsic::xtensa_ae_mulafd32x16x2_fir_hh, 0, 0x1c0300}; +case Xtensa::BI__builtin_xtensa_ae_mulafd32x16x2_fir_hl: +return {Intrinsic::xtensa_ae_mulafd32x16x2_fir_hl, 0, 0x1c0300}; +case Xtensa::BI__builtin_xtensa_ae_mulafd32x16x2_fir_lh: +return {Intrinsic::xtensa_ae_mulafd32x16x2_fir_lh, 0, 0x1c0300}; +case Xtensa::BI__builtin_xtensa_ae_mulafd32x16x2_fir_ll: +return {Intrinsic::xtensa_ae_mulafd32x16x2_fir_ll, 0, 0x1c0300}; +case Xtensa::BI__builtin_xtensa_ae_mulafp24x2r: +return {Intrinsic::xtensa_ae_mulafp24x2r, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp24x2r_s2: +return {Intrinsic::xtensa_ae_mulafp24x2r_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp24x2ra: +return {Intrinsic::xtensa_ae_mulafp24x2ra, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp24x2ra_s2: +return {Intrinsic::xtensa_ae_mulafp24x2ra_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp32x16x2ras_h: +return {Intrinsic::xtensa_ae_mulafp32x16x2ras_h, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp32x16x2ras_h_s2: +return {Intrinsic::xtensa_ae_mulafp32x16x2ras_h_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp32x16x2ras_l: +return {Intrinsic::xtensa_ae_mulafp32x16x2ras_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp32x16x2ras_l_s2: +return {Intrinsic::xtensa_ae_mulafp32x16x2ras_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp32x16x2rs_h: +return {Intrinsic::xtensa_ae_mulafp32x16x2rs_h, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp32x16x2rs_h_s2: +return {Intrinsic::xtensa_ae_mulafp32x16x2rs_h_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp32x16x2rs_l: +return {Intrinsic::xtensa_ae_mulafp32x16x2rs_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp32x16x2rs_l_s2: +return {Intrinsic::xtensa_ae_mulafp32x16x2rs_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp32x2ras: +return {Intrinsic::xtensa_ae_mulafp32x2ras, 0, 
0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafp32x2rs: +return {Intrinsic::xtensa_ae_mulafp32x2rs, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafq32sp24s_h_s2: +return {Intrinsic::xtensa_ae_mulafq32sp24s_h_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulafq32sp24s_l_s2: +return {Intrinsic::xtensa_ae_mulafq32sp24s_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulap24x2: +return {Intrinsic::xtensa_ae_mulap24x2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulap24x2_s2: +return {Intrinsic::xtensa_ae_mulap24x2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulap32x16x2_h: +return {Intrinsic::xtensa_ae_mulap32x16x2_h, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulap32x16x2_l: +return {Intrinsic::xtensa_ae_mulap32x16x2_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulap32x2: +return {Intrinsic::xtensa_ae_mulap32x2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaq32sp16s_l_s2: +return {Intrinsic::xtensa_ae_mulaq32sp16s_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulaq32sp16u_l_s2: +return {Intrinsic::xtensa_ae_mulaq32sp16u_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mularfq32sp24s_h_s2: +return {Intrinsic::xtensa_ae_mularfq32sp24s_h_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mularfq32sp24s_l_s2: +return {Intrinsic::xtensa_ae_mularfq32sp24s_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulas32f48p16s_hh: +return {Intrinsic::xtensa_ae_mulas32f48p16s_hh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulas32f48p16s_hh_s2: +return {Intrinsic::xtensa_ae_mulas32f48p16s_hh_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulas32f48p16s_lh: +return {Intrinsic::xtensa_ae_mulas32f48p16s_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulas32f48p16s_lh_s2: +return {Intrinsic::xtensa_ae_mulas32f48p16s_lh_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulas32f48p16s_ll: +return {Intrinsic::xtensa_ae_mulas32f48p16s_ll, 0, 0x60100}; +case 
Xtensa::BI__builtin_xtensa_ae_mulas32f48p16s_ll_s2: +return {Intrinsic::xtensa_ae_mulas32f48p16s_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasd24_hh_ll: +return {Intrinsic::xtensa_ae_mulasd24_hh_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulasd24_hh_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasd24_hl_lh: +return {Intrinsic::xtensa_ae_mulasd24_hl_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasd24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulasd24_hl_lh_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulasd32x16_h1_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulasd32x16_h1_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulasd32x16_h3_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulasd32x16_h3_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasfd24_hh_ll: +return {Intrinsic::xtensa_ae_mulasfd24_hh_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasfd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulasfd24_hh_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasfd24_hl_lh: +return {Intrinsic::xtensa_ae_mulasfd24_hl_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasfd24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulasfd24_hl_lh_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasfd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulasfd32x16_h1_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasfd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulasfd32x16_h1_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasfd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulasfd32x16_h3_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulasfd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulasfd32x16_h3_l2_s2, 0, 
0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulc24: +return {Intrinsic::xtensa_ae_mulc24, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulc32x16_h: +return {Intrinsic::xtensa_ae_mulc32x16_h, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulc32x16_l: +return {Intrinsic::xtensa_ae_mulc32x16_l, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_00: +return {Intrinsic::xtensa_ae_mulf16ss_00, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_00_s2: +return {Intrinsic::xtensa_ae_mulf16ss_00_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_10: +return {Intrinsic::xtensa_ae_mulf16ss_10, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_11: +return {Intrinsic::xtensa_ae_mulf16ss_11, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_20: +return {Intrinsic::xtensa_ae_mulf16ss_20, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_21: +return {Intrinsic::xtensa_ae_mulf16ss_21, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_22: +return {Intrinsic::xtensa_ae_mulf16ss_22, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_30: +return {Intrinsic::xtensa_ae_mulf16ss_30, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_31: +return {Intrinsic::xtensa_ae_mulf16ss_31, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_32: +return {Intrinsic::xtensa_ae_mulf16ss_32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16ss_33: +return {Intrinsic::xtensa_ae_mulf16ss_33, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf16x4ss: +return {Intrinsic::xtensa_ae_mulf16x4ss, 0, 0xc0003}; +case Xtensa::BI__builtin_xtensa_ae_mulf32r_hh: +return {Intrinsic::xtensa_ae_mulf32r_hh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32r_lh: +return {Intrinsic::xtensa_ae_mulf32r_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32r_ll: +return {Intrinsic::xtensa_ae_mulf32r_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32r_ll_s2: +return {Intrinsic::xtensa_ae_mulf32r_ll_s2, 1, 0x60001}; 
+case Xtensa::BI__builtin_xtensa_ae_mulf32s_hh: +return {Intrinsic::xtensa_ae_mulf32s_hh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32s_lh: +return {Intrinsic::xtensa_ae_mulf32s_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32s_ll: +return {Intrinsic::xtensa_ae_mulf32s_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32s_ll_s2: +return {Intrinsic::xtensa_ae_mulf32s_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_h0: +return {Intrinsic::xtensa_ae_mulf32x16_h0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_h0_s2: +return {Intrinsic::xtensa_ae_mulf32x16_h0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_h1: +return {Intrinsic::xtensa_ae_mulf32x16_h1, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_h1_s2: +return {Intrinsic::xtensa_ae_mulf32x16_h1_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_h2: +return {Intrinsic::xtensa_ae_mulf32x16_h2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_h2_s2: +return {Intrinsic::xtensa_ae_mulf32x16_h2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_h3: +return {Intrinsic::xtensa_ae_mulf32x16_h3, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_h3_s2: +return {Intrinsic::xtensa_ae_mulf32x16_h3_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_l0: +return {Intrinsic::xtensa_ae_mulf32x16_l0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_l0_s2: +return {Intrinsic::xtensa_ae_mulf32x16_l0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_l1: +return {Intrinsic::xtensa_ae_mulf32x16_l1, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_l1_s2: +return {Intrinsic::xtensa_ae_mulf32x16_l1_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_l2: +return {Intrinsic::xtensa_ae_mulf32x16_l2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_l2_s2: +return {Intrinsic::xtensa_ae_mulf32x16_l2_s2, 1, 0x60001}; +case 
Xtensa::BI__builtin_xtensa_ae_mulf32x16_l3: +return {Intrinsic::xtensa_ae_mulf32x16_l3, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf32x16_l3_s2: +return {Intrinsic::xtensa_ae_mulf32x16_l3_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf48q32sp16s_l: +return {Intrinsic::xtensa_ae_mulf48q32sp16s_l, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf48q32sp16s_l_s2: +return {Intrinsic::xtensa_ae_mulf48q32sp16s_l_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf48q32sp16u_l: +return {Intrinsic::xtensa_ae_mulf48q32sp16u_l, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulf48q32sp16u_l_s2: +return {Intrinsic::xtensa_ae_mulf48q32sp16u_l_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfc24ra: +return {Intrinsic::xtensa_ae_mulfc24ra, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfc32x16ras_h: +return {Intrinsic::xtensa_ae_mulfc32x16ras_h, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfc32x16ras_l: +return {Intrinsic::xtensa_ae_mulfc32x16ras_l, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfd24x2_fir_h: +return {Intrinsic::xtensa_ae_mulfd24x2_fir_h, 0, 0x1c0003}; +case Xtensa::BI__builtin_xtensa_ae_mulfd24x2_fir_l: +return {Intrinsic::xtensa_ae_mulfd24x2_fir_l, 0, 0x1c0003}; +case Xtensa::BI__builtin_xtensa_ae_mulfd32x16x2_fir_hh: +return {Intrinsic::xtensa_ae_mulfd32x16x2_fir_hh, 0, 0x1c0003}; +case Xtensa::BI__builtin_xtensa_ae_mulfd32x16x2_fir_hl: +return {Intrinsic::xtensa_ae_mulfd32x16x2_fir_hl, 0, 0x1c0003}; +case Xtensa::BI__builtin_xtensa_ae_mulfd32x16x2_fir_lh: +return {Intrinsic::xtensa_ae_mulfd32x16x2_fir_lh, 0, 0x1c0003}; +case Xtensa::BI__builtin_xtensa_ae_mulfd32x16x2_fir_ll: +return {Intrinsic::xtensa_ae_mulfd32x16x2_fir_ll, 0, 0x1c0003}; +case Xtensa::BI__builtin_xtensa_ae_mulfp16x4ras: +return {Intrinsic::xtensa_ae_mulfp16x4ras, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp16x4s: +return {Intrinsic::xtensa_ae_mulfp16x4s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp24x2r: +return 
{Intrinsic::xtensa_ae_mulfp24x2r, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp24x2r_s2: +return {Intrinsic::xtensa_ae_mulfp24x2r_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp24x2ra: +return {Intrinsic::xtensa_ae_mulfp24x2ra, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp24x2ra_s2: +return {Intrinsic::xtensa_ae_mulfp24x2ra_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp32x16x2ras_h: +return {Intrinsic::xtensa_ae_mulfp32x16x2ras_h, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp32x16x2ras_h_s2: +return {Intrinsic::xtensa_ae_mulfp32x16x2ras_h_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp32x16x2ras_l: +return {Intrinsic::xtensa_ae_mulfp32x16x2ras_l, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp32x16x2ras_l_s2: +return {Intrinsic::xtensa_ae_mulfp32x16x2ras_l_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp32x16x2rs_h: +return {Intrinsic::xtensa_ae_mulfp32x16x2rs_h, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp32x16x2rs_h_s2: +return {Intrinsic::xtensa_ae_mulfp32x16x2rs_h_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp32x16x2rs_l: +return {Intrinsic::xtensa_ae_mulfp32x16x2rs_l, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp32x16x2rs_l_s2: +return {Intrinsic::xtensa_ae_mulfp32x16x2rs_l_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp32x2ras: +return {Intrinsic::xtensa_ae_mulfp32x2ras, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfp32x2rs: +return {Intrinsic::xtensa_ae_mulfp32x2rs, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfq32sp24s_h_s2: +return {Intrinsic::xtensa_ae_mulfq32sp24s_h_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulfq32sp24s_l_s2: +return {Intrinsic::xtensa_ae_mulfq32sp24s_l_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulp24x2: +return {Intrinsic::xtensa_ae_mulp24x2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulp24x2_s2: +return {Intrinsic::xtensa_ae_mulp24x2_s2, 1, 0x60001}; +case 
Xtensa::BI__builtin_xtensa_ae_mulp32x16x2_h: +return {Intrinsic::xtensa_ae_mulp32x16x2_h, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulp32x16x2_l: +return {Intrinsic::xtensa_ae_mulp32x16x2_l, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulp32x2: +return {Intrinsic::xtensa_ae_mulp32x2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulq32sp16s_l_s2: +return {Intrinsic::xtensa_ae_mulq32sp16s_l_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulq32sp16u_l_s2: +return {Intrinsic::xtensa_ae_mulq32sp16u_l_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulrfq32sp24s_h_s2: +return {Intrinsic::xtensa_ae_mulrfq32sp24s_h_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulrfq32sp24s_l_s2: +return {Intrinsic::xtensa_ae_mulrfq32sp24s_l_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_muls16x4: +return {Intrinsic::xtensa_ae_muls16x4, 0, 0xc0300}; +case Xtensa::BI__builtin_xtensa_ae_muls32_hh: +return {Intrinsic::xtensa_ae_muls32_hh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32_lh: +return {Intrinsic::xtensa_ae_muls32_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32_ll: +return {Intrinsic::xtensa_ae_muls32_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32f48p16s_hh: +return {Intrinsic::xtensa_ae_muls32f48p16s_hh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_muls32f48p16s_hh_s2: +return {Intrinsic::xtensa_ae_muls32f48p16s_hh_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_muls32f48p16s_lh: +return {Intrinsic::xtensa_ae_muls32f48p16s_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_muls32f48p16s_lh_s2: +return {Intrinsic::xtensa_ae_muls32f48p16s_lh_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_muls32f48p16s_ll: +return {Intrinsic::xtensa_ae_muls32f48p16s_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_muls32f48p16s_ll_s2: +return {Intrinsic::xtensa_ae_muls32f48p16s_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_muls32u_ll: +return {Intrinsic::xtensa_ae_muls32u_ll, 0, 0x60100}; +case 
Xtensa::BI__builtin_xtensa_ae_muls32x16_h0: +return {Intrinsic::xtensa_ae_muls32x16_h0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_h0_s2: +return {Intrinsic::xtensa_ae_muls32x16_h0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_h1: +return {Intrinsic::xtensa_ae_muls32x16_h1, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_h1_s2: +return {Intrinsic::xtensa_ae_muls32x16_h1_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_h2: +return {Intrinsic::xtensa_ae_muls32x16_h2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_h2_s2: +return {Intrinsic::xtensa_ae_muls32x16_h2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_h3: +return {Intrinsic::xtensa_ae_muls32x16_h3, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_h3_s2: +return {Intrinsic::xtensa_ae_muls32x16_h3_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_l0: +return {Intrinsic::xtensa_ae_muls32x16_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_l0_s2: +return {Intrinsic::xtensa_ae_muls32x16_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_l1: +return {Intrinsic::xtensa_ae_muls32x16_l1, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_l1_s2: +return {Intrinsic::xtensa_ae_muls32x16_l1_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_l2: +return {Intrinsic::xtensa_ae_muls32x16_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_l2_s2: +return {Intrinsic::xtensa_ae_muls32x16_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_l3: +return {Intrinsic::xtensa_ae_muls32x16_l3, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_muls32x16_l3_s2: +return {Intrinsic::xtensa_ae_muls32x16_l3_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsad24_hh_ll: +return {Intrinsic::xtensa_ae_mulsad24_hh_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsad24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulsad24_hh_ll_s2, 0, 0x60100}; +case 
Xtensa::BI__builtin_xtensa_ae_mulsad32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulsad32x16_h1_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsad32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulsad32x16_h1_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsad32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulsad32x16_h3_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsad32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulsad32x16_h3_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsafd24_hh_ll: +return {Intrinsic::xtensa_ae_mulsafd24_hh_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsafd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulsafd24_hh_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsafd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulsafd32x16_h1_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsafd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulsafd32x16_h1_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsafd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulsafd32x16_h3_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsafd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulsafd32x16_h3_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_00: +return {Intrinsic::xtensa_ae_mulsf16ss_00, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_00_s2: +return {Intrinsic::xtensa_ae_mulsf16ss_00_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_10: +return {Intrinsic::xtensa_ae_mulsf16ss_10, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_11: +return {Intrinsic::xtensa_ae_mulsf16ss_11, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_20: +return {Intrinsic::xtensa_ae_mulsf16ss_20, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_21: +return {Intrinsic::xtensa_ae_mulsf16ss_21, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_22: +return {Intrinsic::xtensa_ae_mulsf16ss_22, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_30: 
+return {Intrinsic::xtensa_ae_mulsf16ss_30, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_31: +return {Intrinsic::xtensa_ae_mulsf16ss_31, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_32: +return {Intrinsic::xtensa_ae_mulsf16ss_32, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16ss_33: +return {Intrinsic::xtensa_ae_mulsf16ss_33, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf16x4ss: +return {Intrinsic::xtensa_ae_mulsf16x4ss, 0, 0xc0300}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32r_hh: +return {Intrinsic::xtensa_ae_mulsf32r_hh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32r_lh: +return {Intrinsic::xtensa_ae_mulsf32r_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32r_ll: +return {Intrinsic::xtensa_ae_mulsf32r_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32r_ll_s2: +return {Intrinsic::xtensa_ae_mulsf32r_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32s_hh: +return {Intrinsic::xtensa_ae_mulsf32s_hh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32s_lh: +return {Intrinsic::xtensa_ae_mulsf32s_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32s_ll: +return {Intrinsic::xtensa_ae_mulsf32s_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_h0: +return {Intrinsic::xtensa_ae_mulsf32x16_h0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_h0_s2: +return {Intrinsic::xtensa_ae_mulsf32x16_h0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_h1: +return {Intrinsic::xtensa_ae_mulsf32x16_h1, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_h1_s2: +return {Intrinsic::xtensa_ae_mulsf32x16_h1_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_h2: +return {Intrinsic::xtensa_ae_mulsf32x16_h2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_h2_s2: +return {Intrinsic::xtensa_ae_mulsf32x16_h2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_h3: +return {Intrinsic::xtensa_ae_mulsf32x16_h3, 0, 
0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_h3_s2: +return {Intrinsic::xtensa_ae_mulsf32x16_h3_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_l0: +return {Intrinsic::xtensa_ae_mulsf32x16_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_l0_s2: +return {Intrinsic::xtensa_ae_mulsf32x16_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_l1: +return {Intrinsic::xtensa_ae_mulsf32x16_l1, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_l1_s2: +return {Intrinsic::xtensa_ae_mulsf32x16_l1_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_l2: +return {Intrinsic::xtensa_ae_mulsf32x16_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_l2_s2: +return {Intrinsic::xtensa_ae_mulsf32x16_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_l3: +return {Intrinsic::xtensa_ae_mulsf32x16_l3, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf32x16_l3_s2: +return {Intrinsic::xtensa_ae_mulsf32x16_l3_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf48q32sp16s_l: +return {Intrinsic::xtensa_ae_mulsf48q32sp16s_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf48q32sp16s_l_s2: +return {Intrinsic::xtensa_ae_mulsf48q32sp16s_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf48q32sp16u_l: +return {Intrinsic::xtensa_ae_mulsf48q32sp16u_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsf48q32sp16u_l_s2: +return {Intrinsic::xtensa_ae_mulsf48q32sp16u_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp24x2r: +return {Intrinsic::xtensa_ae_mulsfp24x2r, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp24x2r_s2: +return {Intrinsic::xtensa_ae_mulsfp24x2r_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp24x2ra: +return {Intrinsic::xtensa_ae_mulsfp24x2ra, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp24x2ra_s2: +return {Intrinsic::xtensa_ae_mulsfp24x2ra_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp32x16x2ras_h: 
+return {Intrinsic::xtensa_ae_mulsfp32x16x2ras_h, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp32x16x2ras_h_s2: +return {Intrinsic::xtensa_ae_mulsfp32x16x2ras_h_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp32x16x2ras_l: +return {Intrinsic::xtensa_ae_mulsfp32x16x2ras_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp32x16x2ras_l_s2: +return {Intrinsic::xtensa_ae_mulsfp32x16x2ras_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp32x16x2rs_h: +return {Intrinsic::xtensa_ae_mulsfp32x16x2rs_h, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp32x16x2rs_h_s2: +return {Intrinsic::xtensa_ae_mulsfp32x16x2rs_h_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp32x16x2rs_l: +return {Intrinsic::xtensa_ae_mulsfp32x16x2rs_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp32x16x2rs_l_s2: +return {Intrinsic::xtensa_ae_mulsfp32x16x2rs_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp32x2ras: +return {Intrinsic::xtensa_ae_mulsfp32x2ras, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfp32x2rs: +return {Intrinsic::xtensa_ae_mulsfp32x2rs, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfq32sp24s_h_s2: +return {Intrinsic::xtensa_ae_mulsfq32sp24s_h_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsfq32sp24s_l_s2: +return {Intrinsic::xtensa_ae_mulsfq32sp24s_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsp24x2: +return {Intrinsic::xtensa_ae_mulsp24x2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsp24x2_s2: +return {Intrinsic::xtensa_ae_mulsp24x2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsp32x16x2_h: +return {Intrinsic::xtensa_ae_mulsp32x16x2_h, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsp32x16x2_l: +return {Intrinsic::xtensa_ae_mulsp32x16x2_l, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsp32x2: +return {Intrinsic::xtensa_ae_mulsp32x2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsq32sp16s_l_s2: +return 
{Intrinsic::xtensa_ae_mulsq32sp16s_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsq32sp16u_l_s2: +return {Intrinsic::xtensa_ae_mulsq32sp16u_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsrfq32sp24s_h_s2: +return {Intrinsic::xtensa_ae_mulsrfq32sp24s_h_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulsrfq32sp24s_l_s2: +return {Intrinsic::xtensa_ae_mulsrfq32sp24s_l_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulss32f48p16s_hh: +return {Intrinsic::xtensa_ae_mulss32f48p16s_hh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulss32f48p16s_hh_s2: +return {Intrinsic::xtensa_ae_mulss32f48p16s_hh_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulss32f48p16s_lh: +return {Intrinsic::xtensa_ae_mulss32f48p16s_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulss32f48p16s_lh_s2: +return {Intrinsic::xtensa_ae_mulss32f48p16s_lh_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulss32f48p16s_ll: +return {Intrinsic::xtensa_ae_mulss32f48p16s_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulss32f48p16s_ll_s2: +return {Intrinsic::xtensa_ae_mulss32f48p16s_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssd24_hh_ll: +return {Intrinsic::xtensa_ae_mulssd24_hh_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulssd24_hh_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssd24_hl_lh: +return {Intrinsic::xtensa_ae_mulssd24_hl_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssd24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulssd24_hl_lh_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulssd32x16_h1_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulssd32x16_h1_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulssd32x16_h3_l2, 0, 0x60100}; +case 
Xtensa::BI__builtin_xtensa_ae_mulssd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulssd32x16_h3_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd16ss_11_00: +return {Intrinsic::xtensa_ae_mulssfd16ss_11_00, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd16ss_11_00_s2: +return {Intrinsic::xtensa_ae_mulssfd16ss_11_00_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd16ss_13_02: +return {Intrinsic::xtensa_ae_mulssfd16ss_13_02, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd16ss_13_02_s2: +return {Intrinsic::xtensa_ae_mulssfd16ss_13_02_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd16ss_33_22: +return {Intrinsic::xtensa_ae_mulssfd16ss_33_22, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd16ss_33_22_s2: +return {Intrinsic::xtensa_ae_mulssfd16ss_33_22_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd24_hh_ll: +return {Intrinsic::xtensa_ae_mulssfd24_hh_ll, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulssfd24_hh_ll_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd24_hl_lh: +return {Intrinsic::xtensa_ae_mulssfd24_hl_lh, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulssfd24_hl_lh_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulssfd32x16_h1_l0, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulssfd32x16_h1_l0_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulssfd32x16_h3_l2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulssfd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulssfd32x16_h3_l2_s2, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad24_hh_ll: +return {Intrinsic::xtensa_ae_mulzaad24_hh_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad24_hh_ll_s2: +return 
{Intrinsic::xtensa_ae_mulzaad24_hh_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad24_hl_lh: +return {Intrinsic::xtensa_ae_mulzaad24_hl_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulzaad24_hl_lh_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad32x16_h0_l1: +return {Intrinsic::xtensa_ae_mulzaad32x16_h0_l1, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad32x16_h0_l1_s2: +return {Intrinsic::xtensa_ae_mulzaad32x16_h0_l1_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulzaad32x16_h1_l0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulzaad32x16_h1_l0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad32x16_h2_l3: +return {Intrinsic::xtensa_ae_mulzaad32x16_h2_l3, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad32x16_h2_l3_s2: +return {Intrinsic::xtensa_ae_mulzaad32x16_h2_l3_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulzaad32x16_h3_l2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaad32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulzaad32x16_h3_l2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd16ss_11_00: +return {Intrinsic::xtensa_ae_mulzaafd16ss_11_00, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd16ss_11_00_s2: +return {Intrinsic::xtensa_ae_mulzaafd16ss_11_00_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd16ss_13_02: +return {Intrinsic::xtensa_ae_mulzaafd16ss_13_02, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd16ss_13_02_s2: +return {Intrinsic::xtensa_ae_mulzaafd16ss_13_02_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd16ss_33_22: +return {Intrinsic::xtensa_ae_mulzaafd16ss_33_22, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd16ss_33_22_s2: +return {Intrinsic::xtensa_ae_mulzaafd16ss_33_22_s2, 1, 0x60001}; 
+case Xtensa::BI__builtin_xtensa_ae_mulzaafd24_hh_ll: +return {Intrinsic::xtensa_ae_mulzaafd24_hh_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulzaafd24_hh_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd24_hl_lh: +return {Intrinsic::xtensa_ae_mulzaafd24_hl_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulzaafd24_hl_lh_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd32x16_h0_l1: +return {Intrinsic::xtensa_ae_mulzaafd32x16_h0_l1, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd32x16_h0_l1_s2: +return {Intrinsic::xtensa_ae_mulzaafd32x16_h0_l1_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulzaafd32x16_h1_l0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulzaafd32x16_h1_l0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd32x16_h2_l3: +return {Intrinsic::xtensa_ae_mulzaafd32x16_h2_l3, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd32x16_h2_l3_s2: +return {Intrinsic::xtensa_ae_mulzaafd32x16_h2_l3_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulzaafd32x16_h3_l2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzaafd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulzaafd32x16_h3_l2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasd24_hh_ll: +return {Intrinsic::xtensa_ae_mulzasd24_hh_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulzasd24_hh_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasd24_hl_lh: +return {Intrinsic::xtensa_ae_mulzasd24_hl_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasd24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulzasd24_hl_lh_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasd32x16_h1_l0: +return 
{Intrinsic::xtensa_ae_mulzasd32x16_h1_l0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulzasd32x16_h1_l0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulzasd32x16_h3_l2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulzasd32x16_h3_l2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasfd24_hh_ll: +return {Intrinsic::xtensa_ae_mulzasfd24_hh_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasfd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulzasfd24_hh_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasfd24_hl_lh: +return {Intrinsic::xtensa_ae_mulzasfd24_hl_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasfd24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulzasfd24_hl_lh_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasfd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulzasfd32x16_h1_l0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasfd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulzasfd32x16_h1_l0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasfd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulzasfd32x16_h3_l2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzasfd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulzasfd32x16_h3_l2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsad24_hh_ll: +return {Intrinsic::xtensa_ae_mulzsad24_hh_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsad24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulzsad24_hh_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsad32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulzsad32x16_h1_l0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsad32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulzsad32x16_h1_l0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsad32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulzsad32x16_h3_l2, 1, 0x60001}; +case 
Xtensa::BI__builtin_xtensa_ae_mulzsad32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulzsad32x16_h3_l2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsafd24_hh_ll: +return {Intrinsic::xtensa_ae_mulzsafd24_hh_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsafd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulzsafd24_hh_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsafd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulzsafd32x16_h1_l0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsafd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulzsafd32x16_h1_l0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsafd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulzsafd32x16_h3_l2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzsafd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulzsafd32x16_h3_l2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssd24_hh_ll: +return {Intrinsic::xtensa_ae_mulzssd24_hh_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulzssd24_hh_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssd24_hl_lh: +return {Intrinsic::xtensa_ae_mulzssd24_hl_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssd24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulzssd24_hl_lh_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulzssd32x16_h1_l0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulzssd32x16_h1_l0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulzssd32x16_h3_l2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulzssd32x16_h3_l2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd16ss_11_00: +return {Intrinsic::xtensa_ae_mulzssfd16ss_11_00, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd16ss_11_00_s2: +return 
{Intrinsic::xtensa_ae_mulzssfd16ss_11_00_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd16ss_13_02: +return {Intrinsic::xtensa_ae_mulzssfd16ss_13_02, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd16ss_13_02_s2: +return {Intrinsic::xtensa_ae_mulzssfd16ss_13_02_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd16ss_33_22: +return {Intrinsic::xtensa_ae_mulzssfd16ss_33_22, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd16ss_33_22_s2: +return {Intrinsic::xtensa_ae_mulzssfd16ss_33_22_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd24_hh_ll: +return {Intrinsic::xtensa_ae_mulzssfd24_hh_ll, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd24_hh_ll_s2: +return {Intrinsic::xtensa_ae_mulzssfd24_hh_ll_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd24_hl_lh: +return {Intrinsic::xtensa_ae_mulzssfd24_hl_lh, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd24_hl_lh_s2: +return {Intrinsic::xtensa_ae_mulzssfd24_hl_lh_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd32x16_h1_l0: +return {Intrinsic::xtensa_ae_mulzssfd32x16_h1_l0, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd32x16_h1_l0_s2: +return {Intrinsic::xtensa_ae_mulzssfd32x16_h1_l0_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd32x16_h3_l2: +return {Intrinsic::xtensa_ae_mulzssfd32x16_h3_l2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_mulzssfd32x16_h3_l2_s2: +return {Intrinsic::xtensa_ae_mulzssfd32x16_h3_l2_s2, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_nand: +return {Intrinsic::xtensa_ae_nand, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_neg16s: +return {Intrinsic::xtensa_ae_neg16s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_neg24s: +return {Intrinsic::xtensa_ae_neg24s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_neg32: +return {Intrinsic::xtensa_ae_neg32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_neg32s: +return {Intrinsic::xtensa_ae_neg32s, 1, 0x20001}; +case 
Xtensa::BI__builtin_xtensa_ae_neg64: +return {Intrinsic::xtensa_ae_neg64, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_neg64s: +return {Intrinsic::xtensa_ae_neg64s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_nsa64: +return {Intrinsic::xtensa_ae_nsa64, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_nsaz16_0: +return {Intrinsic::xtensa_ae_nsaz16_0, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_nsaz32_l: +return {Intrinsic::xtensa_ae_nsaz32_l, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_or: +return {Intrinsic::xtensa_ae_or, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_pksr24: +return {Intrinsic::xtensa_ae_pksr24, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_pksr32: +return {Intrinsic::xtensa_ae_pksr32, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_round16x4f32sasym: +return {Intrinsic::xtensa_ae_round16x4f32sasym, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_round16x4f32ssym: +return {Intrinsic::xtensa_ae_round16x4f32ssym, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_round24x2f48sasym: +return {Intrinsic::xtensa_ae_round24x2f48sasym, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_round24x2f48ssym: +return {Intrinsic::xtensa_ae_round24x2f48ssym, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_round32x2f48sasym: +return {Intrinsic::xtensa_ae_round32x2f48sasym, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_round32x2f48ssym: +return {Intrinsic::xtensa_ae_round32x2f48ssym, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_round32x2f64sasym: +return {Intrinsic::xtensa_ae_round32x2f64sasym, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_round32x2f64ssym: +return {Intrinsic::xtensa_ae_round32x2f64ssym, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_roundsp16f24asym: +return {Intrinsic::xtensa_ae_roundsp16f24asym, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_roundsp16f24sym: +return {Intrinsic::xtensa_ae_roundsp16f24sym, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_roundsp16q48x2asym: +return 
{Intrinsic::xtensa_ae_roundsp16q48x2asym, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_roundsp16q48x2sym: +return {Intrinsic::xtensa_ae_roundsp16q48x2sym, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_roundsq32f48asym: +return {Intrinsic::xtensa_ae_roundsq32f48asym, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_roundsq32f48sym: +return {Intrinsic::xtensa_ae_roundsq32f48sym, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_s16_0_i: +return {Intrinsic::xtensa_ae_s16_0_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s16_0_ip: +return {Intrinsic::xtensa_ae_s16_0_ip, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16_0_x: +return {Intrinsic::xtensa_ae_s16_0_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s16_0_xc: +return {Intrinsic::xtensa_ae_s16_0_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16_0_xp: +return {Intrinsic::xtensa_ae_s16_0_xp, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16m_l_i: +return {Intrinsic::xtensa_ae_s16m_l_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s16m_l_iu: +return {Intrinsic::xtensa_ae_s16m_l_iu, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16m_l_x: +return {Intrinsic::xtensa_ae_s16m_l_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s16m_l_xc: +return {Intrinsic::xtensa_ae_s16m_l_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16m_l_xu: +return {Intrinsic::xtensa_ae_s16m_l_xu, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16x2m_i: +return {Intrinsic::xtensa_ae_s16x2m_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s16x2m_iu: +return {Intrinsic::xtensa_ae_s16x2m_iu, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16x2m_x: +return {Intrinsic::xtensa_ae_s16x2m_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s16x2m_xc: +return {Intrinsic::xtensa_ae_s16x2m_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16x2m_xu: +return {Intrinsic::xtensa_ae_s16x2m_xu, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16x4_i: +return {Intrinsic::xtensa_ae_s16x4_i, 0, 0x70000}; +case 
Xtensa::BI__builtin_xtensa_ae_s16x4_ip: +return {Intrinsic::xtensa_ae_s16x4_ip, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16x4_ric: +return {Intrinsic::xtensa_ae_s16x4_ric, 0, 0x10200}; +case Xtensa::BI__builtin_xtensa_ae_s16x4_rip: +return {Intrinsic::xtensa_ae_s16x4_rip, 0, 0x10200}; +case Xtensa::BI__builtin_xtensa_ae_s16x4_x: +return {Intrinsic::xtensa_ae_s16x4_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s16x4_xc: +return {Intrinsic::xtensa_ae_s16x4_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s16x4_xp: +return {Intrinsic::xtensa_ae_s16x4_xp, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s24ra64s_i: +return {Intrinsic::xtensa_ae_s24ra64s_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s24ra64s_ip: +return {Intrinsic::xtensa_ae_s24ra64s_ip, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s24ra64s_x: +return {Intrinsic::xtensa_ae_s24ra64s_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s24ra64s_xc: +return {Intrinsic::xtensa_ae_s24ra64s_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s24ra64s_xp: +return {Intrinsic::xtensa_ae_s24ra64s_xp, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s24x2ra64s_ip: +return {Intrinsic::xtensa_ae_s24x2ra64s_ip, 0, 0x30400}; +case Xtensa::BI__builtin_xtensa_ae_s32_l_i: +return {Intrinsic::xtensa_ae_s32_l_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32_l_ip: +return {Intrinsic::xtensa_ae_s32_l_ip, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32_l_x: +return {Intrinsic::xtensa_ae_s32_l_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32_l_xc: +return {Intrinsic::xtensa_ae_s32_l_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32_l_xp: +return {Intrinsic::xtensa_ae_s32_l_xp, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32f24_l_i: +return {Intrinsic::xtensa_ae_s32f24_l_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32f24_l_ip: +return {Intrinsic::xtensa_ae_s32f24_l_ip, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32f24_l_x: +return 
{Intrinsic::xtensa_ae_s32f24_l_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32f24_l_xc: +return {Intrinsic::xtensa_ae_s32f24_l_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32f24_l_xp: +return {Intrinsic::xtensa_ae_s32f24_l_xp, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32m_i: +return {Intrinsic::xtensa_ae_s32m_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32m_iu: +return {Intrinsic::xtensa_ae_s32m_iu, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32m_x: +return {Intrinsic::xtensa_ae_s32m_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32m_xc: +return {Intrinsic::xtensa_ae_s32m_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32m_xu: +return {Intrinsic::xtensa_ae_s32m_xu, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32ra64s_i: +return {Intrinsic::xtensa_ae_s32ra64s_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32ra64s_ip: +return {Intrinsic::xtensa_ae_s32ra64s_ip, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32ra64s_x: +return {Intrinsic::xtensa_ae_s32ra64s_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32ra64s_xc: +return {Intrinsic::xtensa_ae_s32ra64s_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32ra64s_xp: +return {Intrinsic::xtensa_ae_s32ra64s_xp, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2_i: +return {Intrinsic::xtensa_ae_s32x2_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32x2_ip: +return {Intrinsic::xtensa_ae_s32x2_ip, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2_ric: +return {Intrinsic::xtensa_ae_s32x2_ric, 0, 0x10200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2_rip: +return {Intrinsic::xtensa_ae_s32x2_rip, 0, 0x10200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2_x: +return {Intrinsic::xtensa_ae_s32x2_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32x2_xc: +return {Intrinsic::xtensa_ae_s32x2_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2_xp: +return {Intrinsic::xtensa_ae_s32x2_xp, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2f24_i: 
+return {Intrinsic::xtensa_ae_s32x2f24_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32x2f24_ip: +return {Intrinsic::xtensa_ae_s32x2f24_ip, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2f24_ric: +return {Intrinsic::xtensa_ae_s32x2f24_ric, 0, 0x10200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2f24_rip: +return {Intrinsic::xtensa_ae_s32x2f24_rip, 0, 0x10200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2f24_x: +return {Intrinsic::xtensa_ae_s32x2f24_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s32x2f24_xc: +return {Intrinsic::xtensa_ae_s32x2f24_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2f24_xp: +return {Intrinsic::xtensa_ae_s32x2f24_xp, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s32x2ra64s_ip: +return {Intrinsic::xtensa_ae_s32x2ra64s_ip, 0, 0x30400}; +case Xtensa::BI__builtin_xtensa_ae_s64_i: +return {Intrinsic::xtensa_ae_s64_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s64_ip: +return {Intrinsic::xtensa_ae_s64_ip, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s64_x: +return {Intrinsic::xtensa_ae_s64_x, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_s64_xc: +return {Intrinsic::xtensa_ae_s64_xc, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_s64_xp: +return {Intrinsic::xtensa_ae_s64_xp, 0, 0x50200}; +case Xtensa::BI__builtin_xtensa_ae_sa16x4_ic: +return {Intrinsic::xtensa_ae_sa16x4_ic, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa16x4_ip: +return {Intrinsic::xtensa_ae_sa16x4_ip, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa16x4_ric: +return {Intrinsic::xtensa_ae_sa16x4_ric, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa16x4_rip: +return {Intrinsic::xtensa_ae_sa16x4_rip, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa24_l_ic: +return {Intrinsic::xtensa_ae_sa24_l_ic, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa24_l_ip: +return {Intrinsic::xtensa_ae_sa24_l_ip, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa24_l_ric: +return {Intrinsic::xtensa_ae_sa24_l_ric, 0, 0x10600}; +case 
Xtensa::BI__builtin_xtensa_ae_sa24_l_rip: +return {Intrinsic::xtensa_ae_sa24_l_rip, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa24x2_ic: +return {Intrinsic::xtensa_ae_sa24x2_ic, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa24x2_ip: +return {Intrinsic::xtensa_ae_sa24x2_ip, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa24x2_ric: +return {Intrinsic::xtensa_ae_sa24x2_ric, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa24x2_rip: +return {Intrinsic::xtensa_ae_sa24x2_rip, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa32x2_ic: +return {Intrinsic::xtensa_ae_sa32x2_ic, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa32x2_ip: +return {Intrinsic::xtensa_ae_sa32x2_ip, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa32x2_ric: +return {Intrinsic::xtensa_ae_sa32x2_ric, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa32x2_rip: +return {Intrinsic::xtensa_ae_sa32x2_rip, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa32x2f24_ic: +return {Intrinsic::xtensa_ae_sa32x2f24_ic, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa32x2f24_ip: +return {Intrinsic::xtensa_ae_sa32x2f24_ip, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa32x2f24_ric: +return {Intrinsic::xtensa_ae_sa32x2f24_ric, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa32x2f24_rip: +return {Intrinsic::xtensa_ae_sa32x2f24_rip, 0, 0x10600}; +case Xtensa::BI__builtin_xtensa_ae_sa64neg_fp: +return {Intrinsic::xtensa_ae_sa64neg_fp, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_sa64pos_fp: +return {Intrinsic::xtensa_ae_sa64pos_fp, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_salign64_i: +return {Intrinsic::xtensa_ae_salign64_i, 0, 0x70000}; +case Xtensa::BI__builtin_xtensa_ae_sat16x4: +return {Intrinsic::xtensa_ae_sat16x4, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sat24s: +return {Intrinsic::xtensa_ae_sat24s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_sat48s: +return {Intrinsic::xtensa_ae_sat48s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_satq56s: +return 
{Intrinsic::xtensa_ae_satq56s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_sb: +return {Intrinsic::xtensa_ae_sb, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_sb_ic: +return {Intrinsic::xtensa_ae_sb_ic, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_sb_ip: +return {Intrinsic::xtensa_ae_sb_ip, 0, 0x20100}; +case Xtensa::BI__builtin_xtensa_ae_sbf: +return {Intrinsic::xtensa_ae_sbf, 0, 0x100}; +case Xtensa::BI__builtin_xtensa_ae_sbf_ic: +return {Intrinsic::xtensa_ae_sbf_ic, 0, 0x100}; +case Xtensa::BI__builtin_xtensa_ae_sbf_ip: +return {Intrinsic::xtensa_ae_sbf_ip, 0, 0x100}; +case Xtensa::BI__builtin_xtensa_ae_sbi: +return {Intrinsic::xtensa_ae_sbi, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_sbi_ic: +return {Intrinsic::xtensa_ae_sbi_ic, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_sbi_ip: +return {Intrinsic::xtensa_ae_sbi_ip, 0, 0x60100}; +case Xtensa::BI__builtin_xtensa_ae_sel16i: +return {Intrinsic::xtensa_ae_sel16i, 1, 0xe0001}; +case Xtensa::BI__builtin_xtensa_ae_sel16i_n: +return {Intrinsic::xtensa_ae_sel16i_n, 1, 0xe0001}; +case Xtensa::BI__builtin_xtensa_ae_sext32: +return {Intrinsic::xtensa_ae_sext32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sext32x2d16_10: +return {Intrinsic::xtensa_ae_sext32x2d16_10, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_sext32x2d16_32: +return {Intrinsic::xtensa_ae_sext32x2d16_32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_sha32: +return {Intrinsic::xtensa_ae_sha32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_shortswap: +return {Intrinsic::xtensa_ae_shortswap, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_slaa16s: +return {Intrinsic::xtensa_ae_slaa16s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slaa32: +return {Intrinsic::xtensa_ae_slaa32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slaa32s: +return {Intrinsic::xtensa_ae_slaa32s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slaa64: +return {Intrinsic::xtensa_ae_slaa64, 1, 0x60001}; +case 
Xtensa::BI__builtin_xtensa_ae_slaa64s: +return {Intrinsic::xtensa_ae_slaa64s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slaaq56: +return {Intrinsic::xtensa_ae_slaaq56, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slai16s: +return {Intrinsic::xtensa_ae_slai16s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slai24: +return {Intrinsic::xtensa_ae_slai24, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slai24s: +return {Intrinsic::xtensa_ae_slai24s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slai32: +return {Intrinsic::xtensa_ae_slai32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slai32s: +return {Intrinsic::xtensa_ae_slai32s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slai64: +return {Intrinsic::xtensa_ae_slai64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slai64s: +return {Intrinsic::xtensa_ae_slai64s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slaisq56s: +return {Intrinsic::xtensa_ae_slaisq56s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_slas24: +return {Intrinsic::xtensa_ae_slas24, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_slas24s: +return {Intrinsic::xtensa_ae_slas24s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_slas32: +return {Intrinsic::xtensa_ae_slas32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_slas32s: +return {Intrinsic::xtensa_ae_slas32s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_slas64: +return {Intrinsic::xtensa_ae_slas64, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_slas64s: +return {Intrinsic::xtensa_ae_slas64s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_slasq56: +return {Intrinsic::xtensa_ae_slasq56, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_slassq56s: +return {Intrinsic::xtensa_ae_slassq56s, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_sra64_32: +return {Intrinsic::xtensa_ae_sra64_32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sraa16rs: +return {Intrinsic::xtensa_ae_sraa16rs, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sraa16s: +return 
{Intrinsic::xtensa_ae_sraa16s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sraa32: +return {Intrinsic::xtensa_ae_sraa32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sraa32rs: +return {Intrinsic::xtensa_ae_sraa32rs, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sraa32s: +return {Intrinsic::xtensa_ae_sraa32s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sraa64: +return {Intrinsic::xtensa_ae_sraa64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srai16: +return {Intrinsic::xtensa_ae_srai16, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srai16r: +return {Intrinsic::xtensa_ae_srai16r, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srai24: +return {Intrinsic::xtensa_ae_srai24, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srai32: +return {Intrinsic::xtensa_ae_srai32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srai32r: +return {Intrinsic::xtensa_ae_srai32r, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srai64: +return {Intrinsic::xtensa_ae_srai64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sras24: +return {Intrinsic::xtensa_ae_sras24, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_sras32: +return {Intrinsic::xtensa_ae_sras32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_sras64: +return {Intrinsic::xtensa_ae_sras64, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_srla32: +return {Intrinsic::xtensa_ae_srla32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srla64: +return {Intrinsic::xtensa_ae_srla64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srli24: +return {Intrinsic::xtensa_ae_srli24, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srli32: +return {Intrinsic::xtensa_ae_srli32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srli64: +return {Intrinsic::xtensa_ae_srli64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_srls24: +return {Intrinsic::xtensa_ae_srls24, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_srls32: +return {Intrinsic::xtensa_ae_srls32, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_srls64: 
+return {Intrinsic::xtensa_ae_srls64, 1, 0x20001}; +case Xtensa::BI__builtin_xtensa_ae_sub16: +return {Intrinsic::xtensa_ae_sub16, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sub16s: +return {Intrinsic::xtensa_ae_sub16s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sub24s: +return {Intrinsic::xtensa_ae_sub24s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sub32: +return {Intrinsic::xtensa_ae_sub32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sub32s: +return {Intrinsic::xtensa_ae_sub32s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sub64: +return {Intrinsic::xtensa_ae_sub64, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_sub64s: +return {Intrinsic::xtensa_ae_sub64s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_subadd32: +return {Intrinsic::xtensa_ae_subadd32, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_subadd32s: +return {Intrinsic::xtensa_ae_subadd32s, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_trunca32f64s_l: +return {Intrinsic::xtensa_ae_trunca32f64s_l, 1, 0xe0001}; +case Xtensa::BI__builtin_xtensa_ae_trunca32x2f64s: +return {Intrinsic::xtensa_ae_trunca32x2f64s, 1, 0xe0001}; +case Xtensa::BI__builtin_xtensa_ae_trunci32f64s_l: +return {Intrinsic::xtensa_ae_trunci32f64s_l, 1, 0xe0001}; +case Xtensa::BI__builtin_xtensa_ae_trunci32x2f64s: +return {Intrinsic::xtensa_ae_trunci32x2f64s, 1, 0xe0001}; +case Xtensa::BI__builtin_xtensa_ae_vldl16c: +return {Intrinsic::xtensa_ae_vldl16c, 0, 0x100}; +case Xtensa::BI__builtin_xtensa_ae_vldl16c_ic: +return {Intrinsic::xtensa_ae_vldl16c_ic, 0, 0x100}; +case Xtensa::BI__builtin_xtensa_ae_vldl16c_ip: +return {Intrinsic::xtensa_ae_vldl16c_ip, 0, 0x100}; +case Xtensa::BI__builtin_xtensa_ae_vldl16t: +return {Intrinsic::xtensa_ae_vldl16t, 0, 0x40003}; +case Xtensa::BI__builtin_xtensa_ae_vldl32t: +return {Intrinsic::xtensa_ae_vldl32t, 0, 0x40003}; +case Xtensa::BI__builtin_xtensa_ae_vldsht: +return {Intrinsic::xtensa_ae_vldsht, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_ae_vlel16t: +return 
{Intrinsic::xtensa_ae_vlel16t, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_vlel32t: +return {Intrinsic::xtensa_ae_vlel32t, 0, 0x40201}; +case Xtensa::BI__builtin_xtensa_ae_vles16c: +return {Intrinsic::xtensa_ae_vles16c, 0, 0x100}; +case Xtensa::BI__builtin_xtensa_ae_vles16c_ic: +return {Intrinsic::xtensa_ae_vles16c_ic, 0, 0x100}; +case Xtensa::BI__builtin_xtensa_ae_vles16c_ip: +return {Intrinsic::xtensa_ae_vles16c_ip, 0, 0x100}; +case Xtensa::BI__builtin_xtensa_ae_xor: +return {Intrinsic::xtensa_ae_xor, 1, 0x60001}; +case Xtensa::BI__builtin_xtensa_ae_zalign64: +return {Intrinsic::xtensa_ae_zalign64, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_bithead: +return {Intrinsic::xtensa_rur_ae_bithead, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_bitptr: +return {Intrinsic::xtensa_rur_ae_bitptr, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_bitsused: +return {Intrinsic::xtensa_rur_ae_bitsused, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_cbegin0: +return {Intrinsic::xtensa_rur_ae_cbegin0, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_cend0: +return {Intrinsic::xtensa_rur_ae_cend0, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_cw_sd_no: +return {Intrinsic::xtensa_rur_ae_cw_sd_no, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_cwrap: +return {Intrinsic::xtensa_rur_ae_cwrap, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_first_ts: +return {Intrinsic::xtensa_rur_ae_first_ts, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_nextoffset: +return {Intrinsic::xtensa_rur_ae_nextoffset, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_overflow: +return {Intrinsic::xtensa_rur_ae_overflow, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_ovf_sar: +return {Intrinsic::xtensa_rur_ae_ovf_sar, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_sar: +return {Intrinsic::xtensa_rur_ae_sar, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_searchdone: +return {Intrinsic::xtensa_rur_ae_searchdone, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_tablesize: +return 
{Intrinsic::xtensa_rur_ae_tablesize, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_rur_ae_ts_fts_bu_bp: +return {Intrinsic::xtensa_rur_ae_ts_fts_bu_bp, 1, 0x1}; +case Xtensa::BI__builtin_xtensa_wur_ae_bithead: +return {Intrinsic::xtensa_wur_ae_bithead, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_bitptr: +return {Intrinsic::xtensa_wur_ae_bitptr, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_bitsused: +return {Intrinsic::xtensa_wur_ae_bitsused, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_cbegin0: +return {Intrinsic::xtensa_wur_ae_cbegin0, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_cend0: +return {Intrinsic::xtensa_wur_ae_cend0, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_cw_sd_no: +return {Intrinsic::xtensa_wur_ae_cw_sd_no, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_cwrap: +return {Intrinsic::xtensa_wur_ae_cwrap, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_first_ts: +return {Intrinsic::xtensa_wur_ae_first_ts, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_nextoffset: +return {Intrinsic::xtensa_wur_ae_nextoffset, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_overflow: +return {Intrinsic::xtensa_wur_ae_overflow, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_ovf_sar: +return {Intrinsic::xtensa_wur_ae_ovf_sar, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_sar: +return {Intrinsic::xtensa_wur_ae_sar, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_searchdone: +return {Intrinsic::xtensa_wur_ae_searchdone, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_tablesize: +return {Intrinsic::xtensa_wur_ae_tablesize, 0, 0x10000}; +case Xtensa::BI__builtin_xtensa_wur_ae_ts_fts_bu_bp: +return {Intrinsic::xtensa_wur_ae_ts_fts_bu_bp, 0, 0x10000}; diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index bf36a06f27062..cad709d7f041c 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -22085,45 +22085,193 @@ Value 
*CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID, return Builder.CreateCall(F, Ops, ""); } -llvm::Value * -CodeGenFunction::EmitXtensaBuiltinExpr(unsigned BuiltinID, const CallExpr *E, - ReturnValueSlot ReturnValue, - llvm::Triple::ArchType Arch) { - +struct XtensaIntrinsicInfo { unsigned IntrinsicID; + unsigned Kind; + unsigned Arg; +}; + +static XtensaIntrinsicInfo GetXtensaIntrinsic(unsigned BuiltinID) { switch (BuiltinID) { - case Xtensa::BI__builtin_xtensa_xt_lsxp: - IntrinsicID = Intrinsic::xtensa_xt_lsxp; - break; case Xtensa::BI__builtin_xtensa_xt_lsip: - IntrinsicID = Intrinsic::xtensa_xt_lsip; - break; + return {Intrinsic::xtensa_xt_lsip, 2, 0x20100}; + case Xtensa::BI__builtin_xtensa_xt_lsxp: + return {Intrinsic::xtensa_xt_lsxp, 2, 0x20100}; +#include "clang/Basic/XtensaBuiltins.inc" default: llvm_unreachable("unexpected builtin ID"); } +} + +llvm::Value *CodeGenFunction::ConvertXtensaToC(Value *val, + llvm::Type *destType) { + Value *argCast; + llvm::Type *valType = val->getType(); + + if (valType != destType) { // i32 to C short or char + argCast = Builder.CreateTruncOrBitCast(val, destType, "cast"); + return argCast; + } else { + return val; + } +} + +llvm::Value *CodeGenFunction::ConvertXtensaToBc(const Expr *ArgExpr, + llvm::Type *destType) { + + Value *ArgVal = EmitScalarExpr(ArgExpr); + Value *ArgCast = ArgVal; + llvm::Type *ArgType = ArgVal->getType(); + bool sign = ArgExpr->getType()->isSignedIntegerType(); + + if (ArgType != destType) { // short,char + if (sign) + ArgCast = Builder.CreateSExtOrBitCast(ArgVal, destType, "cast"); + else + ArgCast = Builder.CreateZExtOrBitCast(ArgVal, destType, "cast"); + } + return ArgCast; +} + +llvm::Value * +CodeGenFunction::EmitXtensaBuiltinExpr(unsigned BuiltinID, const CallExpr *E, + ReturnValueSlot ReturnValue, + llvm::Triple::ArchType Arch) { - llvm::Function *F = CGM.getIntrinsic(IntrinsicID); - // 1st argument is passed by pointer - /* float lsip(float **a, int off) => float p = *a - ret, p' = 
@int.xtensa.lsip(p, off) - *a = p' - */ - auto InoutPtrTy = F->getArg(0)->getType()->getPointerTo(); - Address InoutPtrAddr = Builder.CreateElementBitCast( - EmitPointerWithAlignment(E->getArg(0)), InoutPtrTy); - - unsigned NumArgs = E->getNumArgs(); - Value *InoutVal = Builder.CreateLoad(InoutPtrAddr); - SmallVector Args; - - Args.push_back(InoutVal); - for (unsigned i = 1; i < NumArgs; i++) - Args.push_back(EmitScalarExpr(E->getArg(i))); - - Value *Val = Builder.CreateCall(F, Args, "retval"); - Value *Val0 = Builder.CreateExtractValue(Val, 0); - Value *Val1 = Builder.CreateExtractValue(Val, 1); - // ret store - Builder.CreateStore(Val1, InoutPtrAddr); - return Val0; + XtensaIntrinsicInfo Info = GetXtensaIntrinsic(BuiltinID); + unsigned Intrinsic = Info.IntrinsicID; + + llvm::Function *F = CGM.getIntrinsic(Intrinsic); + + switch (Info.Kind) { + case 0: { + // void case + // + // void builtin(t1 *out /*out*/,..,t2 *inout, ..., t3 in, ..,) => + // load t2 inout, ... + // {t1 out1, ..., t2 inout, ... ,} = func(t2 inout, ..., t3 in, ...) + // store (extractvalue 0) t1, .. 
+ + SmallVector Out; + SmallVector Inout; + SmallVector In; + SmallVector OutAddr; + + unsigned Code = Info.Arg; + unsigned CodeOut = Code & 0xff; + unsigned CodeInout = (Code >> 8) & 0xff; + unsigned CodeIn = (Code >> 16) & 0xff; + + for (unsigned i = 0; i < 8; ++i) { + if (CodeOut & (1 << i)) + Out.push_back(i); + if (CodeInout & (1 << i)) + Inout.push_back(i); + if (CodeIn & (1 << i)) + In.push_back(i); + } + + size_t asize = Inout.size() + In.size(); + SmallVector Args(asize, nullptr); + assert(Args.size() == asize); + + for (uint8_t idx : In) { + unsigned funArg = idx - Out.size(); + llvm::Type *destType = F->getArg(funArg)->getType(); + Args[funArg] = ConvertXtensaToBc(E->getArg(idx), destType); + } + + for (unsigned i = 0; i < Out.size(); ++i) { + unsigned idx = Out[i]; + Address AIn = EmitPointerWithAlignment(E->getArg(idx)); + Address AOut = AIn; + OutAddr.push_back(AOut); + } + + for (uint8_t idx : Inout) { + uint8_t FIdx = idx - Out.size(); + Address AIn = EmitPointerWithAlignment(E->getArg(idx)); + Address AOut = AIn; + OutAddr.push_back(AOut); + Value *Ptr = Builder.CreateLoad(AOut); + Args[FIdx] = Ptr; + } + + for (auto a : Args) + assert(a != nullptr); + + Value *Val = Builder.CreateCall(F, Args); + Value *Val0 = nullptr; + // check if out is a struct + if ((OutAddr.size() > 1)) { + for (unsigned i = 0; i < OutAddr.size(); ++i) { + Value *Out = Builder.CreateExtractValue(Val, i); + if (!Val0) // return the first value + Val0 = Out; + Address Addr = OutAddr[i]; + llvm::Type *DestType = Addr.getElementType(); + Value *OutConv = ConvertXtensaToC(Out, DestType); + Builder.CreateStore(OutConv, Addr); + } + } else if (OutAddr.size() == 1) { + Builder.CreateStore(Val, OutAddr[0]); + Val0 = Val; + } + assert(Val0); + return Val0; + } + case 1: { + // t_out bultin(t1 in1, t2 in2, ...) => + // t_out out1 = BcToXt( func(XtToBc(t1), XtToBc(t2), ...) 
) + unsigned Code = Info.Arg; + uint8_t CodeOut = Code & 0xff; + uint8_t CodeInout = (Code >> 8) & 0xff; + uint8_t CodeIn = (Code >> 16) & 0xff; + + SmallVector In; + + assert(CodeOut == 1 && CodeInout == 0 && "Invalid signature"); + for (unsigned i = 0; i < 8; ++i) { + if (CodeIn & (1 << i)) + In.push_back(i); + } + SmallVector Args(In.size(), nullptr); + for (uint8_t idx : In) { + uint8_t aIdx = idx - 1; + llvm::Type *destType = F->getArg(aIdx)->getType(); + Args[aIdx] = ConvertXtensaToBc(E->getArg(aIdx), destType); + } + Value *Val = Builder.CreateCall(F, Args, "retval"); + llvm::Type *ResultType = ConvertType(E->getType()); + Value *ValConv = ConvertXtensaToC(Val, ResultType); + return ValConv; + } + case 2: { + // 1st argument is passed by pointer + /* float lsip(float **a, int off) => float p = *a + ret, p' = @int.xtensa.lsip(p, off) + *a = p' + */ + auto InoutPtrTy = F->getArg(0)->getType()->getPointerTo(); + Address InoutPtrAddr = EmitPointerWithAlignment(E->getArg(0)) + .withElementType(InoutPtrTy); + + unsigned NumArgs = E->getNumArgs(); + Value *InoutVal = Builder.CreateLoad(InoutPtrAddr); + SmallVector Args; + + Args.push_back(InoutVal); + for (unsigned i = 1; i < NumArgs; i++) + Args.push_back(EmitScalarExpr(E->getArg(i))); + + Value *Val = Builder.CreateCall(F, Args, "retval"); + Value *Val0 = Builder.CreateExtractValue(Val, 0); + Value *Val1 = Builder.CreateExtractValue(Val, 1); + // ret store + Builder.CreateStore(Val1, InoutPtrAddr); + return Val0; + } + default: + llvm_unreachable("unknown intrinsic kind"); + } } diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index b43346e5c37d1..de812830d395d 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -4801,6 +4801,8 @@ class CodeGenFunction : public CodeGenTypeCache { /// Emits a reference binding to the passed in expression. 
RValue EmitReferenceBindingToExpr(const Expr *E); + llvm::Value *ConvertXtensaToBc(const Expr *Arg, llvm::Type *destType); + llvm::Value *ConvertXtensaToC(llvm::Value *arg, llvm::Type *destType); llvm::Value *EmitXtensaBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch); diff --git a/clang/test/CodeGen/Xtensa/xtensa-hifi-intrinsics.c b/clang/test/CodeGen/Xtensa/xtensa-hifi-intrinsics.c new file mode 100644 index 0000000000000..13b07f3a5cc74 --- /dev/null +++ b/clang/test/CodeGen/Xtensa/xtensa-hifi-intrinsics.c @@ -0,0 +1,21408 @@ +// RUN: split-file %s %t +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_abs16s.c | FileCheck %t/ae_abs16s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_abs24s.c | FileCheck %t/ae_abs24s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_abs32.c | FileCheck %t/ae_abs32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_abs32s.c | FileCheck %t/ae_abs32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_abs64.c | FileCheck %t/ae_abs64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_abs64s.c | FileCheck %t/ae_abs64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_add16.c | FileCheck %t/ae_add16.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_add16s.c | FileCheck %t/ae_add16s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_add24s.c | FileCheck %t/ae_add24s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_add32.c | FileCheck %t/ae_add32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_add32_hl_lh.c | FileCheck %t/ae_add32_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_add32s.c | FileCheck %t/ae_add32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_add64.c | FileCheck %t/ae_add64.c 
+// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_add64s.c | FileCheck %t/ae_add64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_addbrba32.c | FileCheck %t/ae_addbrba32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_addsub32.c | FileCheck %t/ae_addsub32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_addsub32s.c | FileCheck %t/ae_addsub32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_and.c | FileCheck %t/ae_and.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_cvt32x2f16_10.c | FileCheck %t/ae_cvt32x2f16_10.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_cvt32x2f16_32.c | FileCheck %t/ae_cvt32x2f16_32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_cvt48a32.c | FileCheck %t/ae_cvt48a32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_cvt64a32.c | FileCheck %t/ae_cvt64a32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_cvt64f32_h.c | FileCheck %t/ae_cvt64f32_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_cvta32f24s_h.c | FileCheck %t/ae_cvta32f24s_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_cvta32f24s_l.c | FileCheck %t/ae_cvta32f24s_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_cvtq56a32s.c | FileCheck %t/ae_cvtq56a32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_cvtq56p32s_h.c | FileCheck %t/ae_cvtq56p32s_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_cvtq56p32s_l.c | FileCheck %t/ae_cvtq56p32s_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_db.c | FileCheck %t/ae_db.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_db_ic.c | FileCheck %t/ae_db_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_db_ip.c | FileCheck 
%t/ae_db_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_dbi.c | FileCheck %t/ae_dbi.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_dbi_ic.c | FileCheck %t/ae_dbi_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_dbi_ip.c | FileCheck %t/ae_dbi_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_div64d32_h.c | FileCheck %t/ae_div64d32_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_div64d32_l.c | FileCheck %t/ae_div64d32_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_eq16.c | FileCheck %t/ae_eq16.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_eq32.c | FileCheck %t/ae_eq32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_eq64.c | FileCheck %t/ae_eq64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16_i.c | FileCheck %t/ae_l16_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16_ip.c | FileCheck %t/ae_l16_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16_x.c | FileCheck %t/ae_l16_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16_xc.c | FileCheck %t/ae_l16_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16_xp.c | FileCheck %t/ae_l16_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16m_i.c | FileCheck %t/ae_l16m_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16m_iu.c | FileCheck %t/ae_l16m_iu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16m_x.c | FileCheck %t/ae_l16m_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16m_xc.c | FileCheck %t/ae_l16m_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16m_xu.c | FileCheck %t/ae_l16m_xu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - 
%t/ae_l16x2m_i.c | FileCheck %t/ae_l16x2m_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x2m_iu.c | FileCheck %t/ae_l16x2m_iu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x2m_x.c | FileCheck %t/ae_l16x2m_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x2m_xc.c | FileCheck %t/ae_l16x2m_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x2m_xu.c | FileCheck %t/ae_l16x2m_xu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x4_i.c | FileCheck %t/ae_l16x4_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x4_ip.c | FileCheck %t/ae_l16x4_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x4_ric.c | FileCheck %t/ae_l16x4_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x4_rip.c | FileCheck %t/ae_l16x4_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x4_x.c | FileCheck %t/ae_l16x4_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x4_xc.c | FileCheck %t/ae_l16x4_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l16x4_xp.c | FileCheck %t/ae_l16x4_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32_i.c | FileCheck %t/ae_l32_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32_ip.c | FileCheck %t/ae_l32_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32_x.c | FileCheck %t/ae_l32_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32_xc.c | FileCheck %t/ae_l32_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32_xp.c | FileCheck %t/ae_l32_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32f24_i.c | FileCheck %t/ae_l32f24_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32f24_ip.c | FileCheck 
%t/ae_l32f24_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32f24_x.c | FileCheck %t/ae_l32f24_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32f24_xc.c | FileCheck %t/ae_l32f24_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32f24_xp.c | FileCheck %t/ae_l32f24_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32m_i.c | FileCheck %t/ae_l32m_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32m_iu.c | FileCheck %t/ae_l32m_iu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32m_x.c | FileCheck %t/ae_l32m_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32m_xc.c | FileCheck %t/ae_l32m_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32m_xu.c | FileCheck %t/ae_l32m_xu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2_i.c | FileCheck %t/ae_l32x2_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2_ip.c | FileCheck %t/ae_l32x2_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2_ric.c | FileCheck %t/ae_l32x2_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2_rip.c | FileCheck %t/ae_l32x2_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2_x.c | FileCheck %t/ae_l32x2_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2_xc.c | FileCheck %t/ae_l32x2_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2_xp.c | FileCheck %t/ae_l32x2_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2f24_i.c | FileCheck %t/ae_l32x2f24_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2f24_ip.c | FileCheck %t/ae_l32x2f24_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2f24_ric.c | FileCheck 
%t/ae_l32x2f24_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2f24_rip.c | FileCheck %t/ae_l32x2f24_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2f24_x.c | FileCheck %t/ae_l32x2f24_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2f24_xc.c | FileCheck %t/ae_l32x2f24_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l32x2f24_xp.c | FileCheck %t/ae_l32x2f24_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l64_i.c | FileCheck %t/ae_l64_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l64_ip.c | FileCheck %t/ae_l64_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l64_x.c | FileCheck %t/ae_l64_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l64_xc.c | FileCheck %t/ae_l64_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_l64_xp.c | FileCheck %t/ae_l64_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la16x4_ic.c | FileCheck %t/ae_la16x4_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la16x4_ip.c | FileCheck %t/ae_la16x4_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la16x4_ric.c | FileCheck %t/ae_la16x4_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la16x4_rip.c | FileCheck %t/ae_la16x4_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la16x4neg_pc.c | FileCheck %t/ae_la16x4neg_pc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la16x4pos_pc.c | FileCheck %t/ae_la16x4pos_pc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24_ic.c | FileCheck %t/ae_la24_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24_ip.c | FileCheck %t/ae_la24_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24_ric.c | 
FileCheck %t/ae_la24_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24_rip.c | FileCheck %t/ae_la24_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24neg_pc.c | FileCheck %t/ae_la24neg_pc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24pos_pc.c | FileCheck %t/ae_la24pos_pc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24x2_ic.c | FileCheck %t/ae_la24x2_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24x2_ip.c | FileCheck %t/ae_la24x2_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24x2_ric.c | FileCheck %t/ae_la24x2_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24x2_rip.c | FileCheck %t/ae_la24x2_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24x2neg_pc.c | FileCheck %t/ae_la24x2neg_pc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la24x2pos_pc.c | FileCheck %t/ae_la24x2pos_pc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la32x2_ic.c | FileCheck %t/ae_la32x2_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la32x2_ip.c | FileCheck %t/ae_la32x2_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la32x2_ric.c | FileCheck %t/ae_la32x2_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la32x2_rip.c | FileCheck %t/ae_la32x2_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la32x2f24_ic.c | FileCheck %t/ae_la32x2f24_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la32x2f24_ip.c | FileCheck %t/ae_la32x2f24_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la32x2f24_ric.c | FileCheck %t/ae_la32x2f24_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la32x2f24_rip.c | FileCheck %t/ae_la32x2f24_rip.c +// RUN: %clang 
-target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la32x2neg_pc.c | FileCheck %t/ae_la32x2neg_pc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la32x2pos_pc.c | FileCheck %t/ae_la32x2pos_pc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_la64_pp.c | FileCheck %t/ae_la64_pp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_lalign64_i.c | FileCheck %t/ae_lalign64_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_lb.c | FileCheck %t/ae_lb.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_lbi.c | FileCheck %t/ae_lbi.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_lbk.c | FileCheck %t/ae_lbk.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_lbki.c | FileCheck %t/ae_lbki.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_lbs.c | FileCheck %t/ae_lbs.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_lbsi.c | FileCheck %t/ae_lbsi.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_le16.c | FileCheck %t/ae_le16.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_le32.c | FileCheck %t/ae_le32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_le64.c | FileCheck %t/ae_le64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_lt16.c | FileCheck %t/ae_lt16.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_lt32.c | FileCheck %t/ae_lt32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_lt64.c | FileCheck %t/ae_lt64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_max32.c | FileCheck %t/ae_max32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_max64.c | FileCheck %t/ae_max64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_maxabs32s.c | FileCheck %t/ae_maxabs32s.c +// RUN: %clang -target 
xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_maxabs64s.c | FileCheck %t/ae_maxabs64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_min32.c | FileCheck %t/ae_min32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_min64.c | FileCheck %t/ae_min64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_minabs32s.c | FileCheck %t/ae_minabs32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_minabs64s.c | FileCheck %t/ae_minabs64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mov.c | FileCheck %t/ae_mov.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movad16_0.c | FileCheck %t/ae_movad16_0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movad16_1.c | FileCheck %t/ae_movad16_1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movad16_2.c | FileCheck %t/ae_movad16_2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movad16_3.c | FileCheck %t/ae_movad16_3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movad32_h.c | FileCheck %t/ae_movad32_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movad32_l.c | FileCheck %t/ae_movad32_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movalign.c | FileCheck %t/ae_movalign.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movda16.c | FileCheck %t/ae_movda16.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movda16x2.c | FileCheck %t/ae_movda16x2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movda32.c | FileCheck %t/ae_movda32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movda32x2.c | FileCheck %t/ae_movda32x2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movf16x4.c | FileCheck %t/ae_movf16x4.c +// RUN: %clang -target xtensa -mcpu=cnl -S 
-emit-llvm -O1 -o - %t/ae_movf32x2.c | FileCheck %t/ae_movf32x2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movf64.c | FileCheck %t/ae_movf64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movi.c | FileCheck %t/ae_movi.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movt16x4.c | FileCheck %t/ae_movt16x4.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movt32x2.c | FileCheck %t/ae_movt32x2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_movt64.c | FileCheck %t/ae_movt64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul16x4.c | FileCheck %t/ae_mul16x4.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32_hh.c | FileCheck %t/ae_mul32_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32_lh.c | FileCheck %t/ae_mul32_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32_ll.c | FileCheck %t/ae_mul32_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32_ll_s2.c | FileCheck %t/ae_mul32_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32u_ll.c | FileCheck %t/ae_mul32u_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_h0.c | FileCheck %t/ae_mul32x16_h0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_h0_s2.c | FileCheck %t/ae_mul32x16_h0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_h1.c | FileCheck %t/ae_mul32x16_h1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_h1_s2.c | FileCheck %t/ae_mul32x16_h1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_h2.c | FileCheck %t/ae_mul32x16_h2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_h2_s2.c | FileCheck %t/ae_mul32x16_h2_s2.c +// RUN: %clang 
-target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_h3.c | FileCheck %t/ae_mul32x16_h3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_h3_s2.c | FileCheck %t/ae_mul32x16_h3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_l0.c | FileCheck %t/ae_mul32x16_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_l0_s2.c | FileCheck %t/ae_mul32x16_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_l1.c | FileCheck %t/ae_mul32x16_l1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_l1_s2.c | FileCheck %t/ae_mul32x16_l1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_l2.c | FileCheck %t/ae_mul32x16_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_l2_s2.c | FileCheck %t/ae_mul32x16_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_l3.c | FileCheck %t/ae_mul32x16_l3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mul32x16_l3_s2.c | FileCheck %t/ae_mul32x16_l3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula16x4.c | FileCheck %t/ae_mula16x4.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32_hh.c | FileCheck %t/ae_mula32_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32_lh.c | FileCheck %t/ae_mula32_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32_ll.c | FileCheck %t/ae_mula32_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32_ll_s2.c | FileCheck %t/ae_mula32_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32u_ll.c | FileCheck %t/ae_mula32u_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_h0.c | FileCheck %t/ae_mula32x16_h0.c +// RUN: %clang -target 
xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_h0_s2.c | FileCheck %t/ae_mula32x16_h0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_h1.c | FileCheck %t/ae_mula32x16_h1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_h1_s2.c | FileCheck %t/ae_mula32x16_h1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_h2.c | FileCheck %t/ae_mula32x16_h2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_h2_s2.c | FileCheck %t/ae_mula32x16_h2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_h3.c | FileCheck %t/ae_mula32x16_h3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_h3_s2.c | FileCheck %t/ae_mula32x16_h3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_l0.c | FileCheck %t/ae_mula32x16_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_l0_s2.c | FileCheck %t/ae_mula32x16_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_l1.c | FileCheck %t/ae_mula32x16_l1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_l1_s2.c | FileCheck %t/ae_mula32x16_l1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_l2.c | FileCheck %t/ae_mula32x16_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_l2_s2.c | FileCheck %t/ae_mula32x16_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_l3.c | FileCheck %t/ae_mula32x16_l3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mula32x16_l3_s2.c | FileCheck %t/ae_mula32x16_l3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad24_hh_ll.c | FileCheck %t/ae_mulaad24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - 
%t/ae_mulaad24_hh_ll_s2.c | FileCheck %t/ae_mulaad24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad24_hl_lh.c | FileCheck %t/ae_mulaad24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad24_hl_lh_s2.c | FileCheck %t/ae_mulaad24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad32x16_h0_l1.c | FileCheck %t/ae_mulaad32x16_h0_l1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad32x16_h0_l1_s2.c | FileCheck %t/ae_mulaad32x16_h0_l1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad32x16_h1_l0.c | FileCheck %t/ae_mulaad32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad32x16_h1_l0_s2.c | FileCheck %t/ae_mulaad32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad32x16_h2_l3.c | FileCheck %t/ae_mulaad32x16_h2_l3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad32x16_h2_l3_s2.c | FileCheck %t/ae_mulaad32x16_h2_l3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad32x16_h3_l2.c | FileCheck %t/ae_mulaad32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaad32x16_h3_l2_s2.c | FileCheck %t/ae_mulaad32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd16ss_11_00.c | FileCheck %t/ae_mulaafd16ss_11_00.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd16ss_11_00_s2.c | FileCheck %t/ae_mulaafd16ss_11_00_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd16ss_13_02.c | FileCheck %t/ae_mulaafd16ss_13_02.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd16ss_13_02_s2.c | FileCheck %t/ae_mulaafd16ss_13_02_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd16ss_33_22.c | FileCheck 
%t/ae_mulaafd16ss_33_22.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd16ss_33_22_s2.c | FileCheck %t/ae_mulaafd16ss_33_22_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd24_hh_ll.c | FileCheck %t/ae_mulaafd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd24_hh_ll_s2.c | FileCheck %t/ae_mulaafd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd24_hl_lh.c | FileCheck %t/ae_mulaafd24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd24_hl_lh_s2.c | FileCheck %t/ae_mulaafd24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd32x16_h0_l1.c | FileCheck %t/ae_mulaafd32x16_h0_l1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd32x16_h0_l1_s2.c | FileCheck %t/ae_mulaafd32x16_h0_l1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd32x16_h1_l0.c | FileCheck %t/ae_mulaafd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd32x16_h1_l0_s2.c | FileCheck %t/ae_mulaafd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd32x16_h2_l3.c | FileCheck %t/ae_mulaafd32x16_h2_l3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd32x16_h2_l3_s2.c | FileCheck %t/ae_mulaafd32x16_h2_l3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd32x16_h3_l2.c | FileCheck %t/ae_mulaafd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaafd32x16_h3_l2_s2.c | FileCheck %t/ae_mulaafd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulac24.c | FileCheck %t/ae_mulac24.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulac32x16_h.c | FileCheck %t/ae_mulac32x16_h.c +// RUN: %clang -target xtensa -mcpu=cnl 
-S -emit-llvm -O1 -o - %t/ae_mulac32x16_l.c | FileCheck %t/ae_mulac32x16_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_00.c | FileCheck %t/ae_mulaf16ss_00.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_00_s2.c | FileCheck %t/ae_mulaf16ss_00_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_10.c | FileCheck %t/ae_mulaf16ss_10.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_11.c | FileCheck %t/ae_mulaf16ss_11.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_20.c | FileCheck %t/ae_mulaf16ss_20.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_21.c | FileCheck %t/ae_mulaf16ss_21.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_22.c | FileCheck %t/ae_mulaf16ss_22.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_30.c | FileCheck %t/ae_mulaf16ss_30.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_31.c | FileCheck %t/ae_mulaf16ss_31.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_32.c | FileCheck %t/ae_mulaf16ss_32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16ss_33.c | FileCheck %t/ae_mulaf16ss_33.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf16x4ss.c | FileCheck %t/ae_mulaf16x4ss.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32r_hh.c | FileCheck %t/ae_mulaf32r_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32r_lh.c | FileCheck %t/ae_mulaf32r_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32r_ll.c | FileCheck %t/ae_mulaf32r_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32r_ll_s2.c | FileCheck %t/ae_mulaf32r_ll_s2.c +// RUN: %clang -target xtensa 
-mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32s_hh.c | FileCheck %t/ae_mulaf32s_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32s_lh.c | FileCheck %t/ae_mulaf32s_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32s_ll.c | FileCheck %t/ae_mulaf32s_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32s_ll_s2.c | FileCheck %t/ae_mulaf32s_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_h0.c | FileCheck %t/ae_mulaf32x16_h0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_h0_s2.c | FileCheck %t/ae_mulaf32x16_h0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_h1.c | FileCheck %t/ae_mulaf32x16_h1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_h1_s2.c | FileCheck %t/ae_mulaf32x16_h1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_h2.c | FileCheck %t/ae_mulaf32x16_h2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_h2_s2.c | FileCheck %t/ae_mulaf32x16_h2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_h3.c | FileCheck %t/ae_mulaf32x16_h3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_h3_s2.c | FileCheck %t/ae_mulaf32x16_h3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_l0.c | FileCheck %t/ae_mulaf32x16_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_l0_s2.c | FileCheck %t/ae_mulaf32x16_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_l1.c | FileCheck %t/ae_mulaf32x16_l1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_l1_s2.c | FileCheck %t/ae_mulaf32x16_l1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - 
%t/ae_mulaf32x16_l2.c | FileCheck %t/ae_mulaf32x16_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_l2_s2.c | FileCheck %t/ae_mulaf32x16_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_l3.c | FileCheck %t/ae_mulaf32x16_l3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf32x16_l3_s2.c | FileCheck %t/ae_mulaf32x16_l3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf48q32sp16s_l.c | FileCheck %t/ae_mulaf48q32sp16s_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf48q32sp16s_l_s2.c | FileCheck %t/ae_mulaf48q32sp16s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf48q32sp16u_l.c | FileCheck %t/ae_mulaf48q32sp16u_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaf48q32sp16u_l_s2.c | FileCheck %t/ae_mulaf48q32sp16u_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafc24ra.c | FileCheck %t/ae_mulafc24ra.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafc32x16ras_h.c | FileCheck %t/ae_mulafc32x16ras_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafc32x16ras_l.c | FileCheck %t/ae_mulafc32x16ras_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafd24x2_fir_h.c | FileCheck %t/ae_mulafd24x2_fir_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafd24x2_fir_l.c | FileCheck %t/ae_mulafd24x2_fir_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafd32x16x2_fir_hh.c | FileCheck %t/ae_mulafd32x16x2_fir_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafd32x16x2_fir_hl.c | FileCheck %t/ae_mulafd32x16x2_fir_hl.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafd32x16x2_fir_lh.c | FileCheck %t/ae_mulafd32x16x2_fir_lh.c +// RUN: %clang -target 
xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafd32x16x2_fir_ll.c | FileCheck %t/ae_mulafd32x16x2_fir_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp24x2r.c | FileCheck %t/ae_mulafp24x2r.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp24x2r_s2.c | FileCheck %t/ae_mulafp24x2r_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp24x2ra.c | FileCheck %t/ae_mulafp24x2ra.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp24x2ra_s2.c | FileCheck %t/ae_mulafp24x2ra_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp32x16x2ras_h.c | FileCheck %t/ae_mulafp32x16x2ras_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp32x16x2ras_h_s2.c | FileCheck %t/ae_mulafp32x16x2ras_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp32x16x2ras_l.c | FileCheck %t/ae_mulafp32x16x2ras_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp32x16x2ras_l_s2.c | FileCheck %t/ae_mulafp32x16x2ras_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp32x16x2rs_h.c | FileCheck %t/ae_mulafp32x16x2rs_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp32x16x2rs_h_s2.c | FileCheck %t/ae_mulafp32x16x2rs_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp32x16x2rs_l.c | FileCheck %t/ae_mulafp32x16x2rs_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp32x16x2rs_l_s2.c | FileCheck %t/ae_mulafp32x16x2rs_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp32x2ras.c | FileCheck %t/ae_mulafp32x2ras.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafp32x2rs.c | FileCheck %t/ae_mulafp32x2rs.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafq32sp24s_h_s2.c | FileCheck 
%t/ae_mulafq32sp24s_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulafq32sp24s_l_s2.c | FileCheck %t/ae_mulafq32sp24s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulap24x2.c | FileCheck %t/ae_mulap24x2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulap24x2_s2.c | FileCheck %t/ae_mulap24x2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulap32x16x2_h.c | FileCheck %t/ae_mulap32x16x2_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulap32x16x2_l.c | FileCheck %t/ae_mulap32x16x2_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulap32x2.c | FileCheck %t/ae_mulap32x2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaq32sp16s_l_s2.c | FileCheck %t/ae_mulaq32sp16s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulaq32sp16u_l_s2.c | FileCheck %t/ae_mulaq32sp16u_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mularfq32sp24s_h_s2.c | FileCheck %t/ae_mularfq32sp24s_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mularfq32sp24s_l_s2.c | FileCheck %t/ae_mularfq32sp24s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulas32f48p16s_hh.c | FileCheck %t/ae_mulas32f48p16s_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulas32f48p16s_hh_s2.c | FileCheck %t/ae_mulas32f48p16s_hh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulas32f48p16s_lh.c | FileCheck %t/ae_mulas32f48p16s_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulas32f48p16s_lh_s2.c | FileCheck %t/ae_mulas32f48p16s_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulas32f48p16s_ll.c | FileCheck %t/ae_mulas32f48p16s_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - 
%t/ae_mulas32f48p16s_ll_s2.c | FileCheck %t/ae_mulas32f48p16s_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasd24_hh_ll.c | FileCheck %t/ae_mulasd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasd24_hh_ll_s2.c | FileCheck %t/ae_mulasd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasd24_hl_lh.c | FileCheck %t/ae_mulasd24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasd24_hl_lh_s2.c | FileCheck %t/ae_mulasd24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasd32x16_h1_l0.c | FileCheck %t/ae_mulasd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasd32x16_h1_l0_s2.c | FileCheck %t/ae_mulasd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasd32x16_h3_l2.c | FileCheck %t/ae_mulasd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasd32x16_h3_l2_s2.c | FileCheck %t/ae_mulasd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasfd24_hh_ll.c | FileCheck %t/ae_mulasfd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasfd24_hh_ll_s2.c | FileCheck %t/ae_mulasfd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasfd24_hl_lh.c | FileCheck %t/ae_mulasfd24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasfd24_hl_lh_s2.c | FileCheck %t/ae_mulasfd24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasfd32x16_h1_l0.c | FileCheck %t/ae_mulasfd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasfd32x16_h1_l0_s2.c | FileCheck %t/ae_mulasfd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasfd32x16_h3_l2.c | FileCheck %t/ae_mulasfd32x16_h3_l2.c 
+// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulasfd32x16_h3_l2_s2.c | FileCheck %t/ae_mulasfd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulc24.c | FileCheck %t/ae_mulc24.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulc32x16_h.c | FileCheck %t/ae_mulc32x16_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulc32x16_l.c | FileCheck %t/ae_mulc32x16_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_00.c | FileCheck %t/ae_mulf16ss_00.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_00_s2.c | FileCheck %t/ae_mulf16ss_00_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_10.c | FileCheck %t/ae_mulf16ss_10.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_11.c | FileCheck %t/ae_mulf16ss_11.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_20.c | FileCheck %t/ae_mulf16ss_20.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_21.c | FileCheck %t/ae_mulf16ss_21.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_22.c | FileCheck %t/ae_mulf16ss_22.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_30.c | FileCheck %t/ae_mulf16ss_30.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_31.c | FileCheck %t/ae_mulf16ss_31.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_32.c | FileCheck %t/ae_mulf16ss_32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16ss_33.c | FileCheck %t/ae_mulf16ss_33.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf16x4ss.c | FileCheck %t/ae_mulf16x4ss.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32r_hh.c | FileCheck %t/ae_mulf32r_hh.c +// RUN: %clang 
-target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32r_lh.c | FileCheck %t/ae_mulf32r_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32r_ll.c | FileCheck %t/ae_mulf32r_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32r_ll_s2.c | FileCheck %t/ae_mulf32r_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32s_hh.c | FileCheck %t/ae_mulf32s_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32s_lh.c | FileCheck %t/ae_mulf32s_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32s_ll.c | FileCheck %t/ae_mulf32s_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32s_ll_s2.c | FileCheck %t/ae_mulf32s_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_h0.c | FileCheck %t/ae_mulf32x16_h0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_h0_s2.c | FileCheck %t/ae_mulf32x16_h0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_h1.c | FileCheck %t/ae_mulf32x16_h1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_h1_s2.c | FileCheck %t/ae_mulf32x16_h1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_h2.c | FileCheck %t/ae_mulf32x16_h2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_h2_s2.c | FileCheck %t/ae_mulf32x16_h2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_h3.c | FileCheck %t/ae_mulf32x16_h3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_h3_s2.c | FileCheck %t/ae_mulf32x16_h3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_l0.c | FileCheck %t/ae_mulf32x16_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_l0_s2.c | FileCheck 
%t/ae_mulf32x16_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_l1.c | FileCheck %t/ae_mulf32x16_l1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_l1_s2.c | FileCheck %t/ae_mulf32x16_l1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_l2.c | FileCheck %t/ae_mulf32x16_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_l2_s2.c | FileCheck %t/ae_mulf32x16_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_l3.c | FileCheck %t/ae_mulf32x16_l3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf32x16_l3_s2.c | FileCheck %t/ae_mulf32x16_l3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf48q32sp16s_l.c | FileCheck %t/ae_mulf48q32sp16s_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf48q32sp16s_l_s2.c | FileCheck %t/ae_mulf48q32sp16s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf48q32sp16u_l.c | FileCheck %t/ae_mulf48q32sp16u_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulf48q32sp16u_l_s2.c | FileCheck %t/ae_mulf48q32sp16u_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfc24ra.c | FileCheck %t/ae_mulfc24ra.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfc32x16ras_h.c | FileCheck %t/ae_mulfc32x16ras_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfc32x16ras_l.c | FileCheck %t/ae_mulfc32x16ras_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfd24x2_fir_h.c | FileCheck %t/ae_mulfd24x2_fir_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfd24x2_fir_l.c | FileCheck %t/ae_mulfd24x2_fir_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfd32x16x2_fir_hh.c | FileCheck 
%t/ae_mulfd32x16x2_fir_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfd32x16x2_fir_hl.c | FileCheck %t/ae_mulfd32x16x2_fir_hl.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfd32x16x2_fir_lh.c | FileCheck %t/ae_mulfd32x16x2_fir_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfd32x16x2_fir_ll.c | FileCheck %t/ae_mulfd32x16x2_fir_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp16x4ras.c | FileCheck %t/ae_mulfp16x4ras.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp16x4s.c | FileCheck %t/ae_mulfp16x4s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp24x2r.c | FileCheck %t/ae_mulfp24x2r.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp24x2r_s2.c | FileCheck %t/ae_mulfp24x2r_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp24x2ra.c | FileCheck %t/ae_mulfp24x2ra.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp24x2ra_s2.c | FileCheck %t/ae_mulfp24x2ra_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp32x16x2ras_h.c | FileCheck %t/ae_mulfp32x16x2ras_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp32x16x2ras_h_s2.c | FileCheck %t/ae_mulfp32x16x2ras_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp32x16x2ras_l.c | FileCheck %t/ae_mulfp32x16x2ras_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp32x16x2ras_l_s2.c | FileCheck %t/ae_mulfp32x16x2ras_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp32x16x2rs_h.c | FileCheck %t/ae_mulfp32x16x2rs_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp32x16x2rs_h_s2.c | FileCheck %t/ae_mulfp32x16x2rs_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp32x16x2rs_l.c 
| FileCheck %t/ae_mulfp32x16x2rs_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp32x16x2rs_l_s2.c | FileCheck %t/ae_mulfp32x16x2rs_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp32x2ras.c | FileCheck %t/ae_mulfp32x2ras.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfp32x2rs.c | FileCheck %t/ae_mulfp32x2rs.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfq32sp24s_h_s2.c | FileCheck %t/ae_mulfq32sp24s_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulfq32sp24s_l_s2.c | FileCheck %t/ae_mulfq32sp24s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulp24x2.c | FileCheck %t/ae_mulp24x2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulp24x2_s2.c | FileCheck %t/ae_mulp24x2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulp32x16x2_h.c | FileCheck %t/ae_mulp32x16x2_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulp32x16x2_l.c | FileCheck %t/ae_mulp32x16x2_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulp32x2.c | FileCheck %t/ae_mulp32x2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulq32sp16s_l_s2.c | FileCheck %t/ae_mulq32sp16s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulq32sp16u_l_s2.c | FileCheck %t/ae_mulq32sp16u_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulrfq32sp24s_h_s2.c | FileCheck %t/ae_mulrfq32sp24s_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulrfq32sp24s_l_s2.c | FileCheck %t/ae_mulrfq32sp24s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls16x4.c | FileCheck %t/ae_muls16x4.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32_hh.c | FileCheck %t/ae_muls32_hh.c +// RUN: %clang -target xtensa 
-mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32_lh.c | FileCheck %t/ae_muls32_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32_ll.c | FileCheck %t/ae_muls32_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32f48p16s_hh.c | FileCheck %t/ae_muls32f48p16s_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32f48p16s_hh_s2.c | FileCheck %t/ae_muls32f48p16s_hh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32f48p16s_lh.c | FileCheck %t/ae_muls32f48p16s_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32f48p16s_lh_s2.c | FileCheck %t/ae_muls32f48p16s_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32f48p16s_ll.c | FileCheck %t/ae_muls32f48p16s_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32f48p16s_ll_s2.c | FileCheck %t/ae_muls32f48p16s_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32u_ll.c | FileCheck %t/ae_muls32u_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_h0.c | FileCheck %t/ae_muls32x16_h0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_h0_s2.c | FileCheck %t/ae_muls32x16_h0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_h1.c | FileCheck %t/ae_muls32x16_h1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_h1_s2.c | FileCheck %t/ae_muls32x16_h1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_h2.c | FileCheck %t/ae_muls32x16_h2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_h2_s2.c | FileCheck %t/ae_muls32x16_h2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_h3.c | FileCheck %t/ae_muls32x16_h3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - 
%t/ae_muls32x16_h3_s2.c | FileCheck %t/ae_muls32x16_h3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_l0.c | FileCheck %t/ae_muls32x16_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_l0_s2.c | FileCheck %t/ae_muls32x16_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_l1.c | FileCheck %t/ae_muls32x16_l1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_l1_s2.c | FileCheck %t/ae_muls32x16_l1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_l2.c | FileCheck %t/ae_muls32x16_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_l2_s2.c | FileCheck %t/ae_muls32x16_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_l3.c | FileCheck %t/ae_muls32x16_l3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_muls32x16_l3_s2.c | FileCheck %t/ae_muls32x16_l3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsad24_hh_ll.c | FileCheck %t/ae_mulsad24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsad24_hh_ll_s2.c | FileCheck %t/ae_mulsad24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsad32x16_h1_l0.c | FileCheck %t/ae_mulsad32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsad32x16_h1_l0_s2.c | FileCheck %t/ae_mulsad32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsad32x16_h3_l2.c | FileCheck %t/ae_mulsad32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsad32x16_h3_l2_s2.c | FileCheck %t/ae_mulsad32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsafd24_hh_ll.c | FileCheck %t/ae_mulsafd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - 
%t/ae_mulsafd24_hh_ll_s2.c | FileCheck %t/ae_mulsafd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsafd32x16_h1_l0.c | FileCheck %t/ae_mulsafd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsafd32x16_h1_l0_s2.c | FileCheck %t/ae_mulsafd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsafd32x16_h3_l2.c | FileCheck %t/ae_mulsafd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsafd32x16_h3_l2_s2.c | FileCheck %t/ae_mulsafd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_00.c | FileCheck %t/ae_mulsf16ss_00.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_00_s2.c | FileCheck %t/ae_mulsf16ss_00_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_10.c | FileCheck %t/ae_mulsf16ss_10.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_11.c | FileCheck %t/ae_mulsf16ss_11.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_20.c | FileCheck %t/ae_mulsf16ss_20.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_21.c | FileCheck %t/ae_mulsf16ss_21.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_22.c | FileCheck %t/ae_mulsf16ss_22.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_30.c | FileCheck %t/ae_mulsf16ss_30.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_31.c | FileCheck %t/ae_mulsf16ss_31.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_32.c | FileCheck %t/ae_mulsf16ss_32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16ss_33.c | FileCheck %t/ae_mulsf16ss_33.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf16x4ss.c | FileCheck 
%t/ae_mulsf16x4ss.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32r_hh.c | FileCheck %t/ae_mulsf32r_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32r_lh.c | FileCheck %t/ae_mulsf32r_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32r_ll.c | FileCheck %t/ae_mulsf32r_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32r_ll_s2.c | FileCheck %t/ae_mulsf32r_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32s_hh.c | FileCheck %t/ae_mulsf32s_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32s_lh.c | FileCheck %t/ae_mulsf32s_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32s_ll.c | FileCheck %t/ae_mulsf32s_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_h0.c | FileCheck %t/ae_mulsf32x16_h0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_h0_s2.c | FileCheck %t/ae_mulsf32x16_h0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_h1.c | FileCheck %t/ae_mulsf32x16_h1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_h1_s2.c | FileCheck %t/ae_mulsf32x16_h1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_h2.c | FileCheck %t/ae_mulsf32x16_h2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_h2_s2.c | FileCheck %t/ae_mulsf32x16_h2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_h3.c | FileCheck %t/ae_mulsf32x16_h3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_h3_s2.c | FileCheck %t/ae_mulsf32x16_h3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_l0.c | FileCheck %t/ae_mulsf32x16_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm 
-O1 -o - %t/ae_mulsf32x16_l0_s2.c | FileCheck %t/ae_mulsf32x16_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_l1.c | FileCheck %t/ae_mulsf32x16_l1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_l1_s2.c | FileCheck %t/ae_mulsf32x16_l1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_l2.c | FileCheck %t/ae_mulsf32x16_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_l2_s2.c | FileCheck %t/ae_mulsf32x16_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_l3.c | FileCheck %t/ae_mulsf32x16_l3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf32x16_l3_s2.c | FileCheck %t/ae_mulsf32x16_l3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf48q32sp16s_l.c | FileCheck %t/ae_mulsf48q32sp16s_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf48q32sp16s_l_s2.c | FileCheck %t/ae_mulsf48q32sp16s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf48q32sp16u_l.c | FileCheck %t/ae_mulsf48q32sp16u_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsf48q32sp16u_l_s2.c | FileCheck %t/ae_mulsf48q32sp16u_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp24x2r.c | FileCheck %t/ae_mulsfp24x2r.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp24x2r_s2.c | FileCheck %t/ae_mulsfp24x2r_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp24x2ra.c | FileCheck %t/ae_mulsfp24x2ra.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp24x2ra_s2.c | FileCheck %t/ae_mulsfp24x2ra_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp32x16x2ras_h.c | FileCheck %t/ae_mulsfp32x16x2ras_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm 
-O1 -o - %t/ae_mulsfp32x16x2ras_h_s2.c | FileCheck %t/ae_mulsfp32x16x2ras_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp32x16x2ras_l.c | FileCheck %t/ae_mulsfp32x16x2ras_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp32x16x2ras_l_s2.c | FileCheck %t/ae_mulsfp32x16x2ras_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp32x16x2rs_h.c | FileCheck %t/ae_mulsfp32x16x2rs_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp32x16x2rs_h_s2.c | FileCheck %t/ae_mulsfp32x16x2rs_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp32x16x2rs_l.c | FileCheck %t/ae_mulsfp32x16x2rs_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp32x16x2rs_l_s2.c | FileCheck %t/ae_mulsfp32x16x2rs_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp32x2ras.c | FileCheck %t/ae_mulsfp32x2ras.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfp32x2rs.c | FileCheck %t/ae_mulsfp32x2rs.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfq32sp24s_h_s2.c | FileCheck %t/ae_mulsfq32sp24s_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsfq32sp24s_l_s2.c | FileCheck %t/ae_mulsfq32sp24s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsp24x2.c | FileCheck %t/ae_mulsp24x2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsp24x2_s2.c | FileCheck %t/ae_mulsp24x2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsp32x16x2_h.c | FileCheck %t/ae_mulsp32x16x2_h.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsp32x16x2_l.c | FileCheck %t/ae_mulsp32x16x2_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsp32x2.c | FileCheck %t/ae_mulsp32x2.c +// RUN: %clang -target xtensa -mcpu=cnl -S 
-emit-llvm -O1 -o - %t/ae_mulsq32sp16s_l_s2.c | FileCheck %t/ae_mulsq32sp16s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsq32sp16u_l_s2.c | FileCheck %t/ae_mulsq32sp16u_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsrfq32sp24s_h_s2.c | FileCheck %t/ae_mulsrfq32sp24s_h_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulsrfq32sp24s_l_s2.c | FileCheck %t/ae_mulsrfq32sp24s_l_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulss32f48p16s_hh.c | FileCheck %t/ae_mulss32f48p16s_hh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulss32f48p16s_hh_s2.c | FileCheck %t/ae_mulss32f48p16s_hh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulss32f48p16s_lh.c | FileCheck %t/ae_mulss32f48p16s_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulss32f48p16s_lh_s2.c | FileCheck %t/ae_mulss32f48p16s_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulss32f48p16s_ll.c | FileCheck %t/ae_mulss32f48p16s_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulss32f48p16s_ll_s2.c | FileCheck %t/ae_mulss32f48p16s_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssd24_hh_ll.c | FileCheck %t/ae_mulssd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssd24_hh_ll_s2.c | FileCheck %t/ae_mulssd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssd24_hl_lh.c | FileCheck %t/ae_mulssd24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssd24_hl_lh_s2.c | FileCheck %t/ae_mulssd24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssd32x16_h1_l0.c | FileCheck %t/ae_mulssd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssd32x16_h1_l0_s2.c | FileCheck 
%t/ae_mulssd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssd32x16_h3_l2.c | FileCheck %t/ae_mulssd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssd32x16_h3_l2_s2.c | FileCheck %t/ae_mulssd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd16ss_11_00.c | FileCheck %t/ae_mulssfd16ss_11_00.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd16ss_11_00_s2.c | FileCheck %t/ae_mulssfd16ss_11_00_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd16ss_13_02.c | FileCheck %t/ae_mulssfd16ss_13_02.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd16ss_13_02_s2.c | FileCheck %t/ae_mulssfd16ss_13_02_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd16ss_33_22.c | FileCheck %t/ae_mulssfd16ss_33_22.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd16ss_33_22_s2.c | FileCheck %t/ae_mulssfd16ss_33_22_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd24_hh_ll.c | FileCheck %t/ae_mulssfd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd24_hh_ll_s2.c | FileCheck %t/ae_mulssfd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd24_hl_lh.c | FileCheck %t/ae_mulssfd24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd24_hl_lh_s2.c | FileCheck %t/ae_mulssfd24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd32x16_h1_l0.c | FileCheck %t/ae_mulssfd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd32x16_h1_l0_s2.c | FileCheck %t/ae_mulssfd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd32x16_h3_l2.c | FileCheck %t/ae_mulssfd32x16_h3_l2.c +// RUN: %clang 
-target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulssfd32x16_h3_l2_s2.c | FileCheck %t/ae_mulssfd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad24_hh_ll.c | FileCheck %t/ae_mulzaad24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad24_hh_ll_s2.c | FileCheck %t/ae_mulzaad24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad24_hl_lh.c | FileCheck %t/ae_mulzaad24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad24_hl_lh_s2.c | FileCheck %t/ae_mulzaad24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad32x16_h0_l1.c | FileCheck %t/ae_mulzaad32x16_h0_l1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad32x16_h0_l1_s2.c | FileCheck %t/ae_mulzaad32x16_h0_l1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad32x16_h1_l0.c | FileCheck %t/ae_mulzaad32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad32x16_h1_l0_s2.c | FileCheck %t/ae_mulzaad32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad32x16_h2_l3.c | FileCheck %t/ae_mulzaad32x16_h2_l3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad32x16_h2_l3_s2.c | FileCheck %t/ae_mulzaad32x16_h2_l3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad32x16_h3_l2.c | FileCheck %t/ae_mulzaad32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaad32x16_h3_l2_s2.c | FileCheck %t/ae_mulzaad32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd16ss_11_00.c | FileCheck %t/ae_mulzaafd16ss_11_00.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd16ss_11_00_s2.c | FileCheck %t/ae_mulzaafd16ss_11_00_s2.c +// RUN: %clang -target xtensa -mcpu=cnl 
-S -emit-llvm -O1 -o - %t/ae_mulzaafd16ss_13_02.c | FileCheck %t/ae_mulzaafd16ss_13_02.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd16ss_13_02_s2.c | FileCheck %t/ae_mulzaafd16ss_13_02_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd16ss_33_22.c | FileCheck %t/ae_mulzaafd16ss_33_22.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd16ss_33_22_s2.c | FileCheck %t/ae_mulzaafd16ss_33_22_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd24_hh_ll.c | FileCheck %t/ae_mulzaafd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd24_hh_ll_s2.c | FileCheck %t/ae_mulzaafd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd24_hl_lh.c | FileCheck %t/ae_mulzaafd24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd24_hl_lh_s2.c | FileCheck %t/ae_mulzaafd24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd32x16_h0_l1.c | FileCheck %t/ae_mulzaafd32x16_h0_l1.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd32x16_h0_l1_s2.c | FileCheck %t/ae_mulzaafd32x16_h0_l1_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd32x16_h1_l0.c | FileCheck %t/ae_mulzaafd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd32x16_h1_l0_s2.c | FileCheck %t/ae_mulzaafd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd32x16_h2_l3.c | FileCheck %t/ae_mulzaafd32x16_h2_l3.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd32x16_h2_l3_s2.c | FileCheck %t/ae_mulzaafd32x16_h2_l3_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzaafd32x16_h3_l2.c | FileCheck %t/ae_mulzaafd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S 
-emit-llvm -O1 -o - %t/ae_mulzaafd32x16_h3_l2_s2.c | FileCheck %t/ae_mulzaafd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasd24_hh_ll.c | FileCheck %t/ae_mulzasd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasd24_hh_ll_s2.c | FileCheck %t/ae_mulzasd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasd24_hl_lh.c | FileCheck %t/ae_mulzasd24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasd24_hl_lh_s2.c | FileCheck %t/ae_mulzasd24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasd32x16_h1_l0.c | FileCheck %t/ae_mulzasd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasd32x16_h1_l0_s2.c | FileCheck %t/ae_mulzasd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasd32x16_h3_l2.c | FileCheck %t/ae_mulzasd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasd32x16_h3_l2_s2.c | FileCheck %t/ae_mulzasd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasfd24_hh_ll.c | FileCheck %t/ae_mulzasfd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasfd24_hh_ll_s2.c | FileCheck %t/ae_mulzasfd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasfd24_hl_lh.c | FileCheck %t/ae_mulzasfd24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasfd24_hl_lh_s2.c | FileCheck %t/ae_mulzasfd24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasfd32x16_h1_l0.c | FileCheck %t/ae_mulzasfd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasfd32x16_h1_l0_s2.c | FileCheck %t/ae_mulzasfd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - 
%t/ae_mulzasfd32x16_h3_l2.c | FileCheck %t/ae_mulzasfd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzasfd32x16_h3_l2_s2.c | FileCheck %t/ae_mulzasfd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsad24_hh_ll.c | FileCheck %t/ae_mulzsad24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsad24_hh_ll_s2.c | FileCheck %t/ae_mulzsad24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsad32x16_h1_l0.c | FileCheck %t/ae_mulzsad32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsad32x16_h1_l0_s2.c | FileCheck %t/ae_mulzsad32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsad32x16_h3_l2.c | FileCheck %t/ae_mulzsad32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsad32x16_h3_l2_s2.c | FileCheck %t/ae_mulzsad32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsafd24_hh_ll.c | FileCheck %t/ae_mulzsafd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsafd24_hh_ll_s2.c | FileCheck %t/ae_mulzsafd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsafd32x16_h1_l0.c | FileCheck %t/ae_mulzsafd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsafd32x16_h1_l0_s2.c | FileCheck %t/ae_mulzsafd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsafd32x16_h3_l2.c | FileCheck %t/ae_mulzsafd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzsafd32x16_h3_l2_s2.c | FileCheck %t/ae_mulzsafd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssd24_hh_ll.c | FileCheck %t/ae_mulzssd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - 
%t/ae_mulzssd24_hh_ll_s2.c | FileCheck %t/ae_mulzssd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssd24_hl_lh.c | FileCheck %t/ae_mulzssd24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssd24_hl_lh_s2.c | FileCheck %t/ae_mulzssd24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssd32x16_h1_l0.c | FileCheck %t/ae_mulzssd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssd32x16_h1_l0_s2.c | FileCheck %t/ae_mulzssd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssd32x16_h3_l2.c | FileCheck %t/ae_mulzssd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssd32x16_h3_l2_s2.c | FileCheck %t/ae_mulzssd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd16ss_11_00.c | FileCheck %t/ae_mulzssfd16ss_11_00.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd16ss_11_00_s2.c | FileCheck %t/ae_mulzssfd16ss_11_00_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd16ss_13_02.c | FileCheck %t/ae_mulzssfd16ss_13_02.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd16ss_13_02_s2.c | FileCheck %t/ae_mulzssfd16ss_13_02_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd16ss_33_22.c | FileCheck %t/ae_mulzssfd16ss_33_22.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd16ss_33_22_s2.c | FileCheck %t/ae_mulzssfd16ss_33_22_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd24_hh_ll.c | FileCheck %t/ae_mulzssfd24_hh_ll.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd24_hh_ll_s2.c | FileCheck %t/ae_mulzssfd24_hh_ll_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd24_hl_lh.c | 
FileCheck %t/ae_mulzssfd24_hl_lh.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd24_hl_lh_s2.c | FileCheck %t/ae_mulzssfd24_hl_lh_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd32x16_h1_l0.c | FileCheck %t/ae_mulzssfd32x16_h1_l0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd32x16_h1_l0_s2.c | FileCheck %t/ae_mulzssfd32x16_h1_l0_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd32x16_h3_l2.c | FileCheck %t/ae_mulzssfd32x16_h3_l2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_mulzssfd32x16_h3_l2_s2.c | FileCheck %t/ae_mulzssfd32x16_h3_l2_s2.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_nand.c | FileCheck %t/ae_nand.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_neg16s.c | FileCheck %t/ae_neg16s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_neg24s.c | FileCheck %t/ae_neg24s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_neg32.c | FileCheck %t/ae_neg32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_neg32s.c | FileCheck %t/ae_neg32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_neg64.c | FileCheck %t/ae_neg64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_neg64s.c | FileCheck %t/ae_neg64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_nsa64.c | FileCheck %t/ae_nsa64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_nsaz16_0.c | FileCheck %t/ae_nsaz16_0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_nsaz32_l.c | FileCheck %t/ae_nsaz32_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_or.c | FileCheck %t/ae_or.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_pksr24.c | FileCheck %t/ae_pksr24.c +// RUN: %clang -target xtensa 
-mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_pksr32.c | FileCheck %t/ae_pksr32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_round16x4f32sasym.c | FileCheck %t/ae_round16x4f32sasym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_round16x4f32ssym.c | FileCheck %t/ae_round16x4f32ssym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_round24x2f48sasym.c | FileCheck %t/ae_round24x2f48sasym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_round24x2f48ssym.c | FileCheck %t/ae_round24x2f48ssym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_round32x2f48sasym.c | FileCheck %t/ae_round32x2f48sasym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_round32x2f48ssym.c | FileCheck %t/ae_round32x2f48ssym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_round32x2f64sasym.c | FileCheck %t/ae_round32x2f64sasym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_round32x2f64ssym.c | FileCheck %t/ae_round32x2f64ssym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_roundsp16f24asym.c | FileCheck %t/ae_roundsp16f24asym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_roundsp16f24sym.c | FileCheck %t/ae_roundsp16f24sym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_roundsp16q48x2asym.c | FileCheck %t/ae_roundsp16q48x2asym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_roundsp16q48x2sym.c | FileCheck %t/ae_roundsp16q48x2sym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_roundsq32f48asym.c | FileCheck %t/ae_roundsq32f48asym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_roundsq32f48sym.c | FileCheck %t/ae_roundsq32f48sym.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16_0_i.c | FileCheck %t/ae_s16_0_i.c +// RUN: %clang -target xtensa -mcpu=cnl 
-S -emit-llvm -O1 -o - %t/ae_s16_0_ip.c | FileCheck %t/ae_s16_0_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16_0_x.c | FileCheck %t/ae_s16_0_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16_0_xc.c | FileCheck %t/ae_s16_0_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16_0_xp.c | FileCheck %t/ae_s16_0_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16m_l_i.c | FileCheck %t/ae_s16m_l_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16m_l_iu.c | FileCheck %t/ae_s16m_l_iu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16m_l_x.c | FileCheck %t/ae_s16m_l_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16m_l_xc.c | FileCheck %t/ae_s16m_l_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16m_l_xu.c | FileCheck %t/ae_s16m_l_xu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x2m_i.c | FileCheck %t/ae_s16x2m_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x2m_iu.c | FileCheck %t/ae_s16x2m_iu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x2m_x.c | FileCheck %t/ae_s16x2m_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x2m_xc.c | FileCheck %t/ae_s16x2m_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x2m_xu.c | FileCheck %t/ae_s16x2m_xu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x4_i.c | FileCheck %t/ae_s16x4_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x4_ip.c | FileCheck %t/ae_s16x4_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x4_ric.c | FileCheck %t/ae_s16x4_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x4_rip.c | FileCheck %t/ae_s16x4_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S 
-emit-llvm -O1 -o - %t/ae_s16x4_x.c | FileCheck %t/ae_s16x4_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x4_xc.c | FileCheck %t/ae_s16x4_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s16x4_xp.c | FileCheck %t/ae_s16x4_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s24ra64s_i.c | FileCheck %t/ae_s24ra64s_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s24ra64s_ip.c | FileCheck %t/ae_s24ra64s_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s24ra64s_x.c | FileCheck %t/ae_s24ra64s_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s24ra64s_xc.c | FileCheck %t/ae_s24ra64s_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s24ra64s_xp.c | FileCheck %t/ae_s24ra64s_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s24x2ra64s_ip.c | FileCheck %t/ae_s24x2ra64s_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32_l_i.c | FileCheck %t/ae_s32_l_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32_l_ip.c | FileCheck %t/ae_s32_l_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32_l_x.c | FileCheck %t/ae_s32_l_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32_l_xc.c | FileCheck %t/ae_s32_l_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32_l_xp.c | FileCheck %t/ae_s32_l_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32f24_l_i.c | FileCheck %t/ae_s32f24_l_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32f24_l_ip.c | FileCheck %t/ae_s32f24_l_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32f24_l_x.c | FileCheck %t/ae_s32f24_l_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32f24_l_xc.c | FileCheck %t/ae_s32f24_l_xc.c +// RUN: 
%clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32f24_l_xp.c | FileCheck %t/ae_s32f24_l_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32m_i.c | FileCheck %t/ae_s32m_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32m_iu.c | FileCheck %t/ae_s32m_iu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32m_x.c | FileCheck %t/ae_s32m_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32m_xc.c | FileCheck %t/ae_s32m_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32m_xu.c | FileCheck %t/ae_s32m_xu.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32ra64s_i.c | FileCheck %t/ae_s32ra64s_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32ra64s_ip.c | FileCheck %t/ae_s32ra64s_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32ra64s_x.c | FileCheck %t/ae_s32ra64s_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32ra64s_xc.c | FileCheck %t/ae_s32ra64s_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32ra64s_xp.c | FileCheck %t/ae_s32ra64s_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2_i.c | FileCheck %t/ae_s32x2_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2_ip.c | FileCheck %t/ae_s32x2_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2_ric.c | FileCheck %t/ae_s32x2_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2_rip.c | FileCheck %t/ae_s32x2_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2_x.c | FileCheck %t/ae_s32x2_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2_xc.c | FileCheck %t/ae_s32x2_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2_xp.c | FileCheck %t/ae_s32x2_xp.c +// RUN: 
%clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2f24_i.c | FileCheck %t/ae_s32x2f24_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2f24_ip.c | FileCheck %t/ae_s32x2f24_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2f24_ric.c | FileCheck %t/ae_s32x2f24_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2f24_rip.c | FileCheck %t/ae_s32x2f24_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2f24_x.c | FileCheck %t/ae_s32x2f24_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2f24_xc.c | FileCheck %t/ae_s32x2f24_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2f24_xp.c | FileCheck %t/ae_s32x2f24_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s32x2ra64s_ip.c | FileCheck %t/ae_s32x2ra64s_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s64_i.c | FileCheck %t/ae_s64_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s64_ip.c | FileCheck %t/ae_s64_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s64_x.c | FileCheck %t/ae_s64_x.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s64_xc.c | FileCheck %t/ae_s64_xc.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_s64_xp.c | FileCheck %t/ae_s64_xp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa16x4_ic.c | FileCheck %t/ae_sa16x4_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa16x4_ip.c | FileCheck %t/ae_sa16x4_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa16x4_ric.c | FileCheck %t/ae_sa16x4_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa16x4_rip.c | FileCheck %t/ae_sa16x4_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa24_l_ic.c | FileCheck 
%t/ae_sa24_l_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa24_l_ip.c | FileCheck %t/ae_sa24_l_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa24_l_ric.c | FileCheck %t/ae_sa24_l_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa24_l_rip.c | FileCheck %t/ae_sa24_l_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa24x2_ic.c | FileCheck %t/ae_sa24x2_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa24x2_ip.c | FileCheck %t/ae_sa24x2_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa24x2_ric.c | FileCheck %t/ae_sa24x2_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa24x2_rip.c | FileCheck %t/ae_sa24x2_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa32x2_ic.c | FileCheck %t/ae_sa32x2_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa32x2_ip.c | FileCheck %t/ae_sa32x2_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa32x2_ric.c | FileCheck %t/ae_sa32x2_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa32x2_rip.c | FileCheck %t/ae_sa32x2_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa32x2f24_ic.c | FileCheck %t/ae_sa32x2f24_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa32x2f24_ip.c | FileCheck %t/ae_sa32x2f24_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa32x2f24_ric.c | FileCheck %t/ae_sa32x2f24_ric.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa32x2f24_rip.c | FileCheck %t/ae_sa32x2f24_rip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa64neg_fp.c | FileCheck %t/ae_sa64neg_fp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sa64pos_fp.c | FileCheck %t/ae_sa64pos_fp.c +// RUN: %clang -target xtensa 
-mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_salign64_i.c | FileCheck %t/ae_salign64_i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sat16x4.c | FileCheck %t/ae_sat16x4.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sat24s.c | FileCheck %t/ae_sat24s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sat48s.c | FileCheck %t/ae_sat48s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_satq56s.c | FileCheck %t/ae_satq56s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sb.c | FileCheck %t/ae_sb.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sb_ic.c | FileCheck %t/ae_sb_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sb_ip.c | FileCheck %t/ae_sb_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sbf.c | FileCheck %t/ae_sbf.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sbf_ic.c | FileCheck %t/ae_sbf_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sbf_ip.c | FileCheck %t/ae_sbf_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sbi.c | FileCheck %t/ae_sbi.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sbi_ic.c | FileCheck %t/ae_sbi_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sbi_ip.c | FileCheck %t/ae_sbi_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sel16i.c | FileCheck %t/ae_sel16i.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sel16i_n.c | FileCheck %t/ae_sel16i_n.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sext32.c | FileCheck %t/ae_sext32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sext32x2d16_10.c | FileCheck %t/ae_sext32x2d16_10.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sext32x2d16_32.c | FileCheck 
%t/ae_sext32x2d16_32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sha32.c | FileCheck %t/ae_sha32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_shortswap.c | FileCheck %t/ae_shortswap.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slaa16s.c | FileCheck %t/ae_slaa16s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slaa32.c | FileCheck %t/ae_slaa32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slaa32s.c | FileCheck %t/ae_slaa32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slaa64.c | FileCheck %t/ae_slaa64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slaa64s.c | FileCheck %t/ae_slaa64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slaaq56.c | FileCheck %t/ae_slaaq56.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slai16s.c | FileCheck %t/ae_slai16s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slai24.c | FileCheck %t/ae_slai24.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slai24s.c | FileCheck %t/ae_slai24s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slai32.c | FileCheck %t/ae_slai32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slai32s.c | FileCheck %t/ae_slai32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slai64.c | FileCheck %t/ae_slai64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slai64s.c | FileCheck %t/ae_slai64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slaisq56s.c | FileCheck %t/ae_slaisq56s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slas24.c | FileCheck %t/ae_slas24.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slas24s.c | FileCheck %t/ae_slas24s.c +// RUN: %clang -target xtensa -mcpu=cnl 
-S -emit-llvm -O1 -o - %t/ae_slas32.c | FileCheck %t/ae_slas32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slas32s.c | FileCheck %t/ae_slas32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slas64.c | FileCheck %t/ae_slas64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slas64s.c | FileCheck %t/ae_slas64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slasq56.c | FileCheck %t/ae_slasq56.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_slassq56s.c | FileCheck %t/ae_slassq56s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sra64_32.c | FileCheck %t/ae_sra64_32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sraa16rs.c | FileCheck %t/ae_sraa16rs.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sraa16s.c | FileCheck %t/ae_sraa16s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sraa32.c | FileCheck %t/ae_sraa32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sraa32rs.c | FileCheck %t/ae_sraa32rs.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sraa32s.c | FileCheck %t/ae_sraa32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sraa64.c | FileCheck %t/ae_sraa64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srai16.c | FileCheck %t/ae_srai16.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srai16r.c | FileCheck %t/ae_srai16r.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srai24.c | FileCheck %t/ae_srai24.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srai32.c | FileCheck %t/ae_srai32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srai32r.c | FileCheck %t/ae_srai32r.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srai64.c | FileCheck 
%t/ae_srai64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sras24.c | FileCheck %t/ae_sras24.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sras32.c | FileCheck %t/ae_sras32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sras64.c | FileCheck %t/ae_sras64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srla32.c | FileCheck %t/ae_srla32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srla64.c | FileCheck %t/ae_srla64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srli24.c | FileCheck %t/ae_srli24.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srli32.c | FileCheck %t/ae_srli32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srli64.c | FileCheck %t/ae_srli64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srls24.c | FileCheck %t/ae_srls24.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srls32.c | FileCheck %t/ae_srls32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_srls64.c | FileCheck %t/ae_srls64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sub16.c | FileCheck %t/ae_sub16.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sub16s.c | FileCheck %t/ae_sub16s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sub24s.c | FileCheck %t/ae_sub24s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sub32.c | FileCheck %t/ae_sub32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sub32s.c | FileCheck %t/ae_sub32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sub64.c | FileCheck %t/ae_sub64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_sub64s.c | FileCheck %t/ae_sub64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_subadd32.c | 
FileCheck %t/ae_subadd32.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_subadd32s.c | FileCheck %t/ae_subadd32s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_trunca32f64s_l.c | FileCheck %t/ae_trunca32f64s_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_trunca32x2f64s.c | FileCheck %t/ae_trunca32x2f64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_trunci32f64s_l.c | FileCheck %t/ae_trunci32f64s_l.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_trunci32x2f64s.c | FileCheck %t/ae_trunci32x2f64s.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vldl16c.c | FileCheck %t/ae_vldl16c.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vldl16c_ic.c | FileCheck %t/ae_vldl16c_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vldl16c_ip.c | FileCheck %t/ae_vldl16c_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vldl16t.c | FileCheck %t/ae_vldl16t.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vldl32t.c | FileCheck %t/ae_vldl32t.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vldsht.c | FileCheck %t/ae_vldsht.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vlel16t.c | FileCheck %t/ae_vlel16t.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vlel32t.c | FileCheck %t/ae_vlel32t.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vles16c.c | FileCheck %t/ae_vles16c.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vles16c_ic.c | FileCheck %t/ae_vles16c_ic.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_vles16c_ip.c | FileCheck %t/ae_vles16c_ip.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/ae_xor.c | FileCheck %t/ae_xor.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o 
- %t/ae_zalign64.c | FileCheck %t/ae_zalign64.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_bithead.c | FileCheck %t/rur_ae_bithead.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_bitptr.c | FileCheck %t/rur_ae_bitptr.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_bitsused.c | FileCheck %t/rur_ae_bitsused.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_cbegin0.c | FileCheck %t/rur_ae_cbegin0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_cend0.c | FileCheck %t/rur_ae_cend0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_cw_sd_no.c | FileCheck %t/rur_ae_cw_sd_no.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_cwrap.c | FileCheck %t/rur_ae_cwrap.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_first_ts.c | FileCheck %t/rur_ae_first_ts.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_nextoffset.c | FileCheck %t/rur_ae_nextoffset.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_overflow.c | FileCheck %t/rur_ae_overflow.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_ovf_sar.c | FileCheck %t/rur_ae_ovf_sar.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_sar.c | FileCheck %t/rur_ae_sar.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_searchdone.c | FileCheck %t/rur_ae_searchdone.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_tablesize.c | FileCheck %t/rur_ae_tablesize.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/rur_ae_ts_fts_bu_bp.c | FileCheck %t/rur_ae_ts_fts_bu_bp.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_bithead.c | FileCheck %t/wur_ae_bithead.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - 
%t/wur_ae_bitptr.c | FileCheck %t/wur_ae_bitptr.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_bitsused.c | FileCheck %t/wur_ae_bitsused.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_cbegin0.c | FileCheck %t/wur_ae_cbegin0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_cend0.c | FileCheck %t/wur_ae_cend0.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_cw_sd_no.c | FileCheck %t/wur_ae_cw_sd_no.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_cwrap.c | FileCheck %t/wur_ae_cwrap.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_first_ts.c | FileCheck %t/wur_ae_first_ts.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_nextoffset.c | FileCheck %t/wur_ae_nextoffset.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_overflow.c | FileCheck %t/wur_ae_overflow.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_ovf_sar.c | FileCheck %t/wur_ae_ovf_sar.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_sar.c | FileCheck %t/wur_ae_sar.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_searchdone.c | FileCheck %t/wur_ae_searchdone.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_tablesize.c | FileCheck %t/wur_ae_tablesize.c +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/wur_ae_ts_fts_bu_bp.c | FileCheck %t/wur_ae_ts_fts_bu_bp.c +//--- ae_abs16s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( 
vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_abs16s(ae_int16x4 ae_arth_v1) { +// CHECK-LABEL: test_ae_abs16s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.abs16s(<4 x i16> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_abs16s(ae_arth_v1); +} + +//--- ae_abs24s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_abs24s(ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_abs24s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.abs24s(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_abs24s(ae_arth_v1); +} + +//--- ae_abs32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char 
ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_abs32(ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_abs32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.abs32(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_abs32(ae_arth_v1); +} + +//--- ae_abs32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_abs32s(ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_abs32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.abs32s(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_abs32s(ae_arth_v1); +} + +//--- ae_abs64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_abs64(ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_abs64 +// CHECK: %[[RET:.*]] = 
{{(tail)?}} call <1 x i64> @llvm.xtensa.ae.abs64(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_abs64(ae_arth_v1); +} + +//--- ae_abs64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_abs64s(ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_abs64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.abs64s(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_abs64s(ae_arth_v1); +} + +//--- ae_add16.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_add16(ae_int16x4 ae_arth_v0,ae_int16x4 ae_arth_v1) { +// CHECK-LABEL: test_ae_add16 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.add16(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return 
__builtin_xtensa_ae_add16(ae_arth_v0, ae_arth_v1); +} + +//--- ae_add16s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_add16s(ae_int16x4 ae_arth_v0,ae_int16x4 ae_arth_v1) { +// CHECK-LABEL: test_ae_add16s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.add16s(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_add16s(ae_arth_v0, ae_arth_v1); +} + +//--- ae_add24s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_add24s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_add24s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.add24s(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_add24s(ae_arth_v0, ae_arth_v1); +} + +//--- 
ae_add32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_add32(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_add32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.add32(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_add32(ae_arth_v0, ae_arth_v1); +} + +//--- ae_add32_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_add32_hl_lh(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_add32_hl_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.add32.hl.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_add32_hl_lh(ae_arth_v0, ae_arth_v1); +} + +//--- ae_add32s.c + +typedef 
__attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_add32s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_add32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.add32s(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_add32s(ae_arth_v0, ae_arth_v1); +} + +//--- ae_add64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_add64(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_add64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.add64(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_add64(ae_arth_v0, ae_arth_v1); +} + +//--- ae_add64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_add64s(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_add64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.add64s(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_add64s(ae_arth_v0, ae_arth_v1); +} + +//--- ae_addbrba32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_addbrba32(int art,int ars) { +// CHECK-LABEL: test_ae_addbrba32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.addbrba32(i32 {{.*}}, i32 {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_addbrba32(art, ars); +} + +//--- ae_addsub32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int 
ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_addsub32(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_addsub32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.addsub32(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_addsub32(ae_arth_v0, ae_arth_v1); +} + +//--- ae_addsub32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_addsub32s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_addsub32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.addsub32s(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_addsub32s(ae_arth_v0, ae_arth_v1); +} + +//--- ae_and.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); 
+typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_and(ae_int64 ae_dr_to_dr_v0,ae_int64 ae_dr_to_dr_v1) { +// CHECK-LABEL: test_ae_and +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.and(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_and(ae_dr_to_dr_v0, ae_dr_to_dr_v1); +} + +//--- ae_cvt32x2f16_10.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_cvt32x2f16_10(ae_int16x4 ae_to_dr_v0) { +// CHECK-LABEL: test_ae_cvt32x2f16_10 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.cvt32x2f16.10(<4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_cvt32x2f16_10(ae_to_dr_v0); +} + +//--- ae_cvt32x2f16_32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short 
ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_cvt32x2f16_32(ae_int16x4 ae_to_dr_v0) { +// CHECK-LABEL: test_ae_cvt32x2f16_32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.cvt32x2f16.32(<4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_cvt32x2f16_32(ae_to_dr_v0); +} + +//--- ae_cvt48a32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_cvt48a32(int ars) { +// CHECK-LABEL: test_ae_cvt48a32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.cvt48a32(i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_cvt48a32(ars); +} + +//--- ae_cvt64a32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( 
vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_cvt64a32(int ars) { +// CHECK-LABEL: test_ae_cvt64a32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.cvt64a32(i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_cvt64a32(ars); +} + +//--- ae_cvt64f32_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_cvt64f32_h(ae_int32x2 ae_dr_to_dr_v0) { +// CHECK-LABEL: test_ae_cvt64f32_h +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.cvt64f32.h(<2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_cvt64f32_h(ae_dr_to_dr_v0); +} + +//--- ae_cvta32f24s_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef 
int immediate; + +int test_ae_cvta32f24s_h(ae_int32x2 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_cvta32f24s_h +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.cvta32f24s.h(<2 x i32> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_cvta32f24s_h(ae_dr_to_ar_v0); +} + +//--- ae_cvta32f24s_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_cvta32f24s_l(ae_int32x2 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_cvta32f24s_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.cvta32f24s.l(<2 x i32> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_cvta32f24s_l(ae_dr_to_ar_v0); +} + +//--- ae_cvtq56a32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_cvtq56a32s(int ars) { +// CHECK-LABEL: test_ae_cvtq56a32s +// CHECK: %[[RET:.*]] = {{(tail)?}} 
call <1 x i64> @llvm.xtensa.ae.cvtq56a32s(i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_cvtq56a32s(ars); +} + +//--- ae_cvtq56p32s_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_cvtq56p32s_h(ae_int32x2 ae_dr_to_dr_v0) { +// CHECK-LABEL: test_ae_cvtq56p32s_h +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.cvtq56p32s.h(<2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_cvtq56p32s_h(ae_dr_to_dr_v0); +} + +//--- ae_cvtq56p32s_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_cvtq56p32s_l(ae_int32x2 ae_dr_to_dr_v0) { +// CHECK-LABEL: test_ae_cvtq56p32s_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.cvtq56p32s.l(<2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return 
__builtin_xtensa_ae_cvtq56p32s_l(ae_dr_to_dr_v0); +} + +//--- ae_db.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_db(const short** ars,int art) { +// CHECK-LABEL: test_ae_db +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.db(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_db(ars, art); +} + +//--- ae_db_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_db_ic(const short** ars,int art) { +// CHECK-LABEL: test_ae_db_ic +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.db.ic(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_db_ic(ars, art); +} + +//--- ae_db_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; 
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_db_ip(const short** ars,int art) { +// CHECK-LABEL: test_ae_db_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.db.ip(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_db_ip(ars, art); +} + +//--- ae_dbi.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_dbi(const short** ars,immediate ae_ohba) { +// CHECK-LABEL: test_ae_dbi +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.dbi(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_dbi(ars, 1); +} + +//--- ae_dbi_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; 
+ +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_dbi_ic(const short** ars,immediate ae_ohba) { +// CHECK-LABEL: test_ae_dbi_ic +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.dbi.ic(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_dbi_ic(ars, 1); +} + +//--- ae_dbi_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_dbi_ip(const short** ars,immediate ae_ohba) { +// CHECK-LABEL: test_ae_dbi_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.dbi.ip(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_dbi_ip(ars, 1); +} + +//--- ae_div64d32_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 
__attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_div64d32_h(ae_int64* ae_arth_v,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_div64d32_h +// CHECK: %[[LD_AE_ARTH_V:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.div64d32.h(<1 x i64> %[[LD_AE_ARTH_V]], <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_div64d32_h(ae_arth_v, ae_arth_v1); +} + +//--- ae_div64d32_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_div64d32_l(ae_int64* ae_arth_v,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_div64d32_l +// CHECK: %[[LD_AE_ARTH_V:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.div64d32.l(<1 x i64> %[[LD_AE_ARTH_V]], <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_div64d32_l(ae_arth_v, ae_arth_v1); +} + +//--- ae_eq16.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 
__attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +xtbool4 test_ae_eq16(ae_int16x4 ae_cmpp_v0,ae_int16x4 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_eq16 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i1> @llvm.xtensa.ae.eq16(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <4 x i1> %[[RET]] +return __builtin_xtensa_ae_eq16(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_eq32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +xtbool2 test_ae_eq32(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_eq32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i1> @llvm.xtensa.ae.eq32(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i1> %[[RET]] +return __builtin_xtensa_ae_eq32(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_eq64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); 
+typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +xtbool test_ae_eq64(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_eq64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i1> @llvm.xtensa.ae.eq64(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i1> %[[RET]] +return __builtin_xtensa_ae_eq64(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_l16_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_l16_i(const ae_int16* ars,immediate ae_immls16) { +// CHECK-LABEL: test_ae_l16_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.l16.i(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_l16_i(ars, -16); +} + +//--- ae_l16_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); 
+typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16_ip(ae_int16x4* ae_ls_v,const ae_int16** ars,immediate ae_immls16) { +// CHECK-LABEL: test_ae_l16_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, ptr } @llvm.xtensa.ae.l16.ip(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16_ip(ae_ls_v, ars, -16); +} + +//--- ae_l16_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_l16_x(const ae_int16* ars,int art) { +// CHECK-LABEL: test_ae_l16_x +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.l16.x(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_l16_x(ars, art); +} + +//--- ae_l16_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); 
+typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16_xc(ae_int16x4* ae_ls_v,const ae_int16** ars,int art) { +// CHECK-LABEL: test_ae_l16_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, ptr } @llvm.xtensa.ae.l16.xc(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16_xc(ae_ls_v, ars, art); +} + +//--- ae_l16_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16_xp(ae_int16x4* ae_ls_v,const ae_int16** ars,int art) { +// CHECK-LABEL: test_ae_l16_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, ptr } @llvm.xtensa.ae.l16.xp(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16_xp(ae_ls_v, ars, art); +} + +//--- ae_l16m_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( 
vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l16m_i(const ae_int16* ars,immediate ae_immls16) { +// CHECK-LABEL: test_ae_l16m_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l16m.i(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l16m_i(ars, -16); +} + +//--- ae_l16m_iu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16m_iu(ae_int32x2* ae_ls_v,const ae_int16** ars,immediate ae_immls16) { +// CHECK-LABEL: test_ae_l16m_iu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l16m.iu(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16m_iu(ae_ls_v, ars, -16); +} + +//--- ae_l16m_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 
__attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l16m_x(const ae_int16* ars,int art) { +// CHECK-LABEL: test_ae_l16m_x +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l16m.x(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l16m_x(ars, art); +} + +//--- ae_l16m_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16m_xc(ae_int32x2* ae_ls_v,const ae_int16** ars,int art) { +// CHECK-LABEL: test_ae_l16m_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l16m.xc(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16m_xc(ae_ls_v, ars, art); +} + +//--- ae_l16m_xu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 
__attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16m_xu(ae_int32x2* ae_ls_v,const ae_int16** ars,int art) { +// CHECK-LABEL: test_ae_l16m_xu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l16m.xu(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16m_xu(ae_ls_v, ars, art); +} + +//--- ae_l16x2m_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l16x2m_i(const ae_int16x2* ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_l16x2m_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l16x2m.i(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l16x2m_i(ars, -32); +} + +//--- ae_l16x2m_iu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); 
+typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16x2m_iu(ae_int32x2* ae_ls_v,const ae_int16x2** ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_l16x2m_iu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l16x2m.iu(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16x2m_iu(ae_ls_v, ars, -32); +} + +//--- ae_l16x2m_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l16x2m_x(const ae_int16x2* ars,int art) { +// CHECK-LABEL: test_ae_l16x2m_x +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l16x2m.x(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l16x2m_x(ars, art); +} + +//--- ae_l16x2m_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 
__attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16x2m_xc(ae_int32x2* ae_ls_v,const ae_int16x2** ars,int art) { +// CHECK-LABEL: test_ae_l16x2m_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l16x2m.xc(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16x2m_xc(ae_ls_v, ars, art); +} + +//--- ae_l16x2m_xu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16x2m_xu(ae_int32x2* ae_ls_v,const ae_int16x2** ars,int art) { +// CHECK-LABEL: test_ae_l16x2m_xu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l16x2m.xu(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16x2m_xu(ae_ls_v, ars, art); +} + +//--- ae_l16x4_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool 
xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_l16x4_i(const ae_int16x4* ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_l16x4_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.l16x4.i(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_l16x4_i(ars, -64); +} + +//--- ae_l16x4_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16x4_ip(ae_int16x4* ae_ls_v,const ae_int16x4** ars,immediate ae_immls64pos) { +// CHECK-LABEL: test_ae_l16x4_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.ip(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16x4_ip(ae_ls_v, ars, 0); +} + +//--- ae_l16x4_ric.c + +typedef 
__attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16x4_ric(ae_int16x4* ae_ls_v,const ae_int16x4** ars) { +// CHECK-LABEL: test_ae_l16x4_ric +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.ric(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16x4_ric(ae_ls_v, ars); +} + +//--- ae_l16x4_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16x4_rip(ae_int16x4* ae_ls_v,const ae_int16x4** ars) { +// CHECK-LABEL: test_ae_l16x4_rip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.rip(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, ptr } %[[RET]], 0 
+// CHECK: ret void + __builtin_xtensa_ae_l16x4_rip(ae_ls_v, ars); +} + +//--- ae_l16x4_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_l16x4_x(const ae_int16x4* ars,int art) { +// CHECK-LABEL: test_ae_l16x4_x +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.l16x4.x(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_l16x4_x(ars, art); +} + +//--- ae_l16x4_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16x4_xc(ae_int16x4* ae_ls_v,const ae_int16x4** ars,int art) { +// CHECK-LABEL: test_ae_l16x4_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.xc(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, ptr } %[[RET]], 0 
+// CHECK: ret void + __builtin_xtensa_ae_l16x4_xc(ae_ls_v, ars, art); +} + +//--- ae_l16x4_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l16x4_xp(ae_int16x4* ae_ls_v,const ae_int16x4** ars,int art) { +// CHECK-LABEL: test_ae_l16x4_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, ptr } @llvm.xtensa.ae.l16x4.xp(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l16x4_xp(ae_ls_v, ars, art); +} + +//--- ae_l32_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l32_i(const ae_int32* ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_l32_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l32.i(ptr {{.*}}, i32 {{.*}}) 
+// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l32_i(ars, -32); +} + +//--- ae_l32_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32_ip(ae_int32x2* ae_ls_v,const ae_int32** ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_l32_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32.ip(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32_ip(ae_ls_v, ars, -32); +} + +//--- ae_l32_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l32_x(const ae_int32* ars,int art) { +// CHECK-LABEL: test_ae_l32_x +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l32.x(ptr {{.*}}, i32 {{.*}}) +// 
CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l32_x(ars, art); +} + +//--- ae_l32_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32_xc(ae_int32x2* ae_ls_v,const ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_l32_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32.xc(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32_xc(ae_ls_v, ars, art); +} + +//--- ae_l32_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32_xp(ae_int32x2* ae_ls_v,const ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_l32_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, 
ptr } @llvm.xtensa.ae.l32.xp(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32_xp(ae_ls_v, ars, art); +} + +//--- ae_l32f24_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l32f24_i(const ae_int32* ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_l32f24_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l32f24.i(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l32f24_i(ars, -32); +} + +//--- ae_l32f24_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32f24_ip(ae_int32x2* ae_ls_v,const ae_int32** ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_l32f24_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = 
{{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32f24.ip(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32f24_ip(ae_ls_v, ars, -32); +} + +//--- ae_l32f24_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l32f24_x(const ae_int32* ars,int art) { +// CHECK-LABEL: test_ae_l32f24_x +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l32f24.x(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l32f24_x(ars, art); +} + +//--- ae_l32f24_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32f24_xc(ae_int32x2* ae_ls_v,const ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_l32f24_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: 
%[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32f24.xc(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32f24_xc(ae_ls_v, ars, art); +} + +//--- ae_l32f24_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32f24_xp(ae_int32x2* ae_ls_v,const ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_l32f24_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32f24.xp(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32f24_xp(ae_ls_v, ars, art); +} + +//--- ae_l32m_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 
test_ae_l32m_i(const ae_int32* ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_l32m_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.l32m.i(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_l32m_i(ars, -32); +} + +//--- ae_l32m_iu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32m_iu(ae_int64* ae_ls_v,const ae_int32** ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_l32m_iu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, ptr } @llvm.xtensa.ae.l32m.iu(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32m_iu(ae_ls_v, ars, -32); +} + +//--- ae_l32m_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; 
+ +ae_int64 test_ae_l32m_x(const ae_int32* ars,int art) { +// CHECK-LABEL: test_ae_l32m_x +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.l32m.x(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_l32m_x(ars, art); +} + +//--- ae_l32m_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32m_xc(ae_int64* ae_ls_v,const ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_l32m_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, ptr } @llvm.xtensa.ae.l32m.xc(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32m_xc(ae_ls_v, ars, art); +} + +//--- ae_l32m_xu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void 
test_ae_l32m_xu(ae_int64* ae_ls_v,const ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_l32m_xu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, ptr } @llvm.xtensa.ae.l32m.xu(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32m_xu(ae_ls_v, ars, art); +} + +//--- ae_l32x2_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l32x2_i(const ae_int32x2* ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_l32x2_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l32x2.i(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l32x2_i(ars, -64); +} + +//--- ae_l32x2_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int 
immediate; + +void test_ae_l32x2_ip(ae_int32x2* ae_ls_v,const ae_int32x2** ars,immediate ae_immls64pos) { +// CHECK-LABEL: test_ae_l32x2_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2.ip(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32x2_ip(ae_ls_v, ars, 0); +} + +//--- ae_l32x2_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32x2_ric(ae_int32x2* ae_ls_v,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_l32x2_ric +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2.ric(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32x2_ric(ae_ls_v, ars); +} + +//--- ae_l32x2_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); 
+typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32x2_rip(ae_int32x2* ae_ls_v,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_l32x2_rip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2.rip(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32x2_rip(ae_ls_v, ars); +} + +//--- ae_l32x2_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l32x2_x(const ae_int32x2* ars,int art) { +// CHECK-LABEL: test_ae_l32x2_x +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l32x2.x(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l32x2_x(ars, art); +} + +//--- ae_l32x2_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long 
long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32x2_xc(ae_int32x2* ae_ls_v,const ae_int32x2** ars,int art) { +// CHECK-LABEL: test_ae_l32x2_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2.xc(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32x2_xc(ae_ls_v, ars, art); +} + +//--- ae_l32x2_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32x2_xp(ae_int32x2* ae_ls_v,const ae_int32x2** ars,int art) { +// CHECK-LABEL: test_ae_l32x2_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2.xp(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32x2_xp(ae_ls_v, ars, art); +} + +//--- ae_l32x2f24_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( 
vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l32x2f24_i(const ae_int32x2* ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_l32x2f24_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l32x2f24.i(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l32x2f24_i(ars, -64); +} + +//--- ae_l32x2f24_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32x2f24_ip(ae_int32x2* ae_ls_v,const ae_int32x2** ars,immediate ae_immls64pos) { +// CHECK-LABEL: test_ae_l32x2f24_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2f24.ip(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32x2f24_ip(ae_ls_v, ars, 0); +} + +//--- ae_l32x2f24_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 
__attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32x2f24_ric(ae_int32x2* ae_ls_v,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_l32x2f24_ric +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2f24.ric(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32x2f24_ric(ae_ls_v, ars); +} + +//--- ae_l32x2f24_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32x2f24_rip(ae_int32x2* ae_ls_v,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_l32x2f24_rip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2f24.rip(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32x2f24_rip(ae_ls_v, ars); +} + +//--- ae_l32x2f24_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_l32x2f24_x(const ae_int32x2* ars,int art) { +// CHECK-LABEL: test_ae_l32x2f24_x +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.l32x2f24.x(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_l32x2f24_x(ars, art); +} + +//--- ae_l32x2f24_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32x2f24_xc(ae_int32x2* ae_ls_v,const ae_int32x2** ars,int art) { +// CHECK-LABEL: test_ae_l32x2f24_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2f24.xc(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32x2f24_xc(ae_ls_v, ars, art); +} + +//--- ae_l32x2f24_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l32x2f24_xp(ae_int32x2* ae_ls_v,const ae_int32x2** ars,int art) { +// CHECK-LABEL: test_ae_l32x2f24_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, ptr } @llvm.xtensa.ae.l32x2f24.xp(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l32x2f24_xp(ae_ls_v, ars, art); +} + +//--- ae_l64_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_l64_i(const ae_int64* ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_l64_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.l64.i(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_l64_i(ars, -64); +} + +//--- ae_l64_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool 
xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l64_ip(ae_int64* ae_ls_v,const ae_int64** ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_l64_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, ptr } @llvm.xtensa.ae.l64.ip(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l64_ip(ae_ls_v, ars, -64); +} + +//--- ae_l64_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_l64_x(const ae_int64* ars,int art) { +// CHECK-LABEL: test_ae_l64_x +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.l64.x(ptr {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_l64_x(ars, art); +} + +//--- ae_l64_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; 
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l64_xc(ae_int64* ae_ls_v,const ae_int64** ars,int art) { +// CHECK-LABEL: test_ae_l64_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, ptr } @llvm.xtensa.ae.l64.xc(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_l64_xc(ae_ls_v, ars, art); +} + +//--- ae_l64_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_l64_xp(ae_int64* ae_ls_v,const ae_int64** ars,int art) { +// CHECK-LABEL: test_ae_l64_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, ptr } @llvm.xtensa.ae.l64.xp(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, ptr } %[[RET]], 0 +// CHECK: ret void + 
__builtin_xtensa_ae_l64_xp(ae_ls_v, ars, art); +} + +//--- ae_la16x4_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la16x4_ic(ae_int16x4* ae_ls_av,ae_valign* ae_ls_uu,const ae_int16x4** ars) { +// CHECK-LABEL: test_ae_la16x4_ic +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, <8 x i8>, ptr } @llvm.xtensa.ae.la16x4.ic(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la16x4_ic(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la16x4_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la16x4_ip(ae_int16x4* ae_ls_av,ae_valign* ae_ls_uu,const ae_int16x4** ars) { +// 
CHECK-LABEL: test_ae_la16x4_ip +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, <8 x i8>, ptr } @llvm.xtensa.ae.la16x4.ip(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la16x4_ip(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la16x4_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la16x4_ric(ae_int16x4* ae_ls_av,ae_valign* ae_ls_uu,const ae_int16x4** ars) { +// CHECK-LABEL: test_ae_la16x4_ric +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, <8 x i8>, ptr } @llvm.xtensa.ae.la16x4.ric(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la16x4_ric(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la16x4_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( 
vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la16x4_rip(ae_int16x4* ae_ls_av,ae_valign* ae_ls_uu,const ae_int16x4** ars) { +// CHECK-LABEL: test_ae_la16x4_rip +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <4 x i16>, <8 x i8>, ptr } @llvm.xtensa.ae.la16x4.rip(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <4 x i16>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la16x4_rip(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la16x4neg_pc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la16x4neg_pc(ae_valign* ae_ls_uu,const ae_int16x4** ars) { +// CHECK-LABEL: test_ae_la16x4neg_pc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.la16x4neg.pc(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la16x4neg_pc(ae_ls_uu, ars); +} + +//--- ae_la16x4pos_pc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la16x4pos_pc(ae_valign* ae_ls_uu,const ae_int16x4** ars) { +// CHECK-LABEL: test_ae_la16x4pos_pc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.la16x4pos.pc(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la16x4pos_pc(ae_ls_uu, ars); +} + +//--- ae_la24_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la24_ic(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) { +// CHECK-LABEL: test_ae_la24_ic +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24.ic(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: 
%[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la24_ic(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la24_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la24_ip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) { +// CHECK-LABEL: test_ae_la24_ip +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24.ip(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la24_ip(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la24_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void 
test_ae_la24_ric(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) { +// CHECK-LABEL: test_ae_la24_ric +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24.ric(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la24_ric(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la24_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la24_rip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) { +// CHECK-LABEL: test_ae_la24_rip +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24.rip(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la24_rip(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la24neg_pc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 
__attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la24neg_pc(ae_valign* ae_ls_uu,const void** ars) { +// CHECK-LABEL: test_ae_la24neg_pc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.la24neg.pc(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la24neg_pc(ae_ls_uu, ars); +} + +//--- ae_la24pos_pc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la24pos_pc(ae_valign* ae_ls_uu,const void** ars) { +// CHECK-LABEL: test_ae_la24pos_pc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.la24pos.pc(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la24pos_pc(ae_ls_uu, ars); +} + +//--- ae_la24x2_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la24x2_ic(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) { +// CHECK-LABEL: test_ae_la24x2_ic +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24x2.ic(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la24x2_ic(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la24x2_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la24x2_ip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) { +// CHECK-LABEL: test_ae_la24x2_ip +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } 
@llvm.xtensa.ae.la24x2.ip(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la24x2_ip(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la24x2_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la24x2_ric(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) { +// CHECK-LABEL: test_ae_la24x2_ric +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24x2.ric(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la24x2_ric(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la24x2_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned 
char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la24x2_rip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const void** ars) { +// CHECK-LABEL: test_ae_la24x2_rip +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la24x2.rip(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la24x2_rip(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la24x2neg_pc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la24x2neg_pc(ae_valign* ae_ls_uu,const void** ars) { +// CHECK-LABEL: test_ae_la24x2neg_pc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.la24x2neg.pc(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la24x2neg_pc(ae_ls_uu, ars); +} + +//--- ae_la24x2pos_pc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); 
+typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la24x2pos_pc(ae_valign* ae_ls_uu,const void** ars) { +// CHECK-LABEL: test_ae_la24x2pos_pc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.la24x2pos.pc(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la24x2pos_pc(ae_ls_uu, ars); +} + +//--- ae_la32x2_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la32x2_ic(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_la32x2_ic +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2.ic(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la32x2_ic(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la32x2_ip.c + +typedef __attribute__((ext_vector_type(1))) 
_Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la32x2_ip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_la32x2_ip +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2.ip(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la32x2_ip(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la32x2_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la32x2_ric(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_la32x2_ric +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load 
ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2.ric(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la32x2_ric(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la32x2_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la32x2_rip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_la32x2_rip +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2.rip(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la32x2_rip(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la32x2f24_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( 
vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la32x2f24_ic(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_la32x2f24_ic +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2f24.ic(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la32x2f24_ic(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la32x2f24_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la32x2f24_ip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_la32x2f24_ip +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2f24.ip(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la32x2f24_ip(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la32x2f24_ric.c + +typedef 
__attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la32x2f24_ric(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_la32x2f24_ric +// CHECK: %[[LD_AE_LS_UU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2f24.ric(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la32x2f24_ric(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la32x2f24_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la32x2f24_rip(ae_int32x2* ae_ls_av,ae_valign* ae_ls_uu,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_la32x2f24_rip +// CHECK: %[[LD_AE_LS_UU:.*]] = 
load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <8 x i8>, ptr } @llvm.xtensa.ae.la32x2f24.rip(<8 x i8> %[[LD_AE_LS_UU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la32x2f24_rip(ae_ls_av, ae_ls_uu, ars); +} + +//--- ae_la32x2neg_pc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la32x2neg_pc(ae_valign* ae_ls_uu,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_la32x2neg_pc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.la32x2neg.pc(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la32x2neg_pc(ae_ls_uu, ars); +} + +//--- ae_la32x2pos_pc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( 
vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_la32x2pos_pc(ae_valign* ae_ls_uu,const ae_int32x2** ars) { +// CHECK-LABEL: test_ae_la32x2pos_pc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.la32x2pos.pc(ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_la32x2pos_pc(ae_ls_uu, ars); +} + +//--- ae_la64_pp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_valign test_ae_la64_pp(const void* ars) { +// CHECK-LABEL: test_ae_la64_pp +// CHECK: %[[RET:.*]] = {{(tail)?}} call <8 x i8> @llvm.xtensa.ae.la64.pp(ptr {{.*}}) +// CHECK: %[[CAST:.*]] = bitcast <8 x i8> %[[RET]] to i64 +// CHECK: ret i64 %[[CAST]] +return __builtin_xtensa_ae_la64_pp(ars); +} + +//--- ae_lalign64_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long 
ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_valign test_ae_lalign64_i(const ae_valign* ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_lalign64_i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <8 x i8> @llvm.xtensa.ae.lalign64.i(ptr {{.*}}, i32 {{.*}}) +// CHECK: %[[CAST:.*]] = bitcast <8 x i8> %[[RET]] to i64 +// CHECK: ret i64 %[[CAST]] +return __builtin_xtensa_ae_lalign64_i(ars, -64); +} + +//--- ae_lb.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_lb(int art) { +// CHECK-LABEL: test_ae_lb +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.lb(i32 {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_lb(art); +} + +//--- ae_lbi.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int 
test_ae_lbi(immediate ae_ohba) { +// CHECK-LABEL: test_ae_lbi +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.lbi(i32 {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_lbi(1); +} + +//--- ae_lbk.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_lbk(int ars,int art) { +// CHECK-LABEL: test_ae_lbk +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.lbk(i32 {{.*}}, i32 {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_lbk(ars, art); +} + +//--- ae_lbki.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_lbki(int ars,immediate ae_ohba) { +// CHECK-LABEL: test_ae_lbki +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.lbki(i32 {{.*}}, i32 {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_lbki(ars, 1); +} + +//--- ae_lbs.c + 
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_lbs(int art) { +// CHECK-LABEL: test_ae_lbs +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.lbs(i32 {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_lbs(art); +} + +//--- ae_lbsi.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_lbsi(immediate ae_ohba) { +// CHECK-LABEL: test_ae_lbsi +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.lbsi(i32 {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_lbsi(1); +} + +//--- ae_le16.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( 
vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +xtbool4 test_ae_le16(ae_int16x4 ae_cmpp_v0,ae_int16x4 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_le16 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i1> @llvm.xtensa.ae.le16(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <4 x i1> %[[RET]] +return __builtin_xtensa_ae_le16(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_le32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +xtbool2 test_ae_le32(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_le32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i1> @llvm.xtensa.ae.le32(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i1> %[[RET]] +return __builtin_xtensa_ae_le32(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_le64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short 
ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +xtbool test_ae_le64(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_le64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i1> @llvm.xtensa.ae.le64(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i1> %[[RET]] +return __builtin_xtensa_ae_le64(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_lt16.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +xtbool4 test_ae_lt16(ae_int16x4 ae_cmpp_v0,ae_int16x4 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_lt16 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i1> @llvm.xtensa.ae.lt16(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <4 x i1> %[[RET]] +return __builtin_xtensa_ae_lt16(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_lt32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( 
vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +xtbool2 test_ae_lt32(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_lt32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i1> @llvm.xtensa.ae.lt32(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i1> %[[RET]] +return __builtin_xtensa_ae_lt32(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_lt64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +xtbool test_ae_lt64(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_lt64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i1> @llvm.xtensa.ae.lt64(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i1> %[[RET]] +return __builtin_xtensa_ae_lt64(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_max32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned 
char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_max32(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_max32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.max32(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_max32(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_max64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_max64(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_max64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.max64(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_max64(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_maxabs32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + 
+ae_int32x2 test_ae_maxabs32s(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_maxabs32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.maxabs32s(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_maxabs32s(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_maxabs64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_maxabs64s(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_maxabs64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.maxabs64s(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_maxabs64s(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_min32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_min32(ae_int32x2 
ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_min32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.min32(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_min32(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_min64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_min64(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_min64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.min64(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_min64(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_minabs32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_minabs32s(ae_int32x2 ae_cmpp_v0,ae_int32x2 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_minabs32s 
+// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.minabs32s(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_minabs32s(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_minabs64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_minabs64s(ae_int64 ae_cmpp_v0,ae_int64 ae_cmpp_v1) { +// CHECK-LABEL: test_ae_minabs64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.minabs64s(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_minabs64s(ae_cmpp_v0, ae_cmpp_v1); +} + +//--- ae_mov.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mov(ae_int64 ae_to_dr_v0) { +// CHECK-LABEL: test_ae_mov +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mov(<1 x i64> 
{{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mov(ae_to_dr_v0); +} + +//--- ae_movad16_0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_movad16_0(ae_int16x4 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_movad16_0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.movad16.0(<4 x i16> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_movad16_0(ae_dr_to_ar_v0); +} + +//--- ae_movad16_1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_movad16_1(ae_int16x4 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_movad16_1 +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.movad16.1(<4 x i16> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_movad16_1(ae_dr_to_ar_v0); +} + +//--- ae_movad16_2.c + +typedef __attribute__((ext_vector_type(1))) 
_Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_movad16_2(ae_int16x4 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_movad16_2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.movad16.2(<4 x i16> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_movad16_2(ae_dr_to_ar_v0); +} + +//--- ae_movad16_3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_movad16_3(ae_int16x4 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_movad16_3 +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.movad16.3(<4 x i16> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_movad16_3(ae_dr_to_ar_v0); +} + +//--- ae_movad32_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 
__attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_movad32_h(ae_int32x2 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_movad32_h +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.movad32.h(<2 x i32> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_movad32_h(ae_dr_to_ar_v0); +} + +//--- ae_movad32_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_movad32_l(ae_int32x2 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_movad32_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.movad32.l(<2 x i32> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_movad32_l(ae_dr_to_ar_v0); +} + +//--- ae_movalign.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef 
short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_valign test_ae_movalign(ae_valign ae_uu_v) { +// CHECK-LABEL: test_ae_movalign +// CHECK: %[[RET:.*]] = {{(tail)?}} call <8 x i8> @llvm.xtensa.ae.movalign(<8 x i8> {{.*}}) +// CHECK: %[[CAST:.*]] = bitcast <8 x i8> %[[RET]] to i64 +// CHECK: ret i64 %[[CAST]] +return __builtin_xtensa_ae_movalign(ae_uu_v); +} + +//--- ae_movda16.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_movda16(int ars) { +// CHECK-LABEL: test_ae_movda16 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.movda16(i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_movda16(ars); +} + +//--- ae_movda16x2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long 
ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_movda16x2(int ars,int art) { +// CHECK-LABEL: test_ae_movda16x2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.movda16x2(i32 {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_movda16x2(ars, art); +} + +//--- ae_movda32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32 test_ae_movda32(int ars) { +// CHECK-LABEL: test_ae_movda32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i32> @llvm.xtensa.ae.movda32(i32 {{.*}}) +// CHECK: ret <1 x i32> %[[RET]] +return __builtin_xtensa_ae_movda32(ars); +} + +//--- ae_movda32x2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_movda32x2(int ars,int 
art) { +// CHECK-LABEL: test_ae_movda32x2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.movda32x2(i32 {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_movda32x2(ars, art); +} + +//--- ae_movf16x4.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_movf16x4(ae_int16x4* ae_cmov_v,ae_int16x4 ae_cmov_v0,xtbool4 bt4) { +// CHECK-LABEL: test_ae_movf16x4 +// CHECK: %[[LD_AE_CMOV_V:.*]] = load <4 x i16>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.movf16x4(<4 x i16> %[[LD_AE_CMOV_V]], <4 x i16> {{.*}}, <4 x i1> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_movf16x4(ae_cmov_v, ae_cmov_v0, bt4); +} + +//--- ae_movf32x2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_movf32x2(ae_int32x2* ae_cmov_v,ae_int32x2 
ae_cmov_v0,xtbool2 bt2) { +// CHECK-LABEL: test_ae_movf32x2 +// CHECK: %[[LD_AE_CMOV_V:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.movf32x2(<2 x i32> %[[LD_AE_CMOV_V]], <2 x i32> {{.*}}, <2 x i1> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_movf32x2(ae_cmov_v, ae_cmov_v0, bt2); +} + +//--- ae_movf64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_movf64(ae_int64* ae_cmov_v,ae_int64 ae_cmov_v0,xtbool bt) { +// CHECK-LABEL: test_ae_movf64 +// CHECK: %[[LD_AE_CMOV_V:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.movf64(<1 x i64> %[[LD_AE_CMOV_V]], <1 x i64> {{.*}}, <1 x i1> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_movf64(ae_cmov_v, ae_cmov_v0, bt); +} + +//--- ae_movi.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( 
vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_movi(immediate movi_imm) { +// CHECK-LABEL: test_ae_movi +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.movi(i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_movi(-16); +} + +//--- ae_movt16x4.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_movt16x4(ae_int16x4* ae_cmov_v,ae_int16x4 ae_cmov_v0,xtbool4 bt4) { +// CHECK-LABEL: test_ae_movt16x4 +// CHECK: %[[LD_AE_CMOV_V:.*]] = load <4 x i16>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.movt16x4(<4 x i16> %[[LD_AE_CMOV_V]], <4 x i16> {{.*}}, <4 x i1> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_movt16x4(ae_cmov_v, ae_cmov_v0, bt4); +} + +//--- ae_movt32x2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + 
+void test_ae_movt32x2(ae_int32x2* ae_cmov_v,ae_int32x2 ae_cmov_v0,xtbool2 bt2) { +// CHECK-LABEL: test_ae_movt32x2 +// CHECK: %[[LD_AE_CMOV_V:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.movt32x2(<2 x i32> %[[LD_AE_CMOV_V]], <2 x i32> {{.*}}, <2 x i1> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_movt32x2(ae_cmov_v, ae_cmov_v0, bt2); +} + +//--- ae_movt64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_movt64(ae_int64* ae_cmov_v,ae_int64 ae_cmov_v0,xtbool bt) { +// CHECK-LABEL: test_ae_movt64 +// CHECK: %[[LD_AE_CMOV_V:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.movt64(<1 x i64> %[[LD_AE_CMOV_V]], <1 x i64> {{.*}}, <1 x i1> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_movt64(ae_cmov_v, ae_cmov_v0, bt); +} + +//--- ae_mul16x4.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); 
+typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mul16x4(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) { +// CHECK-LABEL: test_ae_mul16x4 +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.mul16x4(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <2 x i32> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mul16x4(ae_mul_q1, ae_mul_q0, ae_mul_d1, ae_mul_d0); +} + +//--- ae_mul32_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32_hh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32_hh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32.hh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32_hh(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( 
vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32_lh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32_lh(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32_ll(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32_ll(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 
__attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mul32_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mul32u_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32u_ll(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32u_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32u.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32u_ll(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32x16_h0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( 
vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_h0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32x16_h0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.h0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_h0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32x16_h0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_h0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mul32x16_h0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.h0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_h0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mul32x16_h1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 
__attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_h1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32x16_h1 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.h1(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_h1(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32x16_h1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_h1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mul32x16_h1_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.h1.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_h1_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mul32x16_h2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_h2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32x16_h2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.h2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_h2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32x16_h2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_h2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mul32x16_h2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.h2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_h2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mul32x16_h3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_h3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32x16_h3 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.h3(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_h3(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32x16_h3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_h3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mul32x16_h3_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.h3.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_h3_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mul32x16_l0.c + +typedef 
__attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32x16_l0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.l0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_l0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32x16_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mul32x16_l0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.l0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return 
__builtin_xtensa_ae_mul32x16_l0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mul32x16_l1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_l1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32x16_l1 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.l1(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_l1(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32x16_l1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_l1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mul32x16_l1_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.l1.s2(<2 x i32> {{.*}}, 
<4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_l1_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mul32x16_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32x16_l2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.l2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_l2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32x16_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mul32x16_l2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call 
<1 x i64> @llvm.xtensa.ae.mul32x16.l2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_l2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mul32x16_l3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_l3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mul32x16_l3 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.l3(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_l3(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mul32x16_l3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mul32x16_l3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: 
test_ae_mul32x16_l3_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mul32x16.l3.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mul32x16_l3_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mula16x4.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula16x4(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) { +// CHECK-LABEL: test_ae_mula16x4 +// CHECK: %[[LD_AE_MUL_Q1:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.mula16x4(<2 x i32> %[[LD_AE_MUL_Q1]], <2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <2 x i32> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mula16x4(ae_mul_q1, ae_mul_q0, ae_mul_d1, ae_mul_d0); +} + +//--- ae_mula32_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 
__attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32_hh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32.hh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32_hh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32_lh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32.lh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32_lh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool 
xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32_ll +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32.ll(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32_ll(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mula32_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: 
%[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mula32u_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32u_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32u_ll +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32u.ll(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32u_ll(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32x16_h0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( 
vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_h0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32x16_h0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.h0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_h0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32x16_h0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_h0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mula32x16_h0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.h0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_h0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mula32x16_h1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + 
+typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_h1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32x16_h1 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.h1(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_h1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32x16_h1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_h1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mula32x16_h1_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.h1.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> 
{{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_h1_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mula32x16_h2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_h2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32x16_h2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.h2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_h2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32x16_h2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + 
+void test_ae_mula32x16_h2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mula32x16_h2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.h2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_h2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mula32x16_h3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_h3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32x16_h3 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.h3(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_h3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32x16_h3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); 
+typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_h3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mula32x16_h3_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.h3.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_h3_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mula32x16_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32x16_l0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, 
opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32x16_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mula32x16_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mula32x16_l1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32x16_l1 +// 
CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.l1(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_l1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32x16_l1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mula32x16_l1_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.l1.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_l1_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mula32x16_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); 
+typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32x16_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32x16_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mula32x16_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mula32x16_l3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mula32x16_l3 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mula32x16.l3(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_l3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mula32x16_l3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mula32x16_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mula32x16_l3_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> 
@llvm.xtensa.ae.mula32x16.l3.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mula32x16_l3_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaad24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaad24_hh_ll +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad24.hh.ll(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad24_hh_ll(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaad24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void 
test_ae_mulaad24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaad24_hh_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad24.hh.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad24_hh_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaad24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaad24_hl_lh +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad24.hl.lh(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad24_hl_lh(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaad24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( 
vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaad24_hl_lh_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad24.hl.lh.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad24_hl_lh_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaad32x16_h0_l1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad32x16_h0_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaad32x16_h0_l1 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h0.l1(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad32x16_h0_l1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaad32x16_h0_l1_s2.c + +typedef 
__attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad32x16_h0_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaad32x16_h0_l1_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h0.l1.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad32x16_h0_l1_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaad32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaad32x16_h1_l0 +// CHECK: 
%[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h1.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad32x16_h1_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaad32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaad32x16_h1_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h1.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad32x16_h1_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaad32x16_h2_l3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 
__attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad32x16_h2_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaad32x16_h2_l3 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h2.l3(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad32x16_h2_l3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaad32x16_h2_l3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad32x16_h2_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaad32x16_h2_l3_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h2.l3.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad32x16_h2_l3_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaad32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool 
xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaad32x16_h3_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h3.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad32x16_h3_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaad32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaad32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaad32x16_h3_l2_s2 +// CHECK: 
%[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaad32x16.h3.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaad32x16_h3_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaafd16ss_11_00.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd16ss_11_00(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaafd16ss_11_00 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaafd16ss.11.00(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd16ss_11_00(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaafd16ss_11_00_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( 
vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd16ss_11_00_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaafd16ss_11_00_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaafd16ss.11.00.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd16ss_11_00_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaafd16ss_13_02.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd16ss_13_02(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaafd16ss_13_02 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaafd16ss.13.02(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd16ss_13_02(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaafd16ss_13_02_s2.c + 
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd16ss_13_02_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaafd16ss_13_02_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaafd16ss.13.02.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd16ss_13_02_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaafd16ss_33_22.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd16ss_33_22(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaafd16ss_33_22 +// CHECK: 
%[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaafd16ss.33.22(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd16ss_33_22(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaafd16ss_33_22_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd16ss_33_22_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaafd16ss_33_22_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaafd16ss.33.22.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd16ss_33_22_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaafd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 
__attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaafd24_hh_ll +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd24.hh.ll(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd24_hh_ll(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaafd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaafd24_hh_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd24.hh.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd24_hh_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaafd24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int 
ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaafd24_hl_lh +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd24.hl.lh(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd24_hl_lh(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaafd24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaafd24_hl_lh_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd24.hl.lh.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd24_hl_lh_s2(ae_mul_S2_q0, 
ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaafd32x16_h0_l1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd32x16_h0_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaafd32x16_h0_l1 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h0.l1(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd32x16_h0_l1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaafd32x16_h0_l1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd32x16_h0_l1_s2(ae_int64* 
ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaafd32x16_h0_l1_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h0.l1.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd32x16_h0_l1_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaafd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaafd32x16_h1_l0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h1.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaafd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( 
vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaafd32x16_h1_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h1.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd32x16_h1_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaafd32x16_h2_l3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd32x16_h2_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaafd32x16_h2_l3 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h2.l3(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulaafd32x16_h2_l3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaafd32x16_h2_l3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd32x16_h2_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaafd32x16_h2_l3_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h2.l3.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd32x16_h2_l3_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaafd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd32x16_h3_l2(ae_int64* 
opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaafd32x16_h3_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h3.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaafd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaafd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaafd32x16_h3_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaafd32x16.h3.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaafd32x16_h3_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulac24.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( 
vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulac24(ae_int32x2* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulac24 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulac24(<2 x i32> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulac24(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulac32x16_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulac32x16_h(ae_int32x2* opnd_ae_sem_mul_x4_q0,ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) { +// CHECK-LABEL: test_ae_mulac32x16_h +// CHECK: %[[LD_OPND_AE_SEM_MUL_X4_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulac32x16.h(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X4_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulac32x16_h(opnd_ae_sem_mul_x4_q0, opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1); +} + +//--- ae_mulac32x16_l.c + +typedef 
__attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulac32x16_l(ae_int32x2* opnd_ae_sem_mul_x4_q0,ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) { +// CHECK-LABEL: test_ae_mulac32x16_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X4_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulac32x16.l(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X4_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulac32x16_l(opnd_ae_sem_mul_x4_q0, opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1); +} + +//--- ae_mulaf16ss_00.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_00(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_00 +// CHECK: 
%[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.00(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_00(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf16ss_00_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_00_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_00_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.00.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_00_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf16ss_10.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); 
+typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_10(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_10 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.10(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_10(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaf16ss_11.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_11(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_11 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.11(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_11(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaf16ss_20.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( 
vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_20(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_20 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.20(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_20(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaf16ss_21.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_21(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_21 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.21(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_21(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaf16ss_22.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_22(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_22 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.22(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_22(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaf16ss_30.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_30(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_30 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.30(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// 
CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_30(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaf16ss_31.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_31(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_31 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.31(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_31(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaf16ss_32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_32(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_32 +// CHECK: 
%[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.32(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_32(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaf16ss_33.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16ss_33(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulaf16ss_33 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulaf16ss.33(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf16ss_33(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulaf16x4ss.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( 
vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf16x4ss(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) { +// CHECK-LABEL: test_ae_mulaf16x4ss +// CHECK: %[[LD_AE_MUL_Q1:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.mulaf16x4ss(<2 x i32> %[[LD_AE_MUL_Q1]], <2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <2 x i32> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulaf16x4ss(ae_mul_q1, ae_mul_q0, ae_mul_d1, ae_mul_d0); +} + +//--- ae_mulaf32r_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32r_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32r_hh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32r.hh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32r_hh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32r_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32r_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32r_lh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32r.lh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32r_lh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32r_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32r_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32r_ll +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x 
i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32r.ll(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32r_ll(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32r_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32r_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf32r_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32r.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32r_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf32s_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); 
+typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32s_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32s_hh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32s.hh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32s_hh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32s_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32s_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32s_lh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32s.lh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32s_lh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32s_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32s_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32s_ll +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32s.ll(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32s_ll(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32s_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32s_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf32s_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> 
@llvm.xtensa.ae.mulaf32s.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32s_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf32x16_h0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_h0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_h0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_h0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32x16_h0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef 
unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_h0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_h0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_h0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf32x16_h1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_h1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_h1 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h1(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_h1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32x16_h1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int 
ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_h1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_h1_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h1.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_h1_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf32x16_h2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_h2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_h2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void 
+ __builtin_xtensa_ae_mulaf32x16_h2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32x16_h2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_h2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_h2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_h2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf32x16_h3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_h3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 
opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_h3 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h3(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_h3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32x16_h3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_h3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_h3_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.h3.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_h3_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf32x16_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); 
+typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_l0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32x16_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf32x16_l1.c + 
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_l1 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l1(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_l1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32x16_l1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_l1_s2 +// 
CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l1.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_l1_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf32x16_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32x16_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short 
ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf32x16_l3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_l3 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l3(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_l3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf32x16_l3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf32x16_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf32x16_l3_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf32x16.l3.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf32x16_l3_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf48q32sp16s_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf48q32sp16s_l(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf48q32sp16s_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 
x i64> @llvm.xtensa.ae.mulaf48q32sp16s.l(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf48q32sp16s_l(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf48q32sp16s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf48q32sp16s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf48q32sp16s_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf48q32sp16s.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf48q32sp16s_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaf48q32sp16u_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned 
char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf48q32sp16u_l(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulaf48q32sp16u_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf48q32sp16u.l(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf48q32sp16u_l(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaf48q32sp16u_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaf48q32sp16u_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaf48q32sp16u_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaf48q32sp16u.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaf48q32sp16u_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulafc24ra.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; 
+ +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafc24ra(ae_int32x2* opnd_ae_sem_mul_x4_q0,ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int32x2 opnd_ae_sem_mul_x4_d1) { +// CHECK-LABEL: test_ae_mulafc24ra +// CHECK: %[[LD_OPND_AE_SEM_MUL_X4_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafc24ra(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X4_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafc24ra(opnd_ae_sem_mul_x4_q0, opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1); +} + +//--- ae_mulafc32x16ras_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafc32x16ras_h(ae_int32x2* opnd_ae_sem_mul_x4_q0,ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) { +// CHECK-LABEL: test_ae_mulafc32x16ras_h +// CHECK: %[[LD_OPND_AE_SEM_MUL_X4_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafc32x16ras.h(<2 x i32> 
%[[LD_OPND_AE_SEM_MUL_X4_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafc32x16ras_h(opnd_ae_sem_mul_x4_q0, opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1); +} + +//--- ae_mulafc32x16ras_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafc32x16ras_l(ae_int32x2* opnd_ae_sem_mul_x4_q0,ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) { +// CHECK-LABEL: test_ae_mulafc32x16ras_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X4_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafc32x16ras.l(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X4_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafc32x16ras_l(opnd_ae_sem_mul_x4_q0, opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1); +} + +//--- ae_mulafd24x2_fir_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char 
ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafd24x2_fir_h(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int32x2 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulafd24x2_fir_h +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[LD_AE_MUL_Q1:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd24x2.fir.h(<1 x i64> %[[LD_AE_MUL_Q0]], <1 x i64> %[[LD_AE_MUL_Q1]], <2 x i32> {{.*}}, <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulafd24x2_fir_h(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulafd24x2_fir_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafd24x2_fir_l(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int32x2 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulafd24x2_fir_l +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[LD_AE_MUL_Q1:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd24x2.fir.l(<1 x i64> %[[LD_AE_MUL_Q0]], <1 x i64> %[[LD_AE_MUL_Q1]], <2 x i32> {{.*}}, <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x 
i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulafd24x2_fir_l(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulafd32x16x2_fir_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafd32x16x2_fir_hh(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulafd32x16x2_fir_hh +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[LD_AE_MUL_Q1:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd32x16x2.fir.hh(<1 x i64> %[[LD_AE_MUL_Q0]], <1 x i64> %[[LD_AE_MUL_Q1]], <2 x i32> {{.*}}, <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulafd32x16x2_fir_hh(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulafd32x16x2_fir_hl.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef 
short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafd32x16x2_fir_hl(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulafd32x16x2_fir_hl +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[LD_AE_MUL_Q1:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd32x16x2.fir.hl(<1 x i64> %[[LD_AE_MUL_Q0]], <1 x i64> %[[LD_AE_MUL_Q1]], <2 x i32> {{.*}}, <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulafd32x16x2_fir_hl(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulafd32x16x2_fir_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafd32x16x2_fir_lh(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulafd32x16x2_fir_lh +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[LD_AE_MUL_Q1:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } 
@llvm.xtensa.ae.mulafd32x16x2.fir.lh(<1 x i64> %[[LD_AE_MUL_Q0]], <1 x i64> %[[LD_AE_MUL_Q1]], <2 x i32> {{.*}}, <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulafd32x16x2_fir_lh(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulafd32x16x2_fir_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafd32x16x2_fir_ll(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulafd32x16x2_fir_ll +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[LD_AE_MUL_Q1:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulafd32x16x2.fir.ll(<1 x i64> %[[LD_AE_MUL_Q0]], <1 x i64> %[[LD_AE_MUL_Q1]], <2 x i32> {{.*}}, <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulafd32x16x2_fir_ll(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulafp24x2r.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); 
+typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp24x2r(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulafp24x2r +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp24x2r(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp24x2r(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulafp24x2r_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp24x2r_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulafp24x2r_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp24x2r.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulafp24x2r_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulafp24x2ra.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp24x2ra(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulafp24x2ra +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp24x2ra(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp24x2ra(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulafp24x2ra_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp24x2ra_s2(ae_int32x2* 
ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulafp24x2ra_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp24x2ra.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp24x2ra_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulafp32x16x2ras_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp32x16x2ras_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulafp32x16x2ras_h +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2ras.h(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp32x16x2ras_h(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulafp32x16x2ras_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); 
+typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp32x16x2ras_h_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulafp32x16x2ras_h_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2ras.h.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp32x16x2ras_h_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulafp32x16x2ras_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp32x16x2ras_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulafp32x16x2ras_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2ras.l(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulafp32x16x2ras_l(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulafp32x16x2ras_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp32x16x2ras_l_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulafp32x16x2ras_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2ras.l.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp32x16x2ras_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulafp32x16x2rs_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp32x16x2rs_h(ae_int32x2* 
opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulafp32x16x2rs_h +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2rs.h(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp32x16x2rs_h(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulafp32x16x2rs_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp32x16x2rs_h_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulafp32x16x2rs_h_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2rs.h.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp32x16x2rs_h_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulafp32x16x2rs_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( 
vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp32x16x2rs_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulafp32x16x2rs_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2rs.l(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp32x16x2rs_l(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulafp32x16x2rs_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp32x16x2rs_l_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulafp32x16x2rs_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp32x16x2rs.l.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulafp32x16x2rs_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulafp32x2ras.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp32x2ras(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulafp32x2ras +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp32x2ras(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp32x2ras(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulafp32x2rs.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafp32x2rs(ae_int32x2* 
opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulafp32x2rs +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulafp32x2rs(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafp32x2rs(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulafq32sp24s_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafq32sp24s_h_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulafq32sp24s_h_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulafq32sp24s.h.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafq32sp24s_h_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulafq32sp24s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short 
ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulafq32sp24s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulafq32sp24s_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulafq32sp24s.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulafq32sp24s_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulap24x2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulap24x2(ae_int32x2* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulap24x2 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulap24x2(<2 x i32> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulap24x2(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulap24x2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulap24x2_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulap24x2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulap24x2.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulap24x2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulap32x16x2_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulap32x16x2_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulap32x16x2_h +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> 
@llvm.xtensa.ae.mulap32x16x2.h(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulap32x16x2_h(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulap32x16x2_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulap32x16x2_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulap32x16x2_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulap32x16x2.l(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulap32x16x2_l(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulap32x2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 
__attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulap32x2(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulap32x2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulap32x2(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulap32x2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulaq32sp16s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaq32sp16s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaq32sp16s_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaq32sp16s.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaq32sp16s_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulaq32sp16u_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulaq32sp16u_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulaq32sp16u_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulaq32sp16u.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulaq32sp16u_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mularfq32sp24s_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mularfq32sp24s_h_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mularfq32sp24s_h_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mularfq32sp24s.h.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x 
i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mularfq32sp24s_h_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mularfq32sp24s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mularfq32sp24s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mularfq32sp24s_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mularfq32sp24s.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mularfq32sp24s_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulas32f48p16s_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulas32f48p16s_hh(ae_int64* 
opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulas32f48p16s_hh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.hh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulas32f48p16s_hh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulas32f48p16s_hh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulas32f48p16s_hh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulas32f48p16s_hh_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.hh.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulas32f48p16s_hh_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulas32f48p16s_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( 
vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulas32f48p16s_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulas32f48p16s_lh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.lh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulas32f48p16s_lh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulas32f48p16s_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulas32f48p16s_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulas32f48p16s_lh_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.lh.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulas32f48p16s_lh_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulas32f48p16s_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulas32f48p16s_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulas32f48p16s_ll +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.ll(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulas32f48p16s_ll(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulas32f48p16s_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void 
test_ae_mulas32f48p16s_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulas32f48p16s_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulas32f48p16s.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulas32f48p16s_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulasd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulasd24_hh_ll +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasd24.hh.ll(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasd24_hh_ll(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulasd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 
__attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulasd24_hh_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasd24.hh.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasd24_hh_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulasd24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasd24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulasd24_hl_lh +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasd24.hl.lh(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasd24_hl_lh(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulasd24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasd24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulasd24_hl_lh_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasd24.hl.lh.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasd24_hl_lh_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulasd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulasd32x16_h1_l0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasd32x16.h1.l0(<1 x i64> 
%[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulasd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulasd32x16_h1_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasd32x16.h1.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasd32x16_h1_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulasd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); 
+ +typedef int immediate; + +void test_ae_mulasd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulasd32x16_h3_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasd32x16.h3.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulasd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulasd32x16_h3_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasd32x16.h3.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasd32x16_h3_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulasfd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( 
vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasfd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulasfd24_hh_ll +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasfd24.hh.ll(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasfd24_hh_ll(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulasfd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasfd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulasfd24_hh_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasfd24.hh.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasfd24_hh_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + 
+//--- ae_mulasfd24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasfd24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulasfd24_hl_lh +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasfd24.hl.lh(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasfd24_hl_lh(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulasfd24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasfd24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulasfd24_hl_lh_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: 
%[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasfd24.hl.lh.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasfd24_hl_lh_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulasfd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasfd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulasfd32x16_h1_l0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasfd32x16.h1.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasfd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulasfd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); 
+typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasfd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulasfd32x16_h1_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasfd32x16.h1.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasfd32x16_h1_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulasfd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasfd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulasfd32x16_h3_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasfd32x16.h3.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasfd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulasfd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulasfd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulasfd32x16_h3_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulasfd32x16.h3.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulasfd32x16_h3_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulc24.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulc24(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulc24 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulc24(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulc24(ae_mul_d0, 
ae_mul_d1); +} + +//--- ae_mulc32x16_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulc32x16_h(ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) { +// CHECK-LABEL: test_ae_mulc32x16_h +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulc32x16.h(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulc32x16_h(opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1); +} + +//--- ae_mulc32x16_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulc32x16_l(ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) { +// CHECK-LABEL: test_ae_mulc32x16_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulc32x16.l(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] 
+return __builtin_xtensa_ae_mulc32x16_l(opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1); +} + +//--- ae_mulf16ss_00.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_00(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf16ss_00 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulf16ss.00(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_00(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf16ss_00_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_00_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf16ss_00_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> 
@llvm.xtensa.ae.mulf16ss.00.s2(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_00_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf16ss_10.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_10(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulf16ss_10 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulf16ss.10(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_10(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulf16ss_11.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_11(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulf16ss_11 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> 
@llvm.xtensa.ae.mulf16ss.11(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_11(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulf16ss_20.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_20(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulf16ss_20 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulf16ss.20(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_20(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulf16ss_21.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_21(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulf16ss_21 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> 
@llvm.xtensa.ae.mulf16ss.21(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_21(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulf16ss_22.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_22(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulf16ss_22 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulf16ss.22(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_22(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulf16ss_30.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_30(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulf16ss_30 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> 
@llvm.xtensa.ae.mulf16ss.30(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_30(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulf16ss_31.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_31(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulf16ss_31 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulf16ss.31(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_31(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulf16ss_32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_32(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulf16ss_32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> 
@llvm.xtensa.ae.mulf16ss.32(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_32(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulf16ss_33.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulf16ss_33(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulf16ss_33 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulf16ss.33(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulf16ss_33(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulf16x4ss.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulf16x4ss(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) { +// CHECK-LABEL: test_ae_mulf16x4ss +// CHECK: %[[RET:.*]] = {{(tail)?}} 
call { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.mulf16x4ss(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <2 x i32> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulf16x4ss(ae_mul_q1, ae_mul_q0, ae_mul_d1, ae_mul_d0); +} + +//--- ae_mulf32r_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32r_hh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32r_hh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32r.hh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32r_hh(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32r_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 
test_ae_mulf32r_lh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32r_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32r.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32r_lh(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32r_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32r_ll(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32r_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32r.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32r_ll(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32r_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned 
char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32r_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf32r_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32r.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32r_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf32s_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32s_hh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32s_hh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32s.hh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32s_hh(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32s_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 
__attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32s_lh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32s_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32s.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32s_lh(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32s_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32s_ll(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32s_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32s.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32s_ll(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32s_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); 
+typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32s_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf32s_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32s.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32s_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf32x16_h0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_h0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32x16_h0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.h0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_h0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32x16_h0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( 
vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_h0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf32x16_h0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.h0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_h0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf32x16_h1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_h1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32x16_h1 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.h1(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_h1(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32x16_h1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 
__attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_h1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf32x16_h1_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.h1.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_h1_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf32x16_h2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_h2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32x16_h2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.h2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_h2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32x16_h2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef 
int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_h2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf32x16_h2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.h2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_h2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf32x16_h3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_h3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32x16_h3 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.h3(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_h3(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32x16_h3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; 
+typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_h3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf32x16_h3_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.h3.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_h3_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf32x16_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32x16_l0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.l0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_l0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32x16_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) 
_Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf32x16_l0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.l0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_l0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf32x16_l1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_l1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32x16_l1 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.l1(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_l1(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + 
+//--- ae_mulf32x16_l1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_l1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf32x16_l1_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.l1.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_l1_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf32x16_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32x16_l2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.l2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return 
__builtin_xtensa_ae_mulf32x16_l2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32x16_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf32x16_l2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.l2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_l2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf32x16_l3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_l3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf32x16_l3 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.l3(<2 x 
i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_l3(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf32x16_l3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf32x16_l3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf32x16_l3_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf32x16.l3.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf32x16_l3_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf48q32sp16s_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf48q32sp16s_l(ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf48q32sp16s_l +// 
CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf48q32sp16s.l(<1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf48q32sp16s_l(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf48q32sp16s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf48q32sp16s_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf48q32sp16s_l_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf48q32sp16s.l.s2(<1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf48q32sp16s_l_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulf48q32sp16u_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf48q32sp16u_l(ae_int64 
opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulf48q32sp16u_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf48q32sp16u.l(<1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf48q32sp16u_l(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulf48q32sp16u_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulf48q32sp16u_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulf48q32sp16u_l_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulf48q32sp16u.l.s2(<1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulf48q32sp16u_l_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulfc24ra.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign 
__attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfc24ra(ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int32x2 opnd_ae_sem_mul_x4_d1) { +// CHECK-LABEL: test_ae_mulfc24ra +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfc24ra(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfc24ra(opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1); +} + +//--- ae_mulfc32x16ras_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfc32x16ras_h(ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) { +// CHECK-LABEL: test_ae_mulfc32x16ras_h +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfc32x16ras.h(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfc32x16ras_h(opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1); +} + +//--- ae_mulfc32x16ras_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); 
+typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfc32x16ras_l(ae_int32x2 opnd_ae_sem_mul_x4_d0,ae_int16x4 opnd_ae_sem_mul_x4_d1) { +// CHECK-LABEL: test_ae_mulfc32x16ras_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfc32x16ras.l(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfc32x16ras_l(opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1); +} + +//--- ae_mulfd24x2_fir_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulfd24x2_fir_h(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int32x2 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulfd24x2_fir_h +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd24x2.fir.h(<2 x i32> {{.*}}, <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulfd24x2_fir_h(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulfd24x2_fir_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( 
vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulfd24x2_fir_l(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int32x2 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulfd24x2_fir_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd24x2.fir.l(<2 x i32> {{.*}}, <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulfd24x2_fir_l(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulfd32x16x2_fir_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulfd32x16x2_fir_hh(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulfd32x16x2_fir_hh +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd32x16x2.fir.hh(<2 x i32> {{.*}}, <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> 
} %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulfd32x16x2_fir_hh(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulfd32x16x2_fir_hl.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulfd32x16x2_fir_hl(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulfd32x16x2_fir_hl +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd32x16x2.fir.hl(<2 x i32> {{.*}}, <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulfd32x16x2_fir_hl(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulfd32x16x2_fir_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int 
immediate; + +void test_ae_mulfd32x16x2_fir_lh(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulfd32x16x2_fir_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd32x16x2.fir.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulfd32x16x2_fir_lh(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulfd32x16x2_fir_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulfd32x16x2_fir_ll(ae_int64* ae_mul_q0,ae_int64* ae_mul_q1,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1,ae_int16x4 ae_mul_d2) { +// CHECK-LABEL: test_ae_mulfd32x16x2_fir_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i64>, <1 x i64> } @llvm.xtensa.ae.mulfd32x16x2.fir.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i64>, <1 x i64> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulfd32x16x2_fir_ll(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, ae_mul_d2); +} + +//--- ae_mulfp16x4ras.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 
__attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_mulfp16x4ras(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulfp16x4ras +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.mulfp16x4ras(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_mulfp16x4ras(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulfp16x4s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_mulfp16x4s(ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulfp16x4s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.mulfp16x4s(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_mulfp16x4s(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulfp24x2r.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( 
vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp24x2r(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulfp24x2r +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp24x2r(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp24x2r(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulfp24x2r_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp24x2r_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulfp24x2r_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp24x2r.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp24x2r_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulfp24x2ra.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) 
_Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp24x2ra(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulfp24x2ra +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp24x2ra(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp24x2ra(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulfp24x2ra_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp24x2ra_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulfp24x2ra_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp24x2ra.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp24x2ra_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulfp32x16x2ras_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp32x16x2ras_h(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulfp32x16x2ras_h +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2ras.h(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp32x16x2ras_h(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulfp32x16x2ras_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp32x16x2ras_h_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulfp32x16x2ras_h_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2ras.h.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return 
__builtin_xtensa_ae_mulfp32x16x2ras_h_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulfp32x16x2ras_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp32x16x2ras_l(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulfp32x16x2ras_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2ras.l(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp32x16x2ras_l(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulfp32x16x2ras_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp32x16x2ras_l_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulfp32x16x2ras_l_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call 
<2 x i32> @llvm.xtensa.ae.mulfp32x16x2ras.l.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp32x16x2ras_l_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulfp32x16x2rs_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp32x16x2rs_h(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulfp32x16x2rs_h +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2rs.h(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp32x16x2rs_h(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulfp32x16x2rs_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp32x16x2rs_h_s2(ae_int32x2 
ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulfp32x16x2rs_h_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2rs.h.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp32x16x2rs_h_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulfp32x16x2rs_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp32x16x2rs_l(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulfp32x16x2rs_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2rs.l(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp32x16x2rs_l(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulfp32x16x2rs_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign 
__attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp32x16x2rs_l_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulfp32x16x2rs_l_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp32x16x2rs.l.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp32x16x2rs_l_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulfp32x2ras.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp32x2ras(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulfp32x2ras +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp32x2ras(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp32x2ras(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulfp32x2rs.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef 
long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulfp32x2rs(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulfp32x2rs +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulfp32x2rs(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulfp32x2rs(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulfq32sp24s_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulfq32sp24s_h_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulfq32sp24s_h_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulfq32sp24s.h.s2(<1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulfq32sp24s_h_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulfq32sp24s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( 
vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulfq32sp24s_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulfq32sp24s_l_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulfq32sp24s.l.s2(<1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulfq32sp24s_l_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulp24x2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulp24x2(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulp24x2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulp24x2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulp24x2(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulp24x2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( 
vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulp24x2_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulp24x2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulp24x2.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulp24x2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulp32x16x2_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulp32x16x2_h(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulp32x16x2_h +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulp32x16x2.h(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulp32x16x2_h(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulp32x16x2_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( 
vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulp32x16x2_l(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulp32x16x2_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulp32x16x2.l(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulp32x16x2_l(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulp32x2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulp32x2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulp32x2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulp32x2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulp32x2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulq32sp16s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); 
+typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulq32sp16s_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulq32sp16s_l_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulq32sp16s.l.s2(<1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulq32sp16s_l_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulq32sp16u_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulq32sp16u_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulq32sp16u_l_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulq32sp16u.l.s2(<1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulq32sp16u_l_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulrfq32sp24s_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef 
int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulrfq32sp24s_h_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulrfq32sp24s_h_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulrfq32sp24s.h.s2(<1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulrfq32sp24s_h_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulrfq32sp24s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulrfq32sp24s_l_s2(ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulrfq32sp24s_l_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulrfq32sp24s.l.s2(<1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulrfq32sp24s_l_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls16x4.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls16x4(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) { +// CHECK-LABEL: test_ae_muls16x4 +// CHECK: %[[LD_AE_MUL_Q1:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.muls16x4(<2 x i32> %[[LD_AE_MUL_Q1]], <2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <2 x i32> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_muls16x4(ae_mul_q1, ae_mul_q0, ae_mul_d1, ae_mul_d0); +} + +//--- ae_muls32_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32_hh +// CHECK: 
%[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32.hh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32_hh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32_lh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32.lh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32_lh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef 
short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32_ll +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32.ll(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32_ll(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32f48p16s_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_muls32f48p16s_hh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32f48p16s_hh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32f48p16s.hh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_muls32f48p16s_hh(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32f48p16s_hh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_muls32f48p16s_hh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32f48p16s_hh_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32f48p16s.hh.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_muls32f48p16s_hh_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls32f48p16s_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_muls32f48p16s_lh(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32f48p16s_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32f48p16s.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_muls32f48p16s_lh(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32f48p16s_lh_s2.c + +typedef 
__attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_muls32f48p16s_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32f48p16s_lh_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32f48p16s.lh.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_muls32f48p16s_lh_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls32f48p16s_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_muls32f48p16s_ll(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32f48p16s_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32f48p16s.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return 
__builtin_xtensa_ae_muls32f48p16s_ll(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32f48p16s_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_muls32f48p16s_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32f48p16s_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32f48p16s.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_muls32f48p16s_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls32u_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32u_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32u_ll +// CHECK: 
%[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32u.ll(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32u_ll(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32x16_h0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_h0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32x16_h0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.h0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_h0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32x16_h0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( 
vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_h0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32x16_h0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.h0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_h0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls32x16_h1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_h1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32x16_h1 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.h1(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_h1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32x16_h1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; 
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_h1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32x16_h1_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.h1.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_h1_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls32x16_h2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_h2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32x16_h2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> 
@llvm.xtensa.ae.muls32x16.h2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_h2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32x16_h2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_h2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32x16_h2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.h2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_h2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls32x16_h3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( 
vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_h3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32x16_h3 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.h3(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_h3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32x16_h3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_h3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32x16_h3_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.h3.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_h3_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls32x16_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef 
int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32x16_l0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32x16_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32x16_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_muls32x16_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls32x16_l1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32x16_l1 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.l1(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_l1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32x16_l1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_l1_s2(ae_int64* 
ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32x16_l1_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.l1.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_l1_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls32x16_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32x16_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32x16_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( 
vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32x16_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_muls32x16_l3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_muls32x16_l3 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.l3(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_l3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_muls32x16_l3_s2.c + 
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_muls32x16_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_muls32x16_l3_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.muls32x16.l3.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_muls32x16_l3_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsad24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsad24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsad24_hh_ll +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x 
i64> @llvm.xtensa.ae.mulsad24.hh.ll(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsad24_hh_ll(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsad24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsad24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsad24_hh_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsad24.hh.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsad24_hh_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsad32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + 
+void test_ae_mulsad32x16_h1_l0(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsad32x16_h1_l0 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsad32x16.h1.l0(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsad32x16_h1_l0(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsad32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsad32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsad32x16_h1_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsad32x16.h1.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsad32x16_h1_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsad32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef 
short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsad32x16_h3_l2(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsad32x16_h3_l2 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsad32x16.h3.l2(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsad32x16_h3_l2(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsad32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsad32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsad32x16_h3_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsad32x16.h3.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsad32x16_h3_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsafd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsafd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsafd24_hh_ll +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsafd24.hh.ll(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsafd24_hh_ll(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsafd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsafd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsafd24_hh_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsafd24.hh.ll.s2(<1 x i64> 
%[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsafd24_hh_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsafd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsafd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsafd32x16_h1_l0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsafd32x16.h1.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsafd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsafd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char 
ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsafd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsafd32x16_h1_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsafd32x16.h1.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsafd32x16_h1_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsafd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsafd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsafd32x16_h3_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsafd32x16.h3.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsafd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsafd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsafd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsafd32x16_h3_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsafd32x16.h3.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsafd32x16_h3_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf16ss_00.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_00(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_00 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.00(<2 x i32> 
%[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_00(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf16ss_00_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_00_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_00_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.00.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_00_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf16ss_10.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + 
+void test_ae_mulsf16ss_10(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_10 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.10(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_10(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsf16ss_11.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_11(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_11 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.11(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_11(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsf16ss_20.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 
__attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_20(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_20 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.20(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_20(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsf16ss_21.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_21(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_21 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.21(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_21(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsf16ss_22.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int 
ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_22(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_22 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.22(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_22(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsf16ss_30.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_30(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_30 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.30(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_30(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsf16ss_31.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_31(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_31 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.31(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_31(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsf16ss_32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_32(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_32 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.32(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// 
CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_32(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsf16ss_33.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16ss_33(ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsf16ss_33 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsf16ss.33(<2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf16ss_33(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsf16x4ss.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf16x4ss(ae_int32x2* ae_mul_q1,ae_int32x2* ae_mul_q0,ae_int16x4 ae_mul_d1,ae_int16x4 ae_mul_d0) { +// CHECK-LABEL: test_ae_mulsf16x4ss +// CHECK: 
%[[LD_AE_MUL_Q1:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <2 x i32>, <2 x i32> } @llvm.xtensa.ae.mulsf16x4ss(<2 x i32> %[[LD_AE_MUL_Q1]], <2 x i32> %[[LD_AE_MUL_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <2 x i32>, <2 x i32> } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_mulsf16x4ss(ae_mul_q1, ae_mul_q0, ae_mul_d1, ae_mul_d0); +} + +//--- ae_mulsf32r_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32r_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32r_hh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32r.hh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32r_hh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32r_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( 
vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32r_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32r_lh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32r.lh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32r_lh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32r_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32r_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32r_ll +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32r.ll(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulsf32r_ll(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32r_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32r_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf32r_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32r.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32r_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf32s_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32s_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 
opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32s_hh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32s.hh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32s_hh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32s_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32s_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32s_lh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32s.lh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32s_lh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32s_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); 
+typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32s_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32s_ll +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32s.ll(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32s_ll(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32x16_h0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_h0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_h0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulsf32x16_h0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32x16_h0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_h0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_h0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_h0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf32x16_h1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_h1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 
opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_h1 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h1(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_h1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32x16_h1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_h1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_h1_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h1.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_h1_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf32x16_h2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); 
+typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_h2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_h2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_h2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32x16_h2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_h2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_h2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_h2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf32x16_h3.c + 
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_h3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_h3 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h3(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_h3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32x16_h3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_h3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_h3_s2 +// 
CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.h3.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_h3_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf32x16_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_l0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32x16_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short 
ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf32x16_l1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_l1(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_l1 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l1(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_l1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32x16_l1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_l1_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_l1_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l1.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_l1_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf32x16_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> 
@llvm.xtensa.ae.mulsf32x16.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32x16_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf32x16_l3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( 
vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_l3(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_l3 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l3(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_l3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf32x16_l3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf32x16_l3_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf32x16_l3_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf32x16.l3.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf32x16_l3_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf48q32sp16s_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( 
vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf48q32sp16s_l(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf48q32sp16s_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf48q32sp16s.l(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf48q32sp16s_l(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf48q32sp16s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf48q32sp16s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf48q32sp16s_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf48q32sp16s.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> 
{{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf48q32sp16s_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsf48q32sp16u_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf48q32sp16u_l(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int64 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsf48q32sp16u_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf48q32sp16u.l(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf48q32sp16u_l(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsf48q32sp16u_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( 
vector_size(8))); + +typedef int immediate; + +void test_ae_mulsf48q32sp16u_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsf48q32sp16u_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsf48q32sp16u.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsf48q32sp16u_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsfp24x2r.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp24x2r(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsfp24x2r +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp24x2r(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp24x2r(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsfp24x2r_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( 
vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp24x2r_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsfp24x2r_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp24x2r.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp24x2r_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsfp24x2ra.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp24x2ra(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsfp24x2ra +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp24x2ra(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulsfp24x2ra(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsfp24x2ra_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp24x2ra_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsfp24x2ra_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp24x2ra.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp24x2ra_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsfp32x16x2ras_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp32x16x2ras_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 
opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsfp32x16x2ras_h +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2ras.h(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp32x16x2ras_h(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsfp32x16x2ras_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp32x16x2ras_h_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsfp32x16x2ras_h_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2ras.h.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp32x16x2ras_h_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsfp32x16x2ras_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short 
ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp32x16x2ras_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsfp32x16x2ras_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2ras.l(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp32x16x2ras_l(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsfp32x16x2ras_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp32x16x2ras_l_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsfp32x16x2ras_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2ras.l.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulsfp32x16x2ras_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsfp32x16x2rs_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp32x16x2rs_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsfp32x16x2rs_h +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2rs.h(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp32x16x2rs_h(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsfp32x16x2rs_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void 
test_ae_mulsfp32x16x2rs_h_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsfp32x16x2rs_h_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2rs.h.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp32x16x2rs_h_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsfp32x16x2rs_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp32x16x2rs_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsfp32x16x2rs_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2rs.l(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp32x16x2rs_l(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsfp32x16x2rs_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef 
int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp32x16x2rs_l_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsfp32x16x2rs_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp32x16x2rs.l.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp32x16x2rs_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsfp32x2ras.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp32x2ras(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsfp32x2ras +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp32x2ras(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulsfp32x2ras(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsfp32x2rs.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfp32x2rs(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsfp32x2rs +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsfp32x2rs(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfp32x2rs(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsfq32sp24s_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void 
test_ae_mulsfq32sp24s_h_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsfq32sp24s_h_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsfq32sp24s.h.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfq32sp24s_h_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsfq32sp24s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsfq32sp24s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsfq32sp24s_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsfq32sp24s.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsfq32sp24s_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsp24x2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef 
short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsp24x2(ae_int32x2* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulsp24x2 +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsp24x2(<2 x i32> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsp24x2(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulsp24x2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsp24x2_s2(ae_int32x2* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsp24x2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsp24x2.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsp24x2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsp32x16x2_h.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) 
_Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsp32x16x2_h(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsp32x16x2_h +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsp32x16x2.h(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsp32x16x2_h(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsp32x16x2_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsp32x16x2_l(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsp32x16x2_l +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> 
@llvm.xtensa.ae.mulsp32x16x2.l(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsp32x16x2_l(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsp32x2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsp32x2(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulsp32x2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulsp32x2(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsp32x2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulsq32sp16s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( 
vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsq32sp16s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsq32sp16s_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsq32sp16s.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsq32sp16s_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsq32sp16u_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsq32sp16u_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsq32sp16u_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsq32sp16u.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsq32sp16u_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsrfq32sp24s_h_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); 
+typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsrfq32sp24s_h_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsrfq32sp24s_h_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsrfq32sp24s.h.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsrfq32sp24s_h_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulsrfq32sp24s_l_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulsrfq32sp24s_l_s2(ae_int64* ae_mul_S2_q0,ae_int64 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulsrfq32sp24s_l_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulsrfq32sp24s.l.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <1 x i64> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulsrfq32sp24s_l_s2(ae_mul_S2_q0, ae_mul_S2_d0, 
ae_mul_S2_d1); +} + +//--- ae_mulss32f48p16s_hh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulss32f48p16s_hh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulss32f48p16s_hh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.hh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulss32f48p16s_hh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulss32f48p16s_hh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulss32f48p16s_hh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 
ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulss32f48p16s_hh_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.hh.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulss32f48p16s_hh_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulss32f48p16s_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulss32f48p16s_lh(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulss32f48p16s_lh +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.lh(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulss32f48p16s_lh(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulss32f48p16s_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short 
ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulss32f48p16s_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulss32f48p16s_lh_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.lh.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulss32f48p16s_lh_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulss32f48p16s_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulss32f48p16s_ll(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulss32f48p16s_ll +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.ll(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulss32f48p16s_ll(opnd_ae_sem_mul_x2_S1_q0, 
opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulss32f48p16s_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulss32f48p16s_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulss32f48p16s_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulss32f48p16s.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulss32f48p16s_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: 
test_ae_mulssd24_hh_ll +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssd24.hh.ll(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssd24_hh_ll(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulssd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssd24_hh_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssd24.hh.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssd24_hh_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssd24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 
__attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssd24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulssd24_hl_lh +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssd24.hl.lh(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssd24_hl_lh(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulssd24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssd24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssd24_hl_lh_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssd24.hl.lh.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssd24_hl_lh_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 
__attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulssd32x16_h1_l0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssd32x16.h1.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulssd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssd32x16_h1_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssd32x16.h1.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret 
void + __builtin_xtensa_ae_mulssd32x16_h1_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulssd32x16_h3_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssd32x16.h3.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulssd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void 
test_ae_mulssd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssd32x16_h3_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssd32x16.h3.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssd32x16_h3_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssfd16ss_11_00.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd16ss_11_00(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulssfd16ss_11_00 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulssfd16ss.11.00(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd16ss_11_00(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulssfd16ss_11_00_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef 
int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd16ss_11_00_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssfd16ss_11_00_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulssfd16ss.11.00.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd16ss_11_00_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssfd16ss_13_02.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd16ss_13_02(ae_int32x2* opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulssfd16ss_13_02 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulssfd16ss.13.02(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_mulssfd16ss_13_02(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulssfd16ss_13_02_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd16ss_13_02_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssfd16ss_13_02_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulssfd16ss.13.02.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd16ss_13_02_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssfd16ss_33_22.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd16ss_33_22(ae_int32x2* 
opnd_ae_sem_mul_x2_S1_q0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulssfd16ss_33_22 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulssfd16ss.33.22(<2 x i32> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd16ss_33_22(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulssfd16ss_33_22_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd16ss_33_22_s2(ae_int32x2* ae_mul_S2_q0,ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssfd16ss_33_22_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulssfd16ss.33.22.s2(<2 x i32> %[[LD_AE_MUL_S2_Q0]], <4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd16ss_33_22_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssfd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( 
vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd24_hh_ll(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulssfd24_hh_ll +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssfd24.hh.ll(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd24_hh_ll(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulssfd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd24_hh_ll_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssfd24_hh_ll_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssfd24.hh.ll.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd24_hh_ll_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssfd24_hl_lh.c + +typedef 
__attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd24_hl_lh(ae_int64* ae_mul_q0,ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulssfd24_hl_lh +// CHECK: %[[LD_AE_MUL_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssfd24.hl.lh(<1 x i64> %[[LD_AE_MUL_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd24_hl_lh(ae_mul_q0, ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulssfd24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd24_hl_lh_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssfd24_hl_lh_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> 
@llvm.xtensa.ae.mulssfd24.hl.lh.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd24_hl_lh_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssfd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd32x16_h1_l0(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulssfd32x16_h1_l0 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssfd32x16.h1.l0(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulssfd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 
__attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd32x16_h1_l0_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssfd32x16_h1_l0_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssfd32x16.h1.l0.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd32x16_h1_l0_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulssfd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd32x16_h3_l2(ae_int64* opnd_ae_sem_mul_x2_S1_q0,ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulssfd32x16_h3_l2 +// CHECK: %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssfd32x16.h3.l2(<1 x i64> %[[LD_OPND_AE_SEM_MUL_X2_S1_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulssfd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) 
_Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_mulssfd32x16_h3_l2_s2(ae_int64* ae_mul_S2_q0,ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulssfd32x16_h3_l2_s2 +// CHECK: %[[LD_AE_MUL_S2_Q0:.*]] = load <1 x i64>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulssfd32x16.h3.l2.s2(<1 x i64> %[[LD_AE_MUL_S2_Q0]], <2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_mulssfd32x16_h3_l2_s2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaad24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzaad24_hh_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad24.hh.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return 
__builtin_xtensa_ae_mulzaad24_hh_ll(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzaad24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaad24_hh_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad24.hh.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad24_hh_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaad24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzaad24_hl_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad24.hl.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) 
+// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad24_hl_lh(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzaad24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaad24_hl_lh_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad24.hl.lh.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad24_hl_lh_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaad32x16_h0_l1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad32x16_h0_l1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaad32x16_h0_l1 +// CHECK: %[[RET:.*]] = {{(tail)?}} call 
<1 x i64> @llvm.xtensa.ae.mulzaad32x16.h0.l1(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad32x16_h0_l1(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaad32x16_h0_l1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad32x16_h0_l1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaad32x16_h0_l1_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h0.l1.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad32x16_h0_l1_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaad32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad32x16_h1_l0(ae_int32x2 
opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaad32x16_h1_l0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h1.l0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad32x16_h1_l0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaad32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaad32x16_h1_l0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h1.l0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad32x16_h1_l0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaad32x16_h2_l3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef 
unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad32x16_h2_l3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaad32x16_h2_l3 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h2.l3(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad32x16_h2_l3(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaad32x16_h2_l3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad32x16_h2_l3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaad32x16_h2_l3_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h2.l3.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad32x16_h2_l3_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaad32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); 
+typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaad32x16_h3_l2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h3.l2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad32x16_h3_l2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaad32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaad32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaad32x16_h3_l2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaad32x16.h3.l2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaad32x16_h3_l2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaafd16ss_11_00.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( 
vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzaafd16ss_11_00(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaafd16ss_11_00 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.11.00(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzaafd16ss_11_00(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaafd16ss_11_00_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzaafd16ss_11_00_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaafd16ss_11_00_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.11.00.s2(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzaafd16ss_11_00_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaafd16ss_13_02.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzaafd16ss_13_02(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaafd16ss_13_02 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.13.02(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzaafd16ss_13_02(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaafd16ss_13_02_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzaafd16ss_13_02_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaafd16ss_13_02_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.13.02.s2(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzaafd16ss_13_02_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- 
ae_mulzaafd16ss_33_22.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzaafd16ss_33_22(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaafd16ss_33_22 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.33.22(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzaafd16ss_33_22(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaafd16ss_33_22_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzaafd16ss_33_22_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaafd16ss_33_22_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzaafd16ss.33.22.s2(<4 x i16> {{.*}}, <4 x i16> 
{{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzaafd16ss_33_22_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaafd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzaafd24_hh_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd24.hh.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd24_hh_ll(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzaafd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaafd24_hh_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> 
@llvm.xtensa.ae.mulzaafd24.hh.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd24_hh_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaafd24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzaafd24_hl_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd24.hl.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd24_hl_lh(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzaafd24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaafd24_hl_lh_s2 
+// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd24.hl.lh.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd24_hl_lh_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaafd32x16_h0_l1.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd32x16_h0_l1(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaafd32x16_h0_l1 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h0.l1(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd32x16_h0_l1(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaafd32x16_h0_l1_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + 
+ae_int64 test_ae_mulzaafd32x16_h0_l1_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaafd32x16_h0_l1_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h0.l1.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd32x16_h0_l1_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaafd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaafd32x16_h1_l0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h1.l0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaafd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long 
long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaafd32x16_h1_l0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h1.l0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd32x16_h1_l0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaafd32x16_h2_l3.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd32x16_h2_l3(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaafd32x16_h2_l3 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h2.l3(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd32x16_h2_l3(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaafd32x16_h2_l3_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( 
vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd32x16_h2_l3_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaafd32x16_h2_l3_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h2.l3.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd32x16_h2_l3_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzaafd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzaafd32x16_h3_l2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h3.l2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzaafd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int 
ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzaafd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzaafd32x16_h3_l2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzaafd32x16.h3.l2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzaafd32x16_h3_l2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzasd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzasd24_hh_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasd24.hh.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasd24_hh_ll(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzasd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzasd24_hh_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasd24.hh.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasd24_hh_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzasd24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasd24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzasd24_hl_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasd24.hl.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasd24_hl_lh(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzasd24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasd24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzasd24_hl_lh_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasd24.hl.lh.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasd24_hl_lh_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzasd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzasd32x16_h1_l0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasd32x16.h1.l0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_d0, 
opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzasd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzasd32x16_h1_l0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasd32x16.h1.l0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasd32x16_h1_l0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzasd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzasd32x16_h3_l2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasd32x16.h3.l2(<2 x i32> 
{{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzasd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzasd32x16_h3_l2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasd32x16.h3.l2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasd32x16_h3_l2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzasfd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasfd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzasfd24_hh_ll +// 
CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasfd24.hh.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasfd24_hh_ll(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzasfd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasfd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzasfd24_hh_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasfd24.hh.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasfd24_hh_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzasfd24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasfd24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 
ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzasfd24_hl_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasfd24.hl.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasfd24_hl_lh(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzasfd24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasfd24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzasfd24_hl_lh_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasfd24.hl.lh.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasfd24_hl_lh_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzasfd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 
test_ae_mulzasfd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzasfd32x16_h1_l0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasfd32x16.h1.l0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasfd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzasfd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasfd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzasfd32x16_h1_l0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasfd32x16.h1.l0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasfd32x16_h1_l0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzasfd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 
__attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasfd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzasfd32x16_h3_l2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasfd32x16.h3.l2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasfd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzasfd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzasfd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzasfd32x16_h3_l2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzasfd32x16.h3.l2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzasfd32x16_h3_l2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzsad24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef 
short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsad24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzsad24_hh_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsad24.hh.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsad24_hh_ll(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzsad24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsad24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzsad24_hh_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsad24.hh.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsad24_hh_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzsad32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 
__attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsad32x16_h1_l0(ae_int32x2 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzsad32x16_h1_l0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsad32x16.h1.l0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsad32x16_h1_l0(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzsad32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsad32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzsad32x16_h1_l0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsad32x16.h1.l0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsad32x16_h1_l0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzsad32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int 
ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsad32x16_h3_l2(ae_int32x2 ae_mul_d0,ae_int16x4 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzsad32x16_h3_l2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsad32x16.h3.l2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsad32x16_h3_l2(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzsad32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsad32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzsad32x16_h3_l2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsad32x16.h3.l2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsad32x16_h3_l2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzsafd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + 
+typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsafd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzsafd24_hh_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsafd24.hh.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsafd24_hh_ll(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzsafd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsafd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzsafd24_hh_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsafd24.hh.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsafd24_hh_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzsafd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsafd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzsafd32x16_h1_l0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsafd32x16.h1.l0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsafd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzsafd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsafd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzsafd32x16_h1_l0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsafd32x16.h1.l0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsafd32x16_h1_l0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- 
ae_mulzsafd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsafd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzsafd32x16_h3_l2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsafd32x16.h3.l2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsafd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzsafd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzsafd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzsafd32x16_h3_l2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzsafd32x16.h3.l2.s2(<2 x i32> {{.*}}, <4 x 
i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzsafd32x16_h3_l2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzssd24_hh_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssd24.hh.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssd24_hh_ll(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzssd24_hh_ll_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssd24_hh_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> 
@llvm.xtensa.ae.mulzssd24.hh.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssd24_hh_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssd24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssd24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzssd24_hl_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssd24.hl.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssd24_hl_lh(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzssd24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssd24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssd24_hl_lh_s2 +// CHECK: 
%[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssd24.hl.lh.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssd24_hl_lh_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzssd32x16_h1_l0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssd32x16.h1.l0(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzssd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 
test_ae_mulzssd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssd32x16_h1_l0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssd32x16.h1.l0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssd32x16_h1_l0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzssd32x16_h3_l2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssd32x16.h3.l2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzssd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 
__attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssd32x16_h3_l2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssd32x16.h3.l2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssd32x16_h3_l2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssfd16ss_11_00.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzssfd16ss_11_00(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzssfd16ss_11_00 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.11.00(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzssfd16ss_11_00(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzssfd16ss_11_00_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short 
ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzssfd16ss_11_00_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssfd16ss_11_00_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.11.00.s2(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzssfd16ss_11_00_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssfd16ss_13_02.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzssfd16ss_13_02(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzssfd16ss_13_02 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.13.02(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzssfd16ss_13_02(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzssfd16ss_13_02_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( 
vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzssfd16ss_13_02_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssfd16ss_13_02_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.13.02.s2(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzssfd16ss_13_02_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssfd16ss_33_22.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzssfd16ss_33_22(ae_int16x4 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzssfd16ss_33_22 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.33.22(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzssfd16ss_33_22(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzssfd16ss_33_22_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_mulzssfd16ss_33_22_s2(ae_int16x4 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssfd16ss_33_22_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.mulzssfd16ss.33.22.s2(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_mulzssfd16ss_33_22_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssfd24_hh_ll.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssfd24_hh_ll(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzssfd24_hh_ll +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssfd24.hh.ll(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssfd24_hh_ll(ae_mul_d0, ae_mul_d1); +} + +//--- ae_mulzssfd24_hh_ll_s2.c + +typedef 
__attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssfd24_hh_ll_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssfd24_hh_ll_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssfd24.hh.ll.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssfd24_hh_ll_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssfd24_hl_lh.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssfd24_hl_lh(ae_int32x2 ae_mul_d0,ae_int32x2 ae_mul_d1) { +// CHECK-LABEL: test_ae_mulzssfd24_hl_lh +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssfd24.hl.lh(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssfd24_hl_lh(ae_mul_d0, 
ae_mul_d1); +} + +//--- ae_mulzssfd24_hl_lh_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssfd24_hl_lh_s2(ae_int32x2 ae_mul_S2_d0,ae_int32x2 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssfd24_hl_lh_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssfd24.hl.lh.s2(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssfd24_hl_lh_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssfd32x16_h1_l0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssfd32x16_h1_l0(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: test_ae_mulzssfd32x16_h1_l0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssfd32x16.h1.l0(<2 x i32> {{.*}}, <4 x i16> 
{{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssfd32x16_h1_l0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzssfd32x16_h1_l0_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssfd32x16_h1_l0_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssfd32x16_h1_l0_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssfd32x16.h1.l0.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssfd32x16_h1_l0_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_mulzssfd32x16_h3_l2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssfd32x16_h3_l2(ae_int32x2 opnd_ae_sem_mul_x2_S1_d0,ae_int16x4 opnd_ae_sem_mul_x2_S1_d1) { +// CHECK-LABEL: 
test_ae_mulzssfd32x16_h3_l2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssfd32x16.h3.l2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssfd32x16_h3_l2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1); +} + +//--- ae_mulzssfd32x16_h3_l2_s2.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_mulzssfd32x16_h3_l2_s2(ae_int32x2 ae_mul_S2_d0,ae_int16x4 ae_mul_S2_d1) { +// CHECK-LABEL: test_ae_mulzssfd32x16_h3_l2_s2 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.mulzssfd32x16.h3.l2.s2(<2 x i32> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_mulzssfd32x16_h3_l2_s2(ae_mul_S2_d0, ae_mul_S2_d1); +} + +//--- ae_nand.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + 
+ae_int64 test_ae_nand(ae_int64 ae_dr_to_dr_v0,ae_int64 ae_dr_to_dr_v1) { +// CHECK-LABEL: test_ae_nand +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.nand(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_nand(ae_dr_to_dr_v0, ae_dr_to_dr_v1); +} + +//--- ae_neg16s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_neg16s(ae_int16x4 ae_arth_v1) { +// CHECK-LABEL: test_ae_neg16s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.neg16s(<4 x i16> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_neg16s(ae_arth_v1); +} + +//--- ae_neg24s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_neg24s(ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_neg24s +// CHECK: %[[RET:.*]] = 
{{(tail)?}} call <2 x i32> @llvm.xtensa.ae.neg24s(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_neg24s(ae_arth_v1); +} + +//--- ae_neg32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_neg32(ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_neg32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.neg32(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_neg32(ae_arth_v1); +} + +//--- ae_neg32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_neg32s(ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_neg32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.neg32s(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_neg32s(ae_arth_v1); +} + +//--- ae_neg64.c + 
+typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_neg64(ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_neg64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.neg64(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_neg64(ae_arth_v1); +} + +//--- ae_neg64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_neg64s(ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_neg64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.neg64s(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_neg64s(ae_arth_v1); +} + +//--- ae_nsa64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + 
+typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_nsa64(ae_int64 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_nsa64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.nsa64(<1 x i64> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_nsa64(ae_dr_to_ar_v0); +} + +//--- ae_nsaz16_0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_nsaz16_0(ae_int16x4 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_nsaz16_0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.nsaz16.0(<4 x i16> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_nsaz16_0(ae_dr_to_ar_v0); +} + +//--- ae_nsaz32_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef 
short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_nsaz32_l(ae_int32x2 ae_dr_to_ar_v0) { +// CHECK-LABEL: test_ae_nsaz32_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.nsaz32.l(<2 x i32> {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_nsaz32_l(ae_dr_to_ar_v0); +} + +//--- ae_or.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_or(ae_int64 ae_dr_to_dr_v0,ae_int64 ae_dr_to_dr_v1) { +// CHECK-LABEL: test_ae_or +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.or(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_or(ae_dr_to_dr_v0, ae_dr_to_dr_v1); +} + +//--- ae_pksr24.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long 
ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_pksr24(ae_int32x2* ae_pks_d,ae_int64 ae_pks_s,immediate ae_imm2) { +// CHECK-LABEL: test_ae_pksr24 +// CHECK: %[[LD_AE_PKS_D:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.pksr24(<2 x i32> %[[LD_AE_PKS_D]], <1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_pksr24(ae_pks_d, ae_pks_s, 0); +} + +//--- ae_pksr32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_pksr32(ae_int32x2* ae_pks_d,ae_int64 ae_pks_s,immediate ae_imm2) { +// CHECK-LABEL: test_ae_pksr32 +// CHECK: %[[LD_AE_PKS_D:.*]] = load <2 x i32>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.pksr32(<2 x i32> %[[LD_AE_PKS_D]], <1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_pksr32(ae_pks_d, ae_pks_s, 0); +} + +//--- ae_round16x4f32sasym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( 
vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_round16x4f32sasym(ae_int32x2 ae_arth_v1,ae_int32x2 ae_arth_v0) { +// CHECK-LABEL: test_ae_round16x4f32sasym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.round16x4f32sasym(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_round16x4f32sasym(ae_arth_v1, ae_arth_v0); +} + +//--- ae_round16x4f32ssym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_round16x4f32ssym(ae_int32x2 ae_arth_v1,ae_int32x2 ae_arth_v0) { +// CHECK-LABEL: test_ae_round16x4f32ssym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.round16x4f32ssym(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_round16x4f32ssym(ae_arth_v1, ae_arth_v0); +} + +//--- ae_round24x2f48sasym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); 
+typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_round24x2f48sasym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_round24x2f48sasym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.round24x2f48sasym(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_round24x2f48sasym(ae_arth_v0, ae_arth_v1); +} + +//--- ae_round24x2f48ssym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_round24x2f48ssym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_round24x2f48ssym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.round24x2f48ssym(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_round24x2f48ssym(ae_arth_v0, ae_arth_v1); +} + +//--- ae_round32x2f48sasym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 
__attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_round32x2f48sasym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_round32x2f48sasym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.round32x2f48sasym(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_round32x2f48sasym(ae_arth_v0, ae_arth_v1); +} + +//--- ae_round32x2f48ssym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_round32x2f48ssym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_round32x2f48ssym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.round32x2f48ssym(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_round32x2f48ssym(ae_arth_v0, ae_arth_v1); +} + +//--- ae_round32x2f64sasym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( 
vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_round32x2f64sasym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_round32x2f64sasym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.round32x2f64sasym(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_round32x2f64sasym(ae_arth_v0, ae_arth_v1); +} + +//--- ae_round32x2f64ssym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_round32x2f64ssym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_round32x2f64ssym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.round32x2f64ssym(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_round32x2f64ssym(ae_arth_v0, ae_arth_v1); +} + +//--- ae_roundsp16f24asym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef 
int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_roundsp16f24asym(ae_int32x2 ae_arth_v0) { +// CHECK-LABEL: test_ae_roundsp16f24asym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.roundsp16f24asym(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_roundsp16f24asym(ae_arth_v0); +} + +//--- ae_roundsp16f24sym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_roundsp16f24sym(ae_int32x2 ae_arth_v0) { +// CHECK-LABEL: test_ae_roundsp16f24sym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.roundsp16f24sym(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_roundsp16f24sym(ae_arth_v0); +} + +//--- ae_roundsp16q48x2asym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 
__attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_roundsp16q48x2asym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_roundsp16q48x2asym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.roundsp16q48x2asym(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_roundsp16q48x2asym(ae_arth_v0, ae_arth_v1); +} + +//--- ae_roundsp16q48x2sym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_roundsp16q48x2sym(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_roundsp16q48x2sym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.roundsp16q48x2sym(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_roundsp16q48x2sym(ae_arth_v0, ae_arth_v1); +} + +//--- ae_roundsq32f48asym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( 
vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_roundsq32f48asym(ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_roundsq32f48asym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.roundsq32f48asym(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_roundsq32f48asym(ae_arth_v1); +} + +//--- ae_roundsq32f48sym.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_roundsq32f48sym(ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_roundsq32f48sym +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.roundsq32f48sym(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_roundsq32f48sym(ae_arth_v1); +} + +//--- ae_s16_0_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short 
ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16_0_i(ae_int16x4 ae_ls_v,ae_int16* ars,immediate ae_immls16) { +// CHECK-LABEL: test_ae_s16_0_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s16.0.i(<4 x i16> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16_0_i(ae_ls_v, ars, -16); +} + +//--- ae_s16_0_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16_0_ip(ae_int16x4 ae_ls_v,ae_int16** ars,immediate ae_immls16) { +// CHECK-LABEL: test_ae_s16_0_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16.0.ip(<4 x i16> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16_0_ip(ae_ls_v, ars, -16); +} + +//--- ae_s16_0_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); 
+typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16_0_x(ae_int16x4 ae_ls_v,ae_int16* ars,int art) { +// CHECK-LABEL: test_ae_s16_0_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s16.0.x(<4 x i16> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16_0_x(ae_ls_v, ars, art); +} + +//--- ae_s16_0_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16_0_xc(ae_int16x4 ae_ls_v,ae_int16** ars,int art) { +// CHECK-LABEL: test_ae_s16_0_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16.0.xc(<4 x i16> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16_0_xc(ae_ls_v, ars, art); +} + +//--- ae_s16_0_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long 
long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16_0_xp(ae_int16x4 ae_ls_v,ae_int16** ars,int art) { +// CHECK-LABEL: test_ae_s16_0_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16.0.xp(<4 x i16> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16_0_xp(ae_ls_v, ars, art); +} + +//--- ae_s16m_l_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16m_l_i(ae_int32x2 ae_ls_v,ae_int16* ars,immediate ae_immls16) { +// CHECK-LABEL: test_ae_s16m_l_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s16m.l.i(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16m_l_i(ae_ls_v, ars, -16); +} + +//--- ae_s16m_l_iu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef 
unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16m_l_iu(ae_int32x2 ae_ls_v,ae_int16** ars,immediate ae_immls16) { +// CHECK-LABEL: test_ae_s16m_l_iu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16m.l.iu(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16m_l_iu(ae_ls_v, ars, -16); +} + +//--- ae_s16m_l_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16m_l_x(ae_int32x2 ae_ls_v,ae_int16* ars,int art) { +// CHECK-LABEL: test_ae_s16m_l_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s16m.l.x(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16m_l_x(ae_ls_v, ars, art); +} + +//--- ae_s16m_l_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( 
vector_size(8))); + +typedef int immediate; + +void test_ae_s16m_l_xc(ae_int32x2 ae_ls_v,ae_int16** ars,int art) { +// CHECK-LABEL: test_ae_s16m_l_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16m.l.xc(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16m_l_xc(ae_ls_v, ars, art); +} + +//--- ae_s16m_l_xu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16m_l_xu(ae_int32x2 ae_ls_v,ae_int16** ars,int art) { +// CHECK-LABEL: test_ae_s16m_l_xu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16m.l.xu(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16m_l_xu(ae_ls_v, ars, art); +} + +//--- ae_s16x2m_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign 
__attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x2m_i(ae_int32x2 ae_ls_v,ae_int16x2* ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s16x2m_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s16x2m.i(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16x2m_i(ae_ls_v, ars, -32); +} + +//--- ae_s16x2m_iu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x2m_iu(ae_int32x2 ae_ls_v,ae_int16x2** ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s16x2m_iu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16x2m.iu(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16x2m_iu(ae_ls_v, ars, -32); +} + +//--- ae_s16x2m_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + 
+typedef int immediate; + +void test_ae_s16x2m_x(ae_int32x2 ae_ls_v,ae_int16x2* ars,int art) { +// CHECK-LABEL: test_ae_s16x2m_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s16x2m.x(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16x2m_x(ae_ls_v, ars, art); +} + +//--- ae_s16x2m_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x2m_xc(ae_int32x2 ae_ls_v,ae_int16x2** ars,int art) { +// CHECK-LABEL: test_ae_s16x2m_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16x2m.xc(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16x2m_xc(ae_ls_v, ars, art); +} + +//--- ae_s16x2m_xu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x2m_xu(ae_int32x2 
ae_ls_v,ae_int16x2** ars,int art) { +// CHECK-LABEL: test_ae_s16x2m_xu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16x2m.xu(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16x2m_xu(ae_ls_v, ars, art); +} + +//--- ae_s16x4_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x4_i(ae_int16x4 ae_ls_v,ae_int16x4* ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_s16x4_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s16x4.i(<4 x i16> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16x4_i(ae_ls_v, ars, -64); +} + +//--- ae_s16x4_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x4_ip(ae_int16x4 ae_ls_v,ae_int16x4** ars,immediate ae_immls64pos) { +// 
CHECK-LABEL: test_ae_s16x4_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16x4.ip(<4 x i16> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16x4_ip(ae_ls_v, ars, 0); +} + +//--- ae_s16x4_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x4_ric(ae_int16x4 ae_ls_v,ae_int16x4** ars) { +// CHECK-LABEL: test_ae_s16x4_ric +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16x4.ric(<4 x i16> {{.*}}, ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_s16x4_ric(ae_ls_v, ars); +} + +//--- ae_s16x4_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x4_rip(ae_int16x4 ae_ls_v,ae_int16x4** ars) { +// CHECK-LABEL: 
test_ae_s16x4_rip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16x4.rip(<4 x i16> {{.*}}, ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_s16x4_rip(ae_ls_v, ars); +} + +//--- ae_s16x4_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x4_x(ae_int16x4 ae_ls_v,ae_int16x4* ars,int art) { +// CHECK-LABEL: test_ae_s16x4_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s16x4.x(<4 x i16> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16x4_x(ae_ls_v, ars, art); +} + +//--- ae_s16x4_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x4_xc(ae_int16x4 ae_ls_v,ae_int16x4** ars,int art) { +// CHECK-LABEL: test_ae_s16x4_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: 
%[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16x4.xc(<4 x i16> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16x4_xc(ae_ls_v, ars, art); +} + +//--- ae_s16x4_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s16x4_xp(ae_int16x4 ae_ls_v,ae_int16x4** ars,int art) { +// CHECK-LABEL: test_ae_s16x4_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s16x4.xp(<4 x i16> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s16x4_xp(ae_ls_v, ars, art); +} + +//--- ae_s24ra64s_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s24ra64s_i(ae_int64 ae_ls_v1,ae_int32* ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s24ra64s_i +// CHECK: {{(tail)?}} call void 
@llvm.xtensa.ae.s24ra64s.i(<1 x i64> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s24ra64s_i(ae_ls_v1, ars, -32); +} + +//--- ae_s24ra64s_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s24ra64s_ip(ae_int64 ae_ls_v1,ae_int32** ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s24ra64s_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s24ra64s.ip(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s24ra64s_ip(ae_ls_v1, ars, -32); +} + +//--- ae_s24ra64s_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s24ra64s_x(ae_int64 ae_ls_v1,ae_int32* ars,int art) { +// CHECK-LABEL: test_ae_s24ra64s_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s24ra64s.x(<1 x 
i64> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s24ra64s_x(ae_ls_v1, ars, art); +} + +//--- ae_s24ra64s_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s24ra64s_xc(ae_int64 ae_ls_v1,ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_s24ra64s_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s24ra64s.xc(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s24ra64s_xc(ae_ls_v1, ars, art); +} + +//--- ae_s24ra64s_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s24ra64s_xp(ae_int64 ae_ls_v1,ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_s24ra64s_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr 
@llvm.xtensa.ae.s24ra64s.xp(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s24ra64s_xp(ae_ls_v1, ars, art); +} + +//--- ae_s24x2ra64s_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s24x2ra64s_ip(ae_int64 ae_ls_v2,ae_int64 ae_ls_v1,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_s24x2ra64s_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s24x2ra64s.ip(<1 x i64> {{.*}}, <1 x i64> {{.*}}, ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_s24x2ra64s_ip(ae_ls_v2, ae_ls_v1, ars); +} + +//--- ae_s32_l_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32_l_i(ae_int32x2 ae_ls_v,ae_int32* ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s32_l_i +// CHECK: {{(tail)?}} call void 
@llvm.xtensa.ae.s32.l.i(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32_l_i(ae_ls_v, ars, -32); +} + +//--- ae_s32_l_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32_l_ip(ae_int32x2 ae_ls_v,ae_int32** ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s32_l_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32.l.ip(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32_l_ip(ae_ls_v, ars, -32); +} + +//--- ae_s32_l_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32_l_x(ae_int32x2 ae_ls_v,ae_int32* ars,int art) { +// CHECK-LABEL: test_ae_s32_l_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32.l.x(<2 x i32> {{.*}}, ptr {{.*}}, i32 
{{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32_l_x(ae_ls_v, ars, art); +} + +//--- ae_s32_l_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32_l_xc(ae_int32x2 ae_ls_v,ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_s32_l_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32.l.xc(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32_l_xc(ae_ls_v, ars, art); +} + +//--- ae_s32_l_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32_l_xp(ae_int32x2 ae_ls_v,ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_s32_l_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32.l.xp(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 
{{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32_l_xp(ae_ls_v, ars, art); +} + +//--- ae_s32f24_l_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32f24_l_i(ae_int32x2 ae_ls_v,ae_int32* ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s32f24_l_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32f24.l.i(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32f24_l_i(ae_ls_v, ars, -32); +} + +//--- ae_s32f24_l_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32f24_l_ip(ae_int32x2 ae_ls_v,ae_int32** ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s32f24_l_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32f24.l.ip(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret 
void + __builtin_xtensa_ae_s32f24_l_ip(ae_ls_v, ars, -32); +} + +//--- ae_s32f24_l_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32f24_l_x(ae_int32x2 ae_ls_v,ae_int32* ars,int art) { +// CHECK-LABEL: test_ae_s32f24_l_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32f24.l.x(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32f24_l_x(ae_ls_v, ars, art); +} + +//--- ae_s32f24_l_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32f24_l_xc(ae_int32x2 ae_ls_v,ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_s32f24_l_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32f24.l.xc(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_ae_s32f24_l_xc(ae_ls_v, ars, art); +} + +//--- ae_s32f24_l_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32f24_l_xp(ae_int32x2 ae_ls_v,ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_s32f24_l_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32f24.l.xp(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32f24_l_xp(ae_ls_v, ars, art); +} + +//--- ae_s32m_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32m_i(ae_int64 ae_ls_v,ae_int32* ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s32m_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32m.i(<1 x i64> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32m_i(ae_ls_v, ars, -32); +} 
+ +//--- ae_s32m_iu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32m_iu(ae_int64 ae_ls_v,ae_int32** ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s32m_iu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32m.iu(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32m_iu(ae_ls_v, ars, -32); +} + +//--- ae_s32m_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32m_x(ae_int64 ae_ls_v,ae_int32* ars,int art) { +// CHECK-LABEL: test_ae_s32m_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32m.x(<1 x i64> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32m_x(ae_ls_v, ars, art); +} + +//--- ae_s32m_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool 
xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32m_xc(ae_int64 ae_ls_v,ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_s32m_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32m.xc(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32m_xc(ae_ls_v, ars, art); +} + +//--- ae_s32m_xu.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32m_xu(ae_int64 ae_ls_v,ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_s32m_xu +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32m.xu(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32m_xu(ae_ls_v, ars, art); +} + +//--- ae_s32ra64s_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; 
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32ra64s_i(ae_int64 ae_ls_v1,ae_int32* ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s32ra64s_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32ra64s.i(<1 x i64> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32ra64s_i(ae_ls_v1, ars, -32); +} + +//--- ae_s32ra64s_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32ra64s_ip(ae_int64 ae_ls_v1,ae_int32** ars,immediate ae_immls32) { +// CHECK-LABEL: test_ae_s32ra64s_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32ra64s.ip(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32ra64s_ip(ae_ls_v1, ars, -32); +} + +//--- ae_s32ra64s_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32ra64s_x(ae_int64 ae_ls_v1,ae_int32* ars,int art) { +// CHECK-LABEL: test_ae_s32ra64s_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32ra64s.x(<1 x i64> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32ra64s_x(ae_ls_v1, ars, art); +} + +//--- ae_s32ra64s_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32ra64s_xc(ae_int64 ae_ls_v1,ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_s32ra64s_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32ra64s.xc(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32ra64s_xc(ae_ls_v1, ars, art); +} + +//--- ae_s32ra64s_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool 
xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32ra64s_xp(ae_int64 ae_ls_v1,ae_int32** ars,int art) { +// CHECK-LABEL: test_ae_s32ra64s_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32ra64s.xp(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32ra64s_xp(ae_ls_v1, ars, art); +} + +//--- ae_s32x2_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2_i(ae_int32x2 ae_ls_v,ae_int32x2* ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_s32x2_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32x2.i(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32x2_i(ae_ls_v, ars, -64); +} + +//--- ae_s32x2_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2_ip(ae_int32x2 ae_ls_v,ae_int32x2** ars,immediate ae_immls64pos) { +// CHECK-LABEL: test_ae_s32x2_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2.ip(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32x2_ip(ae_ls_v, ars, 0); +} + +//--- ae_s32x2_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2_ric(ae_int32x2 ae_ls_v,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_s32x2_ric +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2.ric(<2 x i32> {{.*}}, ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_s32x2_ric(ae_ls_v, ars); +} + +//--- ae_s32x2_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2_rip(ae_int32x2 ae_ls_v,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_s32x2_rip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2.rip(<2 x i32> {{.*}}, ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_s32x2_rip(ae_ls_v, ars); +} + +//--- ae_s32x2_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2_x(ae_int32x2 ae_ls_v,ae_int32x2* ars,int art) { +// CHECK-LABEL: test_ae_s32x2_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32x2.x(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32x2_x(ae_ls_v, ars, art); +} + +//--- ae_s32x2_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( 
vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2_xc(ae_int32x2 ae_ls_v,ae_int32x2** ars,int art) { +// CHECK-LABEL: test_ae_s32x2_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2.xc(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32x2_xc(ae_ls_v, ars, art); +} + +//--- ae_s32x2_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2_xp(ae_int32x2 ae_ls_v,ae_int32x2** ars,int art) { +// CHECK-LABEL: test_ae_s32x2_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2.xp(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32x2_xp(ae_ls_v, ars, art); +} + +//--- ae_s32x2f24_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 
__attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2f24_i(ae_int32x2 ae_ls_v,ae_int32x2* ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_s32x2f24_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32x2f24.i(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32x2f24_i(ae_ls_v, ars, -64); +} + +//--- ae_s32x2f24_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2f24_ip(ae_int32x2 ae_ls_v,ae_int32x2** ars,immediate ae_immls64pos) { +// CHECK-LABEL: test_ae_s32x2f24_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2f24.ip(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32x2f24_ip(ae_ls_v, ars, 0); +} + +//--- ae_s32x2f24_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 
__attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2f24_ric(ae_int32x2 ae_ls_v,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_s32x2f24_ric +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2f24.ric(<2 x i32> {{.*}}, ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_s32x2f24_ric(ae_ls_v, ars); +} + +//--- ae_s32x2f24_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2f24_rip(ae_int32x2 ae_ls_v,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_s32x2f24_rip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2f24.rip(<2 x i32> {{.*}}, ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_s32x2f24_rip(ae_ls_v, ars); +} + +//--- ae_s32x2f24_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 
__attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2f24_x(ae_int32x2 ae_ls_v,ae_int32x2* ars,int art) { +// CHECK-LABEL: test_ae_s32x2f24_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s32x2f24.x(<2 x i32> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32x2f24_x(ae_ls_v, ars, art); +} + +//--- ae_s32x2f24_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2f24_xc(ae_int32x2 ae_ls_v,ae_int32x2** ars,int art) { +// CHECK-LABEL: test_ae_s32x2f24_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2f24.xc(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32x2f24_xc(ae_ls_v, ars, art); +} + +//--- ae_s32x2f24_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); 
+typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2f24_xp(ae_int32x2 ae_ls_v,ae_int32x2** ars,int art) { +// CHECK-LABEL: test_ae_s32x2f24_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2f24.xp(<2 x i32> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s32x2f24_xp(ae_ls_v, ars, art); +} + +//--- ae_s32x2ra64s_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s32x2ra64s_ip(ae_int64 ae_ls_v2,ae_int64 ae_ls_v1,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_s32x2ra64s_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s32x2ra64s.ip(<1 x i64> {{.*}}, <1 x i64> {{.*}}, ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_s32x2ra64s_ip(ae_ls_v2, ae_ls_v1, ars); +} + +//--- ae_s64_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + 
+typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s64_i(ae_int64 ae_ls_v,ae_int64* ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_s64_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s64.i(<1 x i64> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s64_i(ae_ls_v, ars, -64); +} + +//--- ae_s64_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s64_ip(ae_int64 ae_ls_v,ae_int64** ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_s64_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s64.ip(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s64_ip(ae_ls_v, ars, -64); +} + +//--- ae_s64_x.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int 
ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s64_x(ae_int64 ae_ls_v,ae_int64* ars,int art) { +// CHECK-LABEL: test_ae_s64_x +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.s64.x(<1 x i64> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s64_x(ae_ls_v, ars, art); +} + +//--- ae_s64_xc.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s64_xc(ae_int64 ae_ls_v,ae_int64** ars,int art) { +// CHECK-LABEL: test_ae_s64_xc +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s64.xc(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s64_xc(ae_ls_v, ars, art); +} + +//--- ae_s64_xp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( 
vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_s64_xp(ae_int64 ae_ls_v,ae_int64** ars,int art) { +// CHECK-LABEL: test_ae_s64_xp +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.s64.xp(<1 x i64> {{.*}}, ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_s64_xp(ae_ls_v, ars, art); +} + +//--- ae_sa16x4_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa16x4_ic(ae_int16x4 ae_ls_v,ae_valign* ae_ls_su,ae_int16x4** ars) { +// CHECK-LABEL: test_ae_sa16x4_ic +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa16x4.ic(<4 x i16> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa16x4_ic(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa16x4_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) 
_Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa16x4_ip(ae_int16x4 ae_ls_v,ae_valign* ae_ls_su,ae_int16x4** ars) { +// CHECK-LABEL: test_ae_sa16x4_ip +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa16x4.ip(<4 x i16> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa16x4_ip(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa16x4_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa16x4_ric(ae_int16x4 ae_ls_v,ae_valign* ae_ls_su,ae_int16x4** ars) { +// CHECK-LABEL: test_ae_sa16x4_ric +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa16x4.ric(<4 x i16> {{.*}}, <8 x i8> 
%[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa16x4_ric(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa16x4_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa16x4_rip(ae_int16x4 ae_ls_v,ae_valign* ae_ls_su,ae_int16x4** ars) { +// CHECK-LABEL: test_ae_sa16x4_rip +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa16x4.rip(<4 x i16> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa16x4_rip(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa24_l_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + 
+typedef int immediate; + +void test_ae_sa24_l_ic(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) { +// CHECK-LABEL: test_ae_sa24_l_ic +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24.l.ic(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa24_l_ic(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa24_l_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa24_l_ip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) { +// CHECK-LABEL: test_ae_sa24_l_ip +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24.l.ip(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa24_l_ip(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa24_l_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 
__attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa24_l_ric(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) { +// CHECK-LABEL: test_ae_sa24_l_ric +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24.l.ric(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa24_l_ric(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa24_l_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa24_l_rip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) { +// CHECK-LABEL: test_ae_sa24_l_rip +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24.l.rip(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret 
void + __builtin_xtensa_ae_sa24_l_rip(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa24x2_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa24x2_ic(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) { +// CHECK-LABEL: test_ae_sa24x2_ic +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24x2.ic(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa24x2_ic(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa24x2_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa24x2_ip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) { +// CHECK-LABEL: 
test_ae_sa24x2_ip +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24x2.ip(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa24x2_ip(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa24x2_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa24x2_ric(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) { +// CHECK-LABEL: test_ae_sa24x2_ric +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24x2.ric(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa24x2_ric(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa24x2_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short 
ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa24x2_rip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,void** ars) { +// CHECK-LABEL: test_ae_sa24x2_rip +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa24x2.rip(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa24x2_rip(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa32x2_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa32x2_ic(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_sa32x2_ic +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2.ic(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa32x2_ic(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa32x2_ip.c + +typedef 
__attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa32x2_ip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_sa32x2_ip +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2.ip(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa32x2_ip(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa32x2_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa32x2_ric(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_sa32x2_ric +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: 
%[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2.ric(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa32x2_ric(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa32x2_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa32x2_rip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_sa32x2_rip +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2.rip(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa32x2_rip(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa32x2f24_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 
__attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa32x2f24_ic(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_sa32x2f24_ic +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2f24.ic(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa32x2f24_ic(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa32x2f24_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa32x2f24_ip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_sa32x2f24_ip +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2f24.ip(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa32x2f24_ip(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa32x2f24_ric.c + +typedef __attribute__((ext_vector_type(1))) _Bool 
xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa32x2f24_ric(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_sa32x2f24_ric +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2f24.ric(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa32x2f24_ric(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa32x2f24_rip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa32x2f24_rip(ae_int32x2 ae_ls_v,ae_valign* ae_ls_su,ae_int32x2** ars) { +// CHECK-LABEL: test_ae_sa32x2f24_rip +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[LD_ARS:.*]] = load ptr, 
ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <8 x i8>, ptr } @llvm.xtensa.ae.sa32x2f24.rip(<2 x i32> {{.*}}, <8 x i8> %[[LD_AE_LS_SU]], ptr %[[LD_ARS]]) +// CHECK: %[[EV:.*]] = extractvalue { <8 x i8>, ptr } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_sa32x2f24_rip(ae_ls_v, ae_ls_su, ars); +} + +//--- ae_sa64neg_fp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sa64neg_fp(ae_valign* ae_ls_su,void* ars) { +// CHECK-LABEL: test_ae_sa64neg_fp +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <8 x i8> @llvm.xtensa.ae.sa64neg.fp(<8 x i8> %[[LD_AE_LS_SU]], ptr {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_sa64neg_fp(ae_ls_su, ars); +} + +//--- ae_sa64pos_fp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void 
test_ae_sa64pos_fp(ae_valign* ae_ls_su,void* ars) { +// CHECK-LABEL: test_ae_sa64pos_fp +// CHECK: %[[LD_AE_LS_SU:.*]] = load <8 x i8>, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call <8 x i8> @llvm.xtensa.ae.sa64pos.fp(<8 x i8> %[[LD_AE_LS_SU]], ptr {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_sa64pos_fp(ae_ls_su, ars); +} + +//--- ae_salign64_i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_salign64_i(ae_valign ae_ls_su,ae_valign* ars,immediate ae_immls64) { +// CHECK-LABEL: test_ae_salign64_i +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.salign64.i(<8 x i8> {{.*}}, ptr {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_salign64_i(ae_ls_su, ars, -64); +} + +//--- ae_sat16x4.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_sat16x4(ae_int32x2 
ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_sat16x4 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.sat16x4(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_sat16x4(ae_arth_v0, ae_arth_v1); +} + +//--- ae_sat24s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sat24s(ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_sat24s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sat24s(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sat24s(ae_arth_v1); +} + +//--- ae_sat48s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_sat48s(ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_sat48s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> 
@llvm.xtensa.ae.sat48s(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_sat48s(ae_arth_v1); +} + +//--- ae_satq56s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_satq56s(ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_satq56s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.satq56s(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_satq56s(ae_arth_v1); +} + +//--- ae_sb.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sb(short** ars,int art) { +// CHECK-LABEL: test_ae_sb +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.sb(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_sb(ars, art); +} + +//--- ae_sb_ic.c + +typedef 
__attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sb_ic(short** ars,int art) { +// CHECK-LABEL: test_ae_sb_ic +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.sb.ic(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_sb_ic(ars, art); +} + +//--- ae_sb_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sb_ip(short** ars,int art) { +// CHECK-LABEL: test_ae_sb_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.sb.ip(ptr %[[LD_ARS]], i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_sb_ip(ars, art); +} + +//--- ae_sbf.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sbf(short** ars) { +// CHECK-LABEL: test_ae_sbf +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.sbf(ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_sbf(ars); +} + +//--- ae_sbf_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sbf_ic(short** ars) { +// CHECK-LABEL: test_ae_sbf_ic +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.sbf.ic(ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_sbf_ic(ars); +} + +//--- ae_sbf_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short 
ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sbf_ip(short** ars) { +// CHECK-LABEL: test_ae_sbf_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.sbf.ip(ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_sbf_ip(ars); +} + +//--- ae_sbi.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sbi(short** ars,int art,immediate ae_ohba2) { +// CHECK-LABEL: test_ae_sbi +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.sbi(ptr %[[LD_ARS]], i32 {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_sbi(ars, art, 1); +} + +//--- ae_sbi_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short 
ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sbi_ic(short** ars,int art,immediate ae_ohba2) { +// CHECK-LABEL: test_ae_sbi_ic +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.sbi.ic(ptr %[[LD_ARS]], i32 {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_sbi_ic(ars, art, 1); +} + +//--- ae_sbi_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_sbi_ip(short** ars,int art,immediate ae_ohba2) { +// CHECK-LABEL: test_ae_sbi_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.sbi.ip(ptr %[[LD_ARS]], i32 {{.*}}, i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_sbi_ip(ars, art, 1); +} + +//--- ae_sel16i.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef 
long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_sel16i(ae_int16x4 ae_dr_to_dr_v0,ae_int16x4 ae_dr_to_dr_v1,immediate ae_selimm) { +// CHECK-LABEL: test_ae_sel16i +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.sel16i(<4 x i16> {{.*}}, <4 x i16> {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_sel16i(ae_dr_to_dr_v0, ae_dr_to_dr_v1, 0); +} + +//--- ae_sel16i_n.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_sel16i_n(ae_int16x4 ae_dr_to_dr_v0,ae_int16x4 ae_dr_to_dr_v1,immediate ae_selimm_N) { +// CHECK-LABEL: test_ae_sel16i_n +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.sel16i.n(<4 x i16> {{.*}}, <4 x i16> {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_sel16i_n(ae_dr_to_dr_v0, ae_dr_to_dr_v1, 0); +} + +//--- ae_sext32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short 
ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sext32(ae_int32x2 ae_dr_to_dr_v0,immediate ae_opnd_tp7) { +// CHECK-LABEL: test_ae_sext32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sext32(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sext32(ae_dr_to_dr_v0, 7); +} + +//--- ae_sext32x2d16_10.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sext32x2d16_10(ae_int16x4 ae_to_dr_v0) { +// CHECK-LABEL: test_ae_sext32x2d16_10 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sext32x2d16.10(<4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sext32x2d16_10(ae_to_dr_v0); +} + +//--- ae_sext32x2d16_32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 
__attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sext32x2d16_32(ae_int16x4 ae_to_dr_v0) { +// CHECK-LABEL: test_ae_sext32x2d16_32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sext32x2d16.32(<4 x i16> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sext32x2d16_32(ae_to_dr_v0); +} + +//--- ae_sha32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_ae_sha32(int ars) { +// CHECK-LABEL: test_ae_sha32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.ae.sha32(i32 {{.*}}) +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_ae_sha32(ars); +} + +//--- ae_shortswap.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_shortswap(ae_int16x4 ae_to_dr_v0) 
{ +// CHECK-LABEL: test_ae_shortswap +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.shortswap(<4 x i16> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_shortswap(ae_to_dr_v0); +} + +//--- ae_slaa16s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_slaa16s(ae_int16x4 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_slaa16s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.slaa16s(<4 x i16> {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_slaa16s(ae_shift_d0, ars); +} + +//--- ae_slaa32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_slaa32(ae_int32x2 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_slaa32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.slaa32(<2 x i32> 
{{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_slaa32(ae_shift_d0, ars); +} + +//--- ae_slaa32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_slaa32s(ae_int32x2 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_slaa32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.slaa32s(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_slaa32s(ae_shift_d0, ars); +} + +//--- ae_slaa64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_slaa64(ae_int64 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_slaa64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.slaa64(<1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_slaa64(ae_shift_d0, ars); +} + 
+//--- ae_slaa64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_slaa64s(ae_int64 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_slaa64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.slaa64s(<1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_slaa64s(ae_shift_d0, ars); +} + +//--- ae_slaaq56.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_slaaq56(ae_int64 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_slaaq56 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.slaaq56(<1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_slaaq56(ae_shift_d0, ars); +} + +//--- ae_slai16s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_slai16s(ae_int16x4 ae_shift_d0,immediate ae_osa16) { +// CHECK-LABEL: test_ae_slai16s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.slai16s(<4 x i16> {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_slai16s(ae_shift_d0, 0); +} + +//--- ae_slai24.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_slai24(ae_int32x2 ae_shift_d0,immediate ae_osa32) { +// CHECK-LABEL: test_ae_slai24 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.slai24(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_slai24(ae_shift_d0, 0); +} + +//--- ae_slai24s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_slai24s(ae_int32x2 ae_shift_d0,immediate ae_osa32) { +// CHECK-LABEL: test_ae_slai24s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.slai24s(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_slai24s(ae_shift_d0, 0); +} + +//--- ae_slai32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_slai32(ae_int32x2 ae_shift_d0,immediate ae_osa32) { +// CHECK-LABEL: test_ae_slai32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.slai32(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_slai32(ae_shift_d0, 0); +} + +//--- ae_slai32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( 
vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_slai32s(ae_int32x2 ae_shift_d0,immediate ae_osa32) { +// CHECK-LABEL: test_ae_slai32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.slai32s(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_slai32s(ae_shift_d0, 0); +} + +//--- ae_slai64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_slai64(ae_int64 ae_shift_d0,immediate ae_osa64) { +// CHECK-LABEL: test_ae_slai64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.slai64(<1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_slai64(ae_shift_d0, 0); +} + +//--- ae_slai64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short 
ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_slai64s(ae_int64 ae_shift_d0,immediate ae_osa64) { +// CHECK-LABEL: test_ae_slai64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.slai64s(<1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_slai64s(ae_shift_d0, 0); +} + +//--- ae_slaisq56s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_slaisq56s(ae_int64 ae_shift_d0,immediate ae_osa64) { +// CHECK-LABEL: test_ae_slaisq56s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.slaisq56s(<1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_slaisq56s(ae_shift_d0, 0); +} + +//--- ae_slas24.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( 
vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_slas24(ae_int32x2 ae_shift_d0) { +// CHECK-LABEL: test_ae_slas24 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.slas24(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_slas24(ae_shift_d0); +} + +//--- ae_slas24s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_slas24s(ae_int32x2 ae_shift_d0) { +// CHECK-LABEL: test_ae_slas24s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.slas24s(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_slas24s(ae_shift_d0); +} + +//--- ae_slas32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned 
char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_slas32(ae_int32x2 ae_shift_d0) { +// CHECK-LABEL: test_ae_slas32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.slas32(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_slas32(ae_shift_d0); +} + +//--- ae_slas32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_slas32s(ae_int32x2 ae_shift_d0) { +// CHECK-LABEL: test_ae_slas32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.slas32s(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_slas32s(ae_shift_d0); +} + +//--- ae_slas64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_slas64(ae_int64 ae_shift_d0) { +// CHECK-LABEL: test_ae_slas64 
+// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.slas64(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_slas64(ae_shift_d0); +} + +//--- ae_slas64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_slas64s(ae_int64 ae_shift_d0) { +// CHECK-LABEL: test_ae_slas64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.slas64s(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_slas64s(ae_shift_d0); +} + +//--- ae_slasq56.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_slasq56(ae_int64 ae_shift_d0) { +// CHECK-LABEL: test_ae_slasq56 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.slasq56(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return 
__builtin_xtensa_ae_slasq56(ae_shift_d0); +} + +//--- ae_slassq56s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_slassq56s(ae_int64 ae_shift_d0) { +// CHECK-LABEL: test_ae_slassq56s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.slassq56s(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_slassq56s(ae_shift_d0); +} + +//--- ae_sra64_32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_sra64_32(ae_int32x2 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_sra64_32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.sra64.32(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_sra64_32(ae_shift_d0, ars); +} + +//--- ae_sraa16rs.c + +typedef __attribute__((ext_vector_type(1))) _Bool 
xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_sraa16rs(ae_int16x4 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_sraa16rs +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.sraa16rs(<4 x i16> {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_sraa16rs(ae_shift_d0, ars); +} + +//--- ae_sraa16s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_sraa16s(ae_int16x4 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_sraa16s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.sraa16s(<4 x i16> {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_sraa16s(ae_shift_d0, ars); +} + +//--- ae_sraa32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef 
__attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sraa32(ae_int32x2 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_sraa32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sraa32(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sraa32(ae_shift_d0, ars); +} + +//--- ae_sraa32rs.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sraa32rs(ae_int32x2 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_sraa32rs +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sraa32rs(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sraa32rs(ae_shift_d0, ars); +} + +//--- ae_sraa32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( 
vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sraa32s(ae_int32x2 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_sraa32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sraa32s(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sraa32s(ae_shift_d0, ars); +} + +//--- ae_sraa64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_sraa64(ae_int64 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_sraa64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.sraa64(<1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_sraa64(ae_shift_d0, ars); +} + +//--- ae_srai16.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( 
vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_srai16(ae_int16x4 ae_shift_d0,immediate ae_osa16) { +// CHECK-LABEL: test_ae_srai16 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.srai16(<4 x i16> {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_srai16(ae_shift_d0, 0); +} + +//--- ae_srai16r.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_srai16r(ae_int16x4 ae_shift_d0,immediate ae_osa16) { +// CHECK-LABEL: test_ae_srai16r +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.srai16r(<4 x i16> {{.*}}, i32 {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_srai16r(ae_shift_d0, 0); +} + +//--- ae_srai24.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short 
ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_srai24(ae_int32x2 ae_shift_d0,immediate ae_osa32) { +// CHECK-LABEL: test_ae_srai24 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.srai24(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_srai24(ae_shift_d0, 0); +} + +//--- ae_srai32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_srai32(ae_int32x2 ae_shift_d0,immediate ae_osa32) { +// CHECK-LABEL: test_ae_srai32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.srai32(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_srai32(ae_shift_d0, 0); +} + +//--- ae_srai32r.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( 
vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_srai32r(ae_int32x2 ae_shift_d0,immediate ae_osa32) { +// CHECK-LABEL: test_ae_srai32r +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.srai32r(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_srai32r(ae_shift_d0, 0); +} + +//--- ae_srai64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_srai64(ae_int64 ae_shift_d0,immediate ae_osa64) { +// CHECK-LABEL: test_ae_srai64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.srai64(<1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_srai64(ae_shift_d0, 0); +} + +//--- ae_sras24.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef 
int immediate; + +ae_int32x2 test_ae_sras24(ae_int32x2 ae_shift_d0) { +// CHECK-LABEL: test_ae_sras24 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sras24(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sras24(ae_shift_d0); +} + +//--- ae_sras32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sras32(ae_int32x2 ae_shift_d0) { +// CHECK-LABEL: test_ae_sras32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sras32(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sras32(ae_shift_d0); +} + +//--- ae_sras64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_sras64(ae_int64 ae_shift_d0) { +// CHECK-LABEL: test_ae_sras64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> 
@llvm.xtensa.ae.sras64(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_sras64(ae_shift_d0); +} + +//--- ae_srla32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_srla32(ae_int32x2 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_srla32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.srla32(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_srla32(ae_shift_d0, ars); +} + +//--- ae_srla64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_srla64(ae_int64 ae_shift_d0,int ars) { +// CHECK-LABEL: test_ae_srla64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.srla64(<1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_srla64(ae_shift_d0, 
ars); +} + +//--- ae_srli24.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_srli24(ae_int32x2 ae_shift_d0,immediate ae_osa32) { +// CHECK-LABEL: test_ae_srli24 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.srli24(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_srli24(ae_shift_d0, 0); +} + +//--- ae_srli32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_srli32(ae_int32x2 ae_shift_d0,immediate ae_osa32) { +// CHECK-LABEL: test_ae_srli32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.srli32(<2 x i32> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_srli32(ae_shift_d0, 0); +} + +//--- ae_srli64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; 
+typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_srli64(ae_int64 ae_shift_d0,immediate ae_osa64) { +// CHECK-LABEL: test_ae_srli64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.srli64(<1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_srli64(ae_shift_d0, 0); +} + +//--- ae_srls24.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_srls24(ae_int32x2 ae_shift_d0) { +// CHECK-LABEL: test_ae_srls24 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.srls24(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_srls24(ae_shift_d0); +} + +//--- ae_srls32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int 
ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_srls32(ae_int32x2 ae_shift_d0) { +// CHECK-LABEL: test_ae_srls32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.srls32(<2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_srls32(ae_shift_d0); +} + +//--- ae_srls64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_srls64(ae_int64 ae_shift_d0) { +// CHECK-LABEL: test_ae_srls64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.srls64(<1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_srls64(ae_shift_d0); +} + +//--- ae_sub16.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef 
short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_sub16(ae_int16x4 ae_arth_v0,ae_int16x4 ae_arth_v1) { +// CHECK-LABEL: test_ae_sub16 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.sub16(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_sub16(ae_arth_v0, ae_arth_v1); +} + +//--- ae_sub16s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int16x4 test_ae_sub16s(ae_int16x4 ae_arth_v0,ae_int16x4 ae_arth_v1) { +// CHECK-LABEL: test_ae_sub16s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <4 x i16> @llvm.xtensa.ae.sub16s(<4 x i16> {{.*}}, <4 x i16> {{.*}}) +// CHECK: ret <4 x i16> %[[RET]] +return __builtin_xtensa_ae_sub16s(ae_arth_v0, ae_arth_v1); +} + +//--- ae_sub24s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short 
ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sub24s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_sub24s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sub24s(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sub24s(ae_arth_v0, ae_arth_v1); +} + +//--- ae_sub32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sub32(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_sub32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sub32(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sub32(ae_arth_v0, ae_arth_v1); +} + +//--- ae_sub32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long 
ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_sub32s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_sub32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.sub32s(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_sub32s(ae_arth_v0, ae_arth_v1); +} + +//--- ae_sub64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_sub64(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_sub64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.sub64(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_sub64(ae_arth_v0, ae_arth_v1); +} + +//--- ae_sub64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign 
__attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_sub64s(ae_int64 ae_arth_v0,ae_int64 ae_arth_v1) { +// CHECK-LABEL: test_ae_sub64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.sub64s(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_sub64s(ae_arth_v0, ae_arth_v1); +} + +//--- ae_subadd32.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_subadd32(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_subadd32 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.subadd32(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_subadd32(ae_arth_v0, ae_arth_v1); +} + +//--- ae_subadd32s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int 
immediate; + +ae_int32x2 test_ae_subadd32s(ae_int32x2 ae_arth_v0,ae_int32x2 ae_arth_v1) { +// CHECK-LABEL: test_ae_subadd32s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.subadd32s(<2 x i32> {{.*}}, <2 x i32> {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_subadd32s(ae_arth_v0, ae_arth_v1); +} + +//--- ae_trunca32f64s_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_trunca32f64s_l(ae_int32x2 ae_shift_d0,ae_int64 ae_shift_sd,int ars) { +// CHECK-LABEL: test_ae_trunca32f64s_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.trunca32f64s.l(<2 x i32> {{.*}}, <1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_trunca32f64s_l(ae_shift_d0, ae_shift_sd, ars); +} + +//--- ae_trunca32x2f64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( 
vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_trunca32x2f64s(ae_int64 ae_shift_d0,ae_int64 ae_shift_sd,int ars) { +// CHECK-LABEL: test_ae_trunca32x2f64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.trunca32x2f64s(<1 x i64> {{.*}}, <1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_trunca32x2f64s(ae_shift_d0, ae_shift_sd, ars); +} + +//--- ae_trunci32f64s_l.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_trunci32f64s_l(ae_int32x2 ae_shift_d0,ae_int64 ae_shift_sd,immediate ae_osa16) { +// CHECK-LABEL: test_ae_trunci32f64s_l +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.trunci32f64s.l(<2 x i32> {{.*}}, <1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_trunci32f64s_l(ae_shift_d0, ae_shift_sd, 0); +} + +//--- ae_trunci32x2f64s.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 
__attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int32x2 test_ae_trunci32x2f64s(ae_int64 ae_shift_d0,ae_int64 ae_shift_sd,immediate ae_osa16) { +// CHECK-LABEL: test_ae_trunci32x2f64s +// CHECK: %[[RET:.*]] = {{(tail)?}} call <2 x i32> @llvm.xtensa.ae.trunci32x2f64s(<1 x i64> {{.*}}, <1 x i64> {{.*}}, i32 {{.*}}) +// CHECK: ret <2 x i32> %[[RET]] +return __builtin_xtensa_ae_trunci32x2f64s(ae_shift_d0, ae_shift_sd, 0); +} + +//--- ae_vldl16c.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vldl16c(const short** ars) { +// CHECK-LABEL: test_ae_vldl16c +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.vldl16c(ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_vldl16c(ars); +} + +//--- ae_vldl16c_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); 
+typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vldl16c_ic(const short** ars) { +// CHECK-LABEL: test_ae_vldl16c_ic +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.vldl16c.ic(ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_vldl16c_ic(ars); +} + +//--- ae_vldl16c_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vldl16c_ip(const short** ars) { +// CHECK-LABEL: test_ae_vldl16c_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.vldl16c.ip(ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_vldl16c_ip(ars); +} + +//--- ae_vldl16t.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void 
test_ae_vldl16t(xtbool* br,int* art,const short* ars) { +// CHECK-LABEL: test_ae_vldl16t +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i1>, i32 } @llvm.xtensa.ae.vldl16t(ptr {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i1>, i32 } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_vldl16t(br, art, ars); +} + +//--- ae_vldl32t.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vldl32t(xtbool* br,int* art,const int* ars) { +// CHECK-LABEL: test_ae_vldl32t +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i1>, i32 } @llvm.xtensa.ae.vldl32t(ptr {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i1>, i32 } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_vldl32t(br, art, ars); +} + +//--- ae_vldsht.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vldsht(int art) { +// CHECK-LABEL: 
test_ae_vldsht +// CHECK: {{(tail)?}} call void @llvm.xtensa.ae.vldsht(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_ae_vldsht(art); +} + +//--- ae_vlel16t.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vlel16t(xtbool* br,int* art,const short* ars) { +// CHECK-LABEL: test_ae_vlel16t +// CHECK: %[[LD_ART:.*]] = load i32, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call { <1 x i1>, i32 } @llvm.xtensa.ae.vlel16t(i32 %[[LD_ART]], ptr {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i1>, i32 } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_vlel16t(br, art, ars); +} + +//--- ae_vlel32t.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vlel32t(xtbool* br,int* art,const int* ars) { +// CHECK-LABEL: test_ae_vlel32t +// CHECK: %[[LD_ART:.*]] = load i32, ptr {{.*}} +// CHECK: 
%[[RET:.*]] = {{(tail)?}} call { <1 x i1>, i32 } @llvm.xtensa.ae.vlel32t(i32 %[[LD_ART]], ptr {{.*}}) +// CHECK: %[[EV:.*]] = extractvalue { <1 x i1>, i32 } %[[RET]], 0 +// CHECK: ret void + __builtin_xtensa_ae_vlel32t(br, art, ars); +} + +//--- ae_vles16c.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vles16c(short** ars) { +// CHECK-LABEL: test_ae_vles16c +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.vles16c(ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_vles16c(ars); +} + +//--- ae_vles16c_ic.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vles16c_ic(short** ars) { +// CHECK-LABEL: test_ae_vles16c_ic +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr 
@llvm.xtensa.ae.vles16c.ic(ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_vles16c_ic(ars); +} + +//--- ae_vles16c_ip.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_ae_vles16c_ip(short** ars) { +// CHECK-LABEL: test_ae_vles16c_ip +// CHECK: %[[LD_ARS:.*]] = load ptr, ptr {{.*}} +// CHECK: %[[RET:.*]] = {{(tail)?}} call ptr @llvm.xtensa.ae.vles16c.ip(ptr %[[LD_ARS]]) +// CHECK: ret void + __builtin_xtensa_ae_vles16c_ip(ars); +} + +//--- ae_xor.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_int64 test_ae_xor(ae_int64 ae_dr_to_dr_v0,ae_int64 ae_dr_to_dr_v1) { +// CHECK-LABEL: test_ae_xor +// CHECK: %[[RET:.*]] = {{(tail)?}} call <1 x i64> @llvm.xtensa.ae.xor(<1 x i64> {{.*}}, <1 x i64> {{.*}}) +// CHECK: ret <1 x i64> %[[RET]] +return __builtin_xtensa_ae_xor(ae_dr_to_dr_v0, ae_dr_to_dr_v1); 
+} + +//--- ae_zalign64.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +ae_valign test_ae_zalign64() { +// CHECK-LABEL: test_ae_zalign64 +// CHECK: %[[RET:.*]] = {{(tail)?}} call <8 x i8> @llvm.xtensa.ae.zalign64() +// CHECK: %[[CAST:.*]] = bitcast <8 x i8> %[[RET]] to i64 +// CHECK: ret i64 %[[CAST]] +return __builtin_xtensa_ae_zalign64(); +} + +//--- rur_ae_bithead.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_bithead() { +// CHECK-LABEL: test_rur_ae_bithead +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.bithead() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_bithead(); +} + +//--- rur_ae_bitptr.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) 
_Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_bitptr() { +// CHECK-LABEL: test_rur_ae_bitptr +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.bitptr() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_bitptr(); +} + +//--- rur_ae_bitsused.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_bitsused() { +// CHECK-LABEL: test_rur_ae_bitsused +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.bitsused() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_bitsused(); +} + +//--- rur_ae_cbegin0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); 
+typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_cbegin0() { +// CHECK-LABEL: test_rur_ae_cbegin0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.cbegin0() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_cbegin0(); +} + +//--- rur_ae_cend0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_cend0() { +// CHECK-LABEL: test_rur_ae_cend0 +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.cend0() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_cend0(); +} + +//--- rur_ae_cw_sd_no.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_cw_sd_no() { +// 
CHECK-LABEL: test_rur_ae_cw_sd_no +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.cw.sd.no() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_cw_sd_no(); +} + +//--- rur_ae_cwrap.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_cwrap() { +// CHECK-LABEL: test_rur_ae_cwrap +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.cwrap() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_cwrap(); +} + +//--- rur_ae_first_ts.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_first_ts() { +// CHECK-LABEL: test_rur_ae_first_ts +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.first.ts() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_first_ts(); +} + +//--- rur_ae_nextoffset.c + +typedef 
__attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_nextoffset() { +// CHECK-LABEL: test_rur_ae_nextoffset +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.nextoffset() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_nextoffset(); +} + +//--- rur_ae_overflow.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_overflow() { +// CHECK-LABEL: test_rur_ae_overflow +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.overflow() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_overflow(); +} + +//--- rur_ae_ovf_sar.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); 
+typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_ovf_sar() { +// CHECK-LABEL: test_rur_ae_ovf_sar +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.ovf.sar() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_ovf_sar(); +} + +//--- rur_ae_sar.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_sar() { +// CHECK-LABEL: test_rur_ae_sar +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.sar() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_sar(); +} + +//--- rur_ae_searchdone.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 
__attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_searchdone() { +// CHECK-LABEL: test_rur_ae_searchdone +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.searchdone() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_searchdone(); +} + +//--- rur_ae_tablesize.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_tablesize() { +// CHECK-LABEL: test_rur_ae_tablesize +// CHECK: %[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.tablesize() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_tablesize(); +} + +//--- rur_ae_ts_fts_bu_bp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +int test_rur_ae_ts_fts_bu_bp() { +// CHECK-LABEL: test_rur_ae_ts_fts_bu_bp +// CHECK: 
%[[RET:.*]] = {{(tail)?}} call i32 @llvm.xtensa.rur.ae.ts.fts.bu.bp() +// CHECK: ret i32 %[[RET]] +return __builtin_xtensa_rur_ae_ts_fts_bu_bp(); +} + +//--- wur_ae_bithead.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_bithead(int art) { +// CHECK-LABEL: test_wur_ae_bithead +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.bithead(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_bithead(art); +} + +//--- wur_ae_bitptr.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_bitptr(int art) { +// CHECK-LABEL: test_wur_ae_bitptr +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.bitptr(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_bitptr(art); +} + +//--- wur_ae_bitsused.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef 
__attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_bitsused(int art) { +// CHECK-LABEL: test_wur_ae_bitsused +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.bitsused(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_bitsused(art); +} + +//--- wur_ae_cbegin0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_cbegin0(int art) { +// CHECK-LABEL: test_wur_ae_cbegin0 +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.cbegin0(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_cbegin0(art); +} + +//--- wur_ae_cend0.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 
__attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_cend0(int art) { +// CHECK-LABEL: test_wur_ae_cend0 +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.cend0(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_cend0(art); +} + +//--- wur_ae_cw_sd_no.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_cw_sd_no(int art) { +// CHECK-LABEL: test_wur_ae_cw_sd_no +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.cw.sd.no(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_cw_sd_no(art); +} + +//--- wur_ae_cwrap.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( 
vector_size(8))); + +typedef int immediate; + +void test_wur_ae_cwrap(int art) { +// CHECK-LABEL: test_wur_ae_cwrap +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.cwrap(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_cwrap(art); +} + +//--- wur_ae_first_ts.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_first_ts(int art) { +// CHECK-LABEL: test_wur_ae_first_ts +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.first.ts(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_first_ts(art); +} + +//--- wur_ae_nextoffset.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_nextoffset(int art) { +// CHECK-LABEL: test_wur_ae_nextoffset +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.nextoffset(i32 {{.*}}) +// CHECK: ret void + 
__builtin_xtensa_wur_ae_nextoffset(art); +} + +//--- wur_ae_overflow.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_overflow(int art) { +// CHECK-LABEL: test_wur_ae_overflow +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.overflow(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_overflow(art); +} + +//--- wur_ae_ovf_sar.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_ovf_sar(int art) { +// CHECK-LABEL: test_wur_ae_ovf_sar +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.ovf.sar(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_ovf_sar(art); +} + +//--- wur_ae_sar.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef 
int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_sar(int art) { +// CHECK-LABEL: test_wur_ae_sar +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.sar(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_sar(art); +} + +//--- wur_ae_searchdone.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_searchdone(int art) { +// CHECK-LABEL: test_wur_ae_searchdone +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.searchdone(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_searchdone(art); +} + +//--- wur_ae_tablesize.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 
__attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_tablesize(int art) { +// CHECK-LABEL: test_wur_ae_tablesize +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.tablesize(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_tablesize(art); +} + +//--- wur_ae_ts_fts_bu_bp.c + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); +typedef short ae_int16 __attribute__(( vector_size(2))); +typedef short ae_int16x2 __attribute__(( vector_size(4))); +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef long long ae_int64 __attribute__(( vector_size(8))); +typedef unsigned char ae_valign __attribute__(( vector_size(8))); + +typedef int immediate; + +void test_wur_ae_ts_fts_bu_bp(int art) { +// CHECK-LABEL: test_wur_ae_ts_fts_bu_bp +// CHECK: {{(tail)?}} call void @llvm.xtensa.wur.ae.ts.fts.bu.bp(i32 {{.*}}) +// CHECK: ret void + __builtin_xtensa_wur_ae_ts_fts_bu_bp(art); +} + From 014eb1493754f6a0e67f1152f0ca465c30bb6f3e Mon Sep 17 00:00:00 2001 From: Maciej Czekaj Date: Thu, 29 Jun 2023 13:59:58 +0000 Subject: [PATCH 211/289] [Xtensa] Fix xt_lsxp builtin definition --- clang/include/clang/Basic/BuiltinsXtensa.def | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/include/clang/Basic/BuiltinsXtensa.def b/clang/include/clang/Basic/BuiltinsXtensa.def index 8c0fcc17133e6..7cdbfefabc46a 100644 --- a/clang/include/clang/Basic/BuiltinsXtensa.def +++ b/clang/include/clang/Basic/BuiltinsXtensa.def @@ -203,7 +203,7 @@ BUILTIN(__builtin_xtensa_xt_lsip, "ff**i", "n") // xtfloat __builtin_xtensa___builtin_xtensa_xt_lsx(const xtfloat*,int) 
BUILTIN(__builtin_xtensa_xt_lsx, "ff*i", "n") -BUILTIN(__builtin_xtensa_xt_lsxp, "ff*i", "n") +BUILTIN(__builtin_xtensa_xt_lsxp, "ff**i", "n") // xtfloat __builtin_xtensa___builtin_xtensa_xt_madd_s(xtfloat,xtfloat,xtfloat) BUILTIN(__builtin_xtensa_xt_madd_s, "ffff", "n") From 6750aa0308a3ea2a35afb30128d4f410004d8914 Mon Sep 17 00:00:00 2001 From: Maciej Czekaj Date: Thu, 29 Jun 2023 14:01:04 +0000 Subject: [PATCH 212/289] [Xtensa] Support bool vectors in LLVM calls --- clang/lib/CodeGen/Targets/Xtensa.cpp | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/clang/lib/CodeGen/Targets/Xtensa.cpp b/clang/lib/CodeGen/Targets/Xtensa.cpp index 82692d49be4e7..4ad07648fef40 100644 --- a/clang/lib/CodeGen/Targets/Xtensa.cpp +++ b/clang/lib/CodeGen/Targets/Xtensa.cpp @@ -11,6 +11,7 @@ using namespace clang; using namespace clang::CodeGen; + //===----------------------------------------------------------------------===// // Xtensa ABI Implementation //===----------------------------------------------------------------------===// @@ -99,9 +100,13 @@ ABIArgInfo XtensaABIInfo::classifyArgumentType(QualType Ty, } // xtbool - if (getTarget().hasFeature("bool") && Size == 1 && Ty->isVectorType()) { + if (getTarget().hasFeature("bool") && Size <= 8 && Ty->isVectorType()) { + // The type size is rounded up to the power of two and at least 8 bits, + // so we need to get the "true" size from num of vector elements + const VectorType *VT = Ty->getAs(); + unsigned NumBits = VT->getNumElements(); llvm::Type *ResType = - llvm::FixedVectorType::get(llvm::Type::getInt1Ty(getVMContext()), 1); + llvm::FixedVectorType::get(llvm::Type::getInt1Ty(getVMContext()), NumBits); return ABIArgInfo::getDirect(ResType); } // Vector arguments From 1c902f051fd5be8e37a122d53c5a4542aa734b13 Mon Sep 17 00:00:00 2001 From: Maciej Czekaj Date: Thu, 29 Jun 2023 14:03:00 +0000 Subject: [PATCH 213/289] [Xtensa] Add --text-section-literals option This option is passed to GNU AS and makes Xtensa 
compiler driver compatible with GCC. --- clang/lib/Driver/ToolChains/Xtensa.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/clang/lib/Driver/ToolChains/Xtensa.cpp b/clang/lib/Driver/ToolChains/Xtensa.cpp index 76fab61d3ab6e..1f26e6e02ceb2 100644 --- a/clang/lib/Driver/ToolChains/Xtensa.cpp +++ b/clang/lib/Driver/ToolChains/Xtensa.cpp @@ -250,6 +250,9 @@ void tools::xtensa::Assembler::ConstructJob(Compilation &C, const JobAction &JA, if (!A->getOption().matches(options::OPT_g0)) CmdArgs.push_back("-g"); + if (Args.getLastArg(options::OPT_mtext_section_literals)) + CmdArgs.push_back("--text-section-literals"); + if (Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm, false)) CmdArgs.push_back("-fverbose-asm"); From 2035eca1fadcbe9bf04e3ac17b5a20a4723ea606 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 1 Oct 2024 16:23:58 +0300 Subject: [PATCH 214/289] [Xtensa] Add vector conversion builtins Intrinsics: __builtin_xtensa_ae_int32x2 and builtin_xtensa_int32 are convenience functions for easy integer-vector and vector-vector conversions that conform to Xtensa coding style. Xtensa C dialect allows for implicit conversion between wider and narrower vector (via shuffle) and between integer and any vector (via broadcast). Standard Clang vectors do not support this, so these functions provide a handicap for better portability. 
--- .../clang/Basic/BuiltinsXtensaHIFI.def | 4 +- clang/include/clang/Sema/SemaXtensa.h | 2 + clang/lib/CodeGen/CGBuiltin.cpp | 49 +++++++++++++++++++ clang/lib/CodeGen/CodeGenFunction.h | 3 ++ clang/lib/Sema/SemaXtensa.cpp | 42 ++++++++++++++++ .../CodeGen/Xtensa/xtensa-hifi-conversions.c | 48 ++++++++++++++++++ 6 files changed, 147 insertions(+), 1 deletion(-) create mode 100644 clang/test/CodeGen/Xtensa/xtensa-hifi-conversions.c diff --git a/clang/include/clang/Basic/BuiltinsXtensaHIFI.def b/clang/include/clang/Basic/BuiltinsXtensaHIFI.def index d0ac10aeab39b..1ccfb0cdeda15 100644 --- a/clang/include/clang/Basic/BuiltinsXtensaHIFI.def +++ b/clang/include/clang/Basic/BuiltinsXtensaHIFI.def @@ -2614,4 +2614,6 @@ BUILTIN(__builtin_xtensa_wur_ae_tablesize, "vi", "n") // void __builtin_xtensa_wur_ae_ts_fts_bu_bp(int art) BUILTIN(__builtin_xtensa_wur_ae_ts_fts_bu_bp, "vi", "n") -#undef BUILTIN +// Type conversion builtins +BUILTIN(__builtin_xtensa_ae_int32x2, "V2i.", "nct") +BUILTIN(__builtin_xtensa_ae_int32, "V1i.", "nct") diff --git a/clang/include/clang/Sema/SemaXtensa.h b/clang/include/clang/Sema/SemaXtensa.h index 2dccfd10fa4d2..cadd7705bfe81 100644 --- a/clang/include/clang/Sema/SemaXtensa.h +++ b/clang/include/clang/Sema/SemaXtensa.h @@ -24,6 +24,8 @@ class SemaXtensa : public SemaBase { bool CheckXtensaBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); + + bool SemaBuiltinXtensaConversion(unsigned BuiltinID, CallExpr *TheCall); }; } // namespace clang diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index cad709d7f041c..866de38267b87 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -22132,12 +22132,61 @@ llvm::Value *CodeGenFunction::ConvertXtensaToBc(const Expr *ArgExpr, } return ArgCast; } +llvm::Value * +CodeGenFunction::EmitXtensaConversionExpr(unsigned BuiltinID, const CallExpr *E, + ReturnValueSlot ReturnValue, + llvm::Triple::ArchType Arch) { + unsigned 
MaxElems; + switch (BuiltinID) { + case Xtensa::BI__builtin_xtensa_ae_int32x2: + MaxElems = 2; + break; + case Xtensa::BI__builtin_xtensa_ae_int32: + MaxElems = 1; + break; + default: + llvm_unreachable("Unknown intrinsic ID"); + } + + Value *ArgVal = EmitScalarExpr(E->getArg(0)); + QualType QT = E->getArg(0)->getType(); + if (auto *VecTy = QT->getAs()) { + unsigned NumEl = VecTy->getNumElements(); + llvm::Type *ElType = ConvertType(VecTy->getElementType()); + if (ElType != Int32Ty || NumEl > MaxElems) { + CGM.Error(E->getExprLoc(), "Expected int32x1 or int32x2"); + return ArgVal; + } + if (NumEl == MaxElems) + return ArgVal; // no-op + int Mask[] = {0,0}; + Value *Result = + Builder.CreateShuffleVector(ArgVal, ArgVal, ArrayRef(Mask, MaxElems)); + return Result; + } else if (QT->isIntegerType()) { + Value *Int32Val = (QT->isSignedIntegerType()) + ? Builder.CreateSExtOrTrunc(ArgVal, Int32Ty, "cast") + : Builder.CreateZExtOrTrunc(ArgVal, Int32Ty, "cast"); + Value *VecOps[] = {Int32Val,Int32Val}; + Value *Result = BuildVector(ArrayRef(VecOps, MaxElems)); + return Result; + } + llvm_unreachable("Invalid Argument type"); +} llvm::Value * CodeGenFunction::EmitXtensaBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch) { + switch (BuiltinID) { + case Xtensa::BI__builtin_xtensa_ae_int32x2: + case Xtensa::BI__builtin_xtensa_ae_int32: + return EmitXtensaConversionExpr(BuiltinID, E, ReturnValue, Arch); + default: + break; + }; + XtensaIntrinsicInfo Info = GetXtensaIntrinsic(BuiltinID); unsigned Intrinsic = Info.IntrinsicID; diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index de812830d395d..3fdc16035823a 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -4806,6 +4806,9 @@ class CodeGenFunction : public CodeGenTypeCache { llvm::Value *EmitXtensaBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, 
llvm::Triple::ArchType Arch); + llvm::Value *EmitXtensaConversionExpr(unsigned BuiltinID, const CallExpr *E, + ReturnValueSlot ReturnValue, + llvm::Triple::ArchType Arch); //===--------------------------------------------------------------------===// // Expression Emission //===--------------------------------------------------------------------===// diff --git a/clang/lib/Sema/SemaXtensa.cpp b/clang/lib/Sema/SemaXtensa.cpp index 10f51339a1f51..4138c7fb7a670 100644 --- a/clang/lib/Sema/SemaXtensa.cpp +++ b/clang/lib/Sema/SemaXtensa.cpp @@ -341,8 +341,50 @@ bool SemaXtensa::CheckXtensaBuiltinFunctionCall(const TargetInfo &TI, SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 7) && SemaRef.BuiltinConstantArgRange(TheCall, 4, 0, 7) && SemaRef.BuiltinConstantArgRange(TheCall, 5, 0, 7); + case Xtensa::BI__builtin_xtensa_ae_int32x2: + case Xtensa::BI__builtin_xtensa_ae_int32: + return SemaBuiltinXtensaConversion(BuiltinID, TheCall); } return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u); } +bool SemaXtensa::SemaBuiltinXtensaConversion(unsigned BuiltinID, CallExpr *TheCall) { + ASTContext &Context = getASTContext(); + unsigned MaxElems; + switch (BuiltinID) { + case Xtensa::BI__builtin_xtensa_ae_int32x2: + MaxElems = 2; + break; + case Xtensa::BI__builtin_xtensa_ae_int32: + MaxElems = 1; + break; + default: + llvm_unreachable("Unknown intrinsic ID"); + } + if (SemaRef.checkArgCount(TheCall, 1)) + return true; + Expr *Arg = TheCall->getArg(0); + QualType QT = Arg->getType(); + if (auto *VecTy = QT->getAs()) { + unsigned NumEl = VecTy->getNumElements(); + QualType ElType = VecTy->getElementType(); + unsigned ElWidth = Context.getIntWidth(ElType); + QualType VecType = Context.getVectorType(Context.IntTy, MaxElems, + VectorKind::Generic); + if (ElWidth != 32 || NumEl > MaxElems) + return Diag(TheCall->getBeginLoc(), + diag::err_typecheck_convert_incompatible) + << QT << VecType << 1 << 0 << 0; + return false; + } else { + if (!QT->isIntegerType()) + return 
Diag(TheCall->getBeginLoc(), + diag::err_typecheck_convert_incompatible) + << QT << Context.IntTy << 1 << 0 << 0; + + return false; + } + return false; +} + } // namespace clang diff --git a/clang/test/CodeGen/Xtensa/xtensa-hifi-conversions.c b/clang/test/CodeGen/Xtensa/xtensa-hifi-conversions.c new file mode 100644 index 0000000000000..506e785717e6d --- /dev/null +++ b/clang/test/CodeGen/Xtensa/xtensa-hifi-conversions.c @@ -0,0 +1,48 @@ +// RUN: split-file %s %t +// RUN: %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/correct.c | FileCheck %t/correct.c +// RUN: not %clang -target xtensa -mcpu=cnl -S -emit-llvm -O1 -o - %t/bad_vec.c 2>&1 | FileCheck %t/bad_vec.c + +//--- correct.c + +typedef int ae_int32 __attribute__(( vector_size(4))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); + +ae_int32x2 test_ae_int32x2_from_int(int a) { +// CHECK-LABEL: @test_ae_int32x2_from_int(i +// CHECK: %[[INS:.*]] = insertelement <2 x i32> poison, i32 %{{.*}}, i64 0 +// CHECK: %[[SHUF:.*]] = shufflevector <2 x i32> %[[INS]], <2 x i32> poison, <2 x i32> zeroinitializer +// CHECK: ret <2 x i32> %[[SHUF]] +return __builtin_xtensa_ae_int32x2(a); +} + +ae_int32x2 test_ae_int32x2_from_ae_int32(ae_int32 a) { +// CHECK-LABEL: @test_ae_int32x2_from_ae_int32( +// CHECK: %[[SHUF:.*]] = shufflevector <1 x i32> %{{.*}}, <1 x i32> poison, <2 x i32> zeroinitializer +// CHECK: ret <2 x i32> %[[SHUF]] +return __builtin_xtensa_ae_int32x2(a); +} + +ae_int32x2 test_ae_int32x2_from_ae_int32x2(ae_int32x2 a) { +// CHECK: {{.*}}<2 x i32> @test_ae_int32x2_from_ae_int32x2(<2 x i32>{{.*}} %[[A:.*]]) +// CHECK: ret <2 x i32> %[[A]] +return __builtin_xtensa_ae_int32x2(a); +} + +ae_int32x2 test_ae_int32x2_from_short(short a) { +// CHECK-LABEL: @test_ae_int32x2_from_short( +// CHECK: %[[SEXT:.*]] = sext i16 %{{.*}} to i32 +// CHECK: %[[INS:.*]] = insertelement <2 x i32> poison, i32 %[[SEXT]], i64 0 +// CHECK: %[[SHUF:.*]] = shufflevector <2 x i32> %[[INS]], <2 x i32> poison, <2 x i32> 
zeroinitializer +// CHECK: ret <2 x i32> %[[SHUF]] +return __builtin_xtensa_ae_int32x2(a); +} + +//--- bad_vec.c + +typedef short ae_int16x4 __attribute__(( vector_size(8))); +typedef int ae_int32x2 __attribute__(( vector_size(8))); + +ae_int32x2 test_ae_int32x2_from_bad_vec(ae_int16x4 a) { +// CHECK: error: passing 'ae_int16x4' {{.*}} to parameter of incompatible type +return __builtin_xtensa_ae_int32x2(a); +} From 9df9ec2854e2f6d1b3fe6681469aa3f587abe44f Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 1 Oct 2024 16:28:49 +0300 Subject: [PATCH 215/289] [Xtensa] Add HIFI3 C types and intrinsics --- clang/lib/Headers/CMakeLists.txt | 13 + clang/lib/Headers/xtensa_defs.h | 66 + clang/lib/Headers/xtensa_protos.h | 6894 +++++++++++++++++++++++++++++ 3 files changed, 6973 insertions(+) create mode 100644 clang/lib/Headers/xtensa_defs.h create mode 100644 clang/lib/Headers/xtensa_protos.h diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt index 89fa0ecd45eb4..6c5f9f6bee528 100644 --- a/clang/lib/Headers/CMakeLists.txt +++ b/clang/lib/Headers/CMakeLists.txt @@ -253,6 +253,11 @@ set(x86_files cpuid.h ) +set(xtensa_files + xtensa_defs.h + xtensa_protos.h +) + set(windows_only_files intrin0.h intrin.h @@ -281,6 +286,7 @@ set(files ${systemz_files} ${ve_files} ${x86_files} + ${xtensa_files} ${webassembly_files} ${windows_only_files} ${utility_files} @@ -497,6 +503,7 @@ add_header_target("systemz-resource-headers" "${systemz_files};${zos_wrapper_fil add_header_target("ve-resource-headers" "${ve_files}") add_header_target("webassembly-resource-headers" "${webassembly_files}") add_header_target("x86-resource-headers" "${x86_files}") +add_header_target("xtensa-resource-headers" "${xtensa_files}") # Other header groupings add_header_target("hlsl-resource-headers" ${hlsl_files}) @@ -681,6 +688,12 @@ if(NOT CLANG_ENABLE_HLSL) set(EXCLUDE_HLSL EXCLUDE_FROM_ALL) endif() +install( + FILES ${xtensa_files} + DESTINATION ${header_install_dir} + 
EXCLUDE_FROM_ALL + COMPONENT xtensa-resource-headers) + install( FILES ${hlsl_h} DESTINATION ${header_install_dir} diff --git a/clang/lib/Headers/xtensa_defs.h b/clang/lib/Headers/xtensa_defs.h new file mode 100644 index 0000000000000..d47cacf71803b --- /dev/null +++ b/clang/lib/Headers/xtensa_defs.h @@ -0,0 +1,66 @@ +/*===---- xtensa_defs.h - Xtensa definitions -------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __XTENSAHIFI3_H +#define __XTENSAHIFI3_H + +typedef __attribute__((ext_vector_type(1))) _Bool xtbool; +typedef __attribute__((ext_vector_type(2))) _Bool xtbool2; +typedef __attribute__((ext_vector_type(4))) _Bool xtbool4; + +typedef int ae_int32 __attribute__((vector_size(4))); +typedef int ae_int32x2 __attribute__((vector_size(8))); +typedef short ae_int16 __attribute__((vector_size(2))); +typedef short ae_int16x2 __attribute__((vector_size(4))); +typedef short ae_int16x4 __attribute__((vector_size(8))); +typedef long long ae_int64 __attribute__((vector_size(8))); +typedef unsigned char ae_valign __attribute__((vector_size(8))); +typedef ae_int16x4 ae_f16x4; +typedef ae_int32x2 ae_f32x2; +typedef ae_int32 ae_f32; +typedef ae_int64 ae_f64; +typedef ae_int32 ae_f24; +typedef ae_int32x2 ae_f24x2; +typedef ae_int16 ae_f16; + +#include + +#define AE_SETCBEGIN0(x) WUR_AE_CBEGIN0(x) +#define AE_SETCEND0(x) WUR_AE_CEND0(x) +#define AE_ZERO64(x) AE_MOVI(0) +#define AE_ZERO24(x) AE_MOVI(0) +#define AE_ZERO32(x) AE_MOVI(0) +#define AE_ZERO16(x) AE_MOVI(0) +#define AE_ZEROQ56(x) AE_ZERO64(x) + +#define AE_SEL32_L(a) \ + ({ \ + ae_int32x2 _a = a; \ + __builtin_shufflevector(_a, _a, 0); \ + }) + +#define AE_INT32(a) __builtin_xtensa_ae_int32(a); +#define AE_INT32X2(a) 
__builtin_xtensa_ae_int32x2(a); + +#define AE_F32X2 AE_INT32X2 +#define AE_F32 AE_INT32 + +#define AE_MOVINT16X4_FROMINT32X2(a) ((ae_int32x2)(a)) + +#define AE_F32_ADDS_F32(s1, s2) \ + AE_F32(AE_ADD32S(AE_INT32X2(s1), AE_INT32X2(s2))) + +typedef float xtfloat; + +#define XT_xtfloat_storeip(x, a, i) ({ a = __builtin_xtensa_xt_ssip(x, a, i); }) +#define XT_xtfloat_loadip(x, a, i) \ + ({ x = __builtin_xtensa_xt_lsip((xtfloat **)&a, i); }) +#define XT_xtfloat_loadxp(x, a, i) \ + ({ x = __builtin_xtensa_xt_lsxp((xtfloat **)&a, i); }) + +#endif /* __XTENSAHIFI3_H */ \ No newline at end of file diff --git a/clang/lib/Headers/xtensa_protos.h b/clang/lib/Headers/xtensa_protos.h new file mode 100644 index 0000000000000..0cc75cc8431ab --- /dev/null +++ b/clang/lib/Headers/xtensa_protos.h @@ -0,0 +1,6894 @@ + +/*===---- xtensa_protos.h - Xtensa intrinsics -------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __XTENSAHIFI3INTRIN_H +#define __XTENSAHIFI3INTRIN_H + +#define AE_ABS16S(ae_arth_v1) \ + ({ \ + ae_int16x4 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_abs16s((__ae_arth_v1)); \ + }) + +#define AE_ABS24S(ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_abs24s((__ae_arth_v1)); \ + }) + +#define AE_ABS32(ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_abs32((__ae_arth_v1)); \ + }) + +#define AE_ABS32S(ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_abs32s((__ae_arth_v1)); \ + }) + +#define AE_ABS64(ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_abs64((__ae_arth_v1)); \ + }) + +#define AE_ABS64S(ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_abs64s((__ae_arth_v1)); \ + }) + +#define AE_ADD16(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int16x4 __ae_arth_v0 = (ae_arth_v0); \ + ae_int16x4 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_add16((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ADD16S(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int16x4 __ae_arth_v0 = (ae_arth_v0); \ + ae_int16x4 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_add16s((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ADD24S(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_add24s((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ADD32(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_add32((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ADD32_HL_LH(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 
__ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_add32_hl_lh((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ADD32S(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_add32s((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ADD64(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_add64((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ADD64S(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_add64s((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ADDBRBA32(art, ars) \ + ({ \ + int __art = (int)(art); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_addbrba32((__art), (__ars)); \ + }) + +#define AE_ADDSUB32(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_addsub32((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ADDSUB32S(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_addsub32s((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_AND(ae_dr_to_dr_v0, ae_dr_to_dr_v1) \ + ({ \ + ae_int64 __ae_dr_to_dr_v0 = (ae_dr_to_dr_v0); \ + ae_int64 __ae_dr_to_dr_v1 = (ae_dr_to_dr_v1); \ + __builtin_xtensa_ae_and((__ae_dr_to_dr_v0), (__ae_dr_to_dr_v1)); \ + }) + +#define AE_CVT32X2F16_10(ae_to_dr_v0) \ + ({ \ + ae_int16x4 __ae_to_dr_v0 = (ae_to_dr_v0); \ + __builtin_xtensa_ae_cvt32x2f16_10((__ae_to_dr_v0)); \ + }) + +#define AE_CVT32X2F16_32(ae_to_dr_v0) \ + ({ \ + ae_int16x4 __ae_to_dr_v0 = (ae_to_dr_v0); \ + __builtin_xtensa_ae_cvt32x2f16_32((__ae_to_dr_v0)); \ + }) + +#define AE_CVT48A32(ars) \ + ({ \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_cvt48a32((__ars)); \ + }) + +#define 
AE_CVT64A32(ars) \ + ({ \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_cvt64a32((__ars)); \ + }) + +#define AE_CVT64F32_H(ae_dr_to_dr_v0) \ + ({ \ + ae_int32x2 __ae_dr_to_dr_v0 = (ae_dr_to_dr_v0); \ + __builtin_xtensa_ae_cvt64f32_h((__ae_dr_to_dr_v0)); \ + }) + +#define AE_CVTA32F24S_H(ae_dr_to_ar_v0) \ + ({ \ + ae_int32x2 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_cvta32f24s_h((__ae_dr_to_ar_v0)); \ + }) + +#define AE_CVTA32F24S_L(ae_dr_to_ar_v0) \ + ({ \ + ae_int32x2 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_cvta32f24s_l((__ae_dr_to_ar_v0)); \ + }) + +#define AE_CVTQ56A32S(ars) \ + ({ \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_cvtq56a32s((__ars)); \ + }) + +#define AE_CVTQ56P32S_H(ae_dr_to_dr_v0) \ + ({ \ + ae_int32x2 __ae_dr_to_dr_v0 = (ae_dr_to_dr_v0); \ + __builtin_xtensa_ae_cvtq56p32s_h((__ae_dr_to_dr_v0)); \ + }) + +#define AE_CVTQ56P32S_L(ae_dr_to_dr_v0) \ + ({ \ + ae_int32x2 __ae_dr_to_dr_v0 = (ae_dr_to_dr_v0); \ + __builtin_xtensa_ae_cvtq56p32s_l((__ae_dr_to_dr_v0)); \ + }) + +#define AE_DB(ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_db((const short **)&(ars), (__art)); \ + }) + +#define AE_DB_IC(ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_db_ic((const short **)&(ars), (__art)); \ + }) + +#define AE_DB_IP(ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_db_ip((const short **)&(ars), (__art)); \ + }) + +#define AE_DBI(ars, ae_ohba) \ + ({ __builtin_xtensa_ae_dbi((const short **)&(ars), (ae_ohba)); }) + +#define AE_DBI_IC(ars, ae_ohba) \ + ({ __builtin_xtensa_ae_dbi_ic((const short **)&(ars), (ae_ohba)); }) + +#define AE_DBI_IP(ars, ae_ohba) \ + ({ __builtin_xtensa_ae_dbi_ip((const short **)&(ars), (ae_ohba)); }) + +#define AE_DIV64D32_H(ae_arth_v, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_div64d32_h((ae_int64 *)&(ae_arth_v), (__ae_arth_v1)); \ + }) + +#define AE_DIV64D32_L(ae_arth_v, 
ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_div64d32_l((ae_int64 *)&(ae_arth_v), (__ae_arth_v1)); \ + }) + +#define AE_EQ16(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int16x4 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int16x4 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_eq16((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_EQ32(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int32x2 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int32x2 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_eq32((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_EQ64(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int64 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int64 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_eq64((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_L16_I(ars, ae_immls16) \ + ({ \ + ae_int16 *__ars = (ars); \ + __builtin_xtensa_ae_l16_i((__ars), (ae_immls16)); \ + }) + +#define AE_L16_IP(ae_ls_v, ars, ae_immls16) \ + ({ \ + __builtin_xtensa_ae_l16_ip((ae_int16x4 *)&(ae_ls_v), \ + (const ae_int16 **)&(ars), (ae_immls16)); \ + }) + +#define AE_L16_X(ars, art) \ + ({ \ + ae_int16 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16_x((__ars), (__art)); \ + }) + +#define AE_L16_XC(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16_xc((ae_int16x4 *)&(ae_ls_v), \ + (const ae_int16 **)&(ars), (__art)); \ + }) + +#define AE_L16_XP(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16_xp((ae_int16x4 *)&(ae_ls_v), \ + (const ae_int16 **)&(ars), (__art)); \ + }) + +#define AE_L16M_I(ars, ae_immls16) \ + ({ \ + ae_int16 *__ars = (ars); \ + __builtin_xtensa_ae_l16m_i((__ars), (ae_immls16)); \ + }) + +#define AE_L16M_IU(ae_ls_v, ars, ae_immls16) \ + ({ \ + __builtin_xtensa_ae_l16m_iu((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int16 **)&(ars), (ae_immls16)); \ + }) + +#define AE_L16M_X(ars, art) \ + ({ \ + ae_int16 *__ars = (ars); \ + int __art = (int)(art); \ + 
__builtin_xtensa_ae_l16m_x((__ars), (__art)); \ + }) + +#define AE_L16M_XC(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16m_xc((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int16 **)&(ars), (__art)); \ + }) + +#define AE_L16M_XU(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16m_xu((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int16 **)&(ars), (__art)); \ + }) + +#define AE_L16X2M_I(ars, ae_immls32) \ + ({ \ + ae_int16x2 *__ars = (ars); \ + __builtin_xtensa_ae_l16x2m_i((__ars), (ae_immls32)); \ + }) + +#define AE_L16X2M_IU(ae_ls_v, ars, ae_immls32) \ + ({ \ + __builtin_xtensa_ae_l16x2m_iu((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int16x2 **)&(ars), (ae_immls32)); \ + }) + +#define AE_L16X2M_X(ars, art) \ + ({ \ + ae_int16x2 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16x2m_x((__ars), (__art)); \ + }) + +#define AE_L16X2M_XC(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16x2m_xc((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int16x2 **)&(ars), (__art)); \ + }) + +#define AE_L16X2M_XU(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16x2m_xu((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int16x2 **)&(ars), (__art)); \ + }) + +#define AE_L16X4_I(ars, ae_immls64) \ + ({ \ + ae_int16x4 *__ars = (ars); \ + __builtin_xtensa_ae_l16x4_i((__ars), (ae_immls64)); \ + }) + +#define AE_L16X4_IP(ae_ls_v, ars, ae_immls64pos) \ + ({ \ + __builtin_xtensa_ae_l16x4_ip((ae_int16x4 *)&(ae_ls_v), \ + (const ae_int16x4 **)&(ars), \ + (ae_immls64pos)); \ + }) + +#define AE_L16X4_RIC(ae_ls_v, ars) \ + ({ \ + __builtin_xtensa_ae_l16x4_ric((ae_int16x4 *)&(ae_ls_v), \ + (const ae_int16x4 **)&(ars)); \ + }) + +#define AE_L16X4_RIP(ae_ls_v, ars) \ + ({ \ + __builtin_xtensa_ae_l16x4_rip((ae_int16x4 *)&(ae_ls_v), \ + (const ae_int16x4 **)&(ars)); \ + }) + +#define AE_L16X4_X(ars, art) \ + ({ \ + ae_int16x4 *__ars = (ars); \ + int __art = (int)(art); \ + 
__builtin_xtensa_ae_l16x4_x((__ars), (__art)); \ + }) + +#define AE_L16X4_XC(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16x4_xc((ae_int16x4 *)&(ae_ls_v), \ + (const ae_int16x4 **)&(ars), (__art)); \ + }) + +#define AE_L16X4_XP(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l16x4_xp((ae_int16x4 *)&(ae_ls_v), \ + (const ae_int16x4 **)&(ars), (__art)); \ + }) + +#define AE_L32_I(ars, ae_immls32) \ + ({ \ + ae_int32 *__ars = (ars); \ + __builtin_xtensa_ae_l32_i((__ars), (ae_immls32)); \ + }) + +#define AE_L32_IP(ae_ls_v, ars, ae_immls32) \ + ({ \ + __builtin_xtensa_ae_l32_ip((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32 **)&(ars), (ae_immls32)); \ + }) + +#define AE_L32_X(ars, art) \ + ({ \ + ae_int32 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32_x((__ars), (__art)); \ + }) + +#define AE_L32_XC(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32_xc((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32 **)&(ars), (__art)); \ + }) + +#define AE_L32_XP(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32_xp((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32 **)&(ars), (__art)); \ + }) + +#define AE_L32F24_I(ars, ae_immls32) \ + ({ \ + ae_int32 *__ars = (ars); \ + __builtin_xtensa_ae_l32f24_i((__ars), (ae_immls32)); \ + }) + +#define AE_L32F24_IP(ae_ls_v, ars, ae_immls32) \ + ({ \ + __builtin_xtensa_ae_l32f24_ip((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32 **)&(ars), (ae_immls32)); \ + }) + +#define AE_L32F24_X(ars, art) \ + ({ \ + ae_int32 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32f24_x((__ars), (__art)); \ + }) + +#define AE_L32F24_XC(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32f24_xc((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32 **)&(ars), (__art)); \ + }) + +#define AE_L32F24_XP(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + 
__builtin_xtensa_ae_l32f24_xp((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32 **)&(ars), (__art)); \ + }) + +#define AE_L32M_I(ars, ae_immls32) \ + ({ \ + ae_int32 *__ars = (ars); \ + __builtin_xtensa_ae_l32m_i((__ars), (ae_immls32)); \ + }) + +#define AE_L32M_IU(ae_ls_v, ars, ae_immls32) \ + ({ \ + __builtin_xtensa_ae_l32m_iu((ae_int64 *)&(ae_ls_v), \ + (const ae_int32 **)&(ars), (ae_immls32)); \ + }) + +#define AE_L32M_X(ars, art) \ + ({ \ + ae_int32 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32m_x((__ars), (__art)); \ + }) + +#define AE_L32M_XC(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32m_xc((ae_int64 *)&(ae_ls_v), \ + (const ae_int32 **)&(ars), (__art)); \ + }) + +#define AE_L32M_XU(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32m_xu((ae_int64 *)&(ae_ls_v), \ + (const ae_int32 **)&(ars), (__art)); \ + }) + +#define AE_L32X2_I(ars, ae_immls64) \ + ({ \ + ae_int32x2 *__ars = (ars); \ + __builtin_xtensa_ae_l32x2_i((__ars), (ae_immls64)); \ + }) + +#define AE_L32X2_IP(ae_ls_v, ars, ae_immls64pos) \ + ({ \ + __builtin_xtensa_ae_l32x2_ip((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32x2 **)&(ars), \ + (ae_immls64pos)); \ + }) + +#define AE_L32X2_RIC(ae_ls_v, ars) \ + ({ \ + __builtin_xtensa_ae_l32x2_ric((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_L32X2_RIP(ae_ls_v, ars) \ + ({ \ + __builtin_xtensa_ae_l32x2_rip((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_L32X2_X(ars, art) \ + ({ \ + ae_int32x2 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32x2_x((__ars), (__art)); \ + }) + +#define AE_L32X2_XC(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32x2_xc((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32x2 **)&(ars), (__art)); \ + }) + +#define AE_L32X2_XP(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32x2_xp((ae_int32x2 
*)&(ae_ls_v), \ + (const ae_int32x2 **)&(ars), (__art)); \ + }) + +#define AE_L32X2F24_I(ars, ae_immls64) \ + ({ \ + ae_int32x2 *__ars = (ars); \ + __builtin_xtensa_ae_l32x2f24_i((__ars), (ae_immls64)); \ + }) + +#define AE_L32X2F24_IP(ae_ls_v, ars, ae_immls64pos) \ + ({ \ + __builtin_xtensa_ae_l32x2f24_ip((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32x2 **)&(ars), \ + (ae_immls64pos)); \ + }) + +#define AE_L32X2F24_RIC(ae_ls_v, ars) \ + ({ \ + __builtin_xtensa_ae_l32x2f24_ric((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_L32X2F24_RIP(ae_ls_v, ars) \ + ({ \ + __builtin_xtensa_ae_l32x2f24_rip((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_L32X2F24_X(ars, art) \ + ({ \ + ae_int32x2 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32x2f24_x((__ars), (__art)); \ + }) + +#define AE_L32X2F24_XC(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32x2f24_xc((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32x2 **)&(ars), (__art)); \ + }) + +#define AE_L32X2F24_XP(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l32x2f24_xp((ae_int32x2 *)&(ae_ls_v), \ + (const ae_int32x2 **)&(ars), (__art)); \ + }) + +#define AE_L64_I(ars, ae_immls64) \ + ({ \ + ae_int64 *__ars = (ars); \ + __builtin_xtensa_ae_l64_i((__ars), (ae_immls64)); \ + }) + +#define AE_L64_IP(ae_ls_v, ars, ae_immls64) \ + ({ \ + __builtin_xtensa_ae_l64_ip((ae_int64 *)&(ae_ls_v), \ + (const ae_int64 **)&(ars), (ae_immls64)); \ + }) + +#define AE_L64_X(ars, art) \ + ({ \ + ae_int64 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l64_x((__ars), (__art)); \ + }) + +#define AE_L64_XC(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l64_xc((ae_int64 *)&(ae_ls_v), \ + (const ae_int64 **)&(ars), (__art)); \ + }) + +#define AE_L64_XP(ae_ls_v, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_l64_xp((ae_int64 *)&(ae_ls_v), 
\ + (const ae_int64 **)&(ars), (__art)); \ + }) + +#define AE_LA16X4_IC(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la16x4_ic((ae_int16x4 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int16x4 **)&(ars)); \ + }) + +#define AE_LA16X4_IP(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la16x4_ip((ae_int16x4 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int16x4 **)&(ars)); \ + }) + +#define AE_LA16X4_RIC(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la16x4_ric((ae_int16x4 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int16x4 **)&(ars)); \ + }) + +#define AE_LA16X4_RIP(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la16x4_rip((ae_int16x4 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int16x4 **)&(ars)); \ + }) + +#define AE_LA16X4NEG_PC(ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la16x4neg_pc((ae_valign *)&(ae_ls_uu), \ + (const ae_int16x4 **)&(ars)); \ + }) + +#define AE_LA16X4POS_PC(ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la16x4pos_pc((ae_valign *)&(ae_ls_uu), \ + (const ae_int16x4 **)&(ars)); \ + }) + +#define AE_LA24_IC(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24_ic((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24_IP(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24_ip((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24_RIC(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24_ric((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24_RIP(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24_rip((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24NEG_PC(ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24neg_pc((ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define 
AE_LA24POS_PC(ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24pos_pc((ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24X2_IC(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24x2_ic((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24X2_IP(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24x2_ip((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24X2_RIC(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24x2_ric((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24X2_RIP(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24x2_rip((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24X2NEG_PC(ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24x2neg_pc((ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA24X2POS_PC(ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la24x2pos_pc((ae_valign *)&(ae_ls_uu), \ + (const void **)&(ars)); \ + }) + +#define AE_LA32X2_IC(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la32x2_ic((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_LA32X2_IP(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la32x2_ip((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_LA32X2_RIC(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la32x2_ric((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_LA32X2_RIP(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la32x2_rip((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_LA32X2F24_IC(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + 
__builtin_xtensa_ae_la32x2f24_ic((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_LA32X2F24_IP(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la32x2f24_ip((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_LA32X2F24_RIC(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la32x2f24_ric((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_LA32X2F24_RIP(ae_ls_av, ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la32x2f24_rip((ae_int32x2 *)&(ae_ls_av), \ + (ae_valign *)&(ae_ls_uu), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_LA32X2NEG_PC(ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la32x2neg_pc((ae_valign *)&(ae_ls_uu), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_LA32X2POS_PC(ae_ls_uu, ars) \ + ({ \ + __builtin_xtensa_ae_la32x2pos_pc((ae_valign *)&(ae_ls_uu), \ + (const ae_int32x2 **)&(ars)); \ + }) + +#define AE_LA64_PP(ars) \ + ({ \ + void *__ars = (void *)(ars); \ + __builtin_xtensa_ae_la64_pp((__ars)); \ + }) + +#define AE_LALIGN64_I(ars, ae_immls64) \ + ({ \ + ae_valign *__ars = (ars); \ + __builtin_xtensa_ae_lalign64_i((__ars), (ae_immls64)); \ + }) + +#define AE_LB(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_lb((__art)); \ + }) + +#define AE_LBI(ae_ohba) ({ __builtin_xtensa_ae_lbi((ae_ohba)); }) + +#define AE_LBK(ars, art) \ + ({ \ + int __ars = (int)(ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_lbk((__ars), (__art)); \ + }) + +#define AE_LBKI(ars, ae_ohba) \ + ({ \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_lbki((__ars), (ae_ohba)); \ + }) + +#define AE_LBS(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_lbs((__art)); \ + }) + +#define AE_LBSI(ae_ohba) ({ __builtin_xtensa_ae_lbsi((ae_ohba)); }) + +#define AE_LE16(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int16x4 __ae_cmpp_v0 = (ae_cmpp_v0); \ + 
ae_int16x4 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_le16((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_LE32(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int32x2 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int32x2 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_le32((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_LE64(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int64 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int64 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_le64((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_LT16(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int16x4 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int16x4 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_lt16((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_LT32(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int32x2 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int32x2 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_lt32((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_LT64(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int64 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int64 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_lt64((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_MAX32(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int32x2 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int32x2 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_max32((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_MAX64(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int64 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int64 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_max64((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_MAXABS32S(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int32x2 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int32x2 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_maxabs32s((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_MAXABS64S(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int64 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int64 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_maxabs64s((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define 
AE_MIN32(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int32x2 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int32x2 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_min32((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_MIN64(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int64 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int64 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_min64((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_MINABS32S(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int32x2 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int32x2 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_minabs32s((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_MINABS64S(ae_cmpp_v0, ae_cmpp_v1) \ + ({ \ + ae_int64 __ae_cmpp_v0 = (ae_cmpp_v0); \ + ae_int64 __ae_cmpp_v1 = (ae_cmpp_v1); \ + __builtin_xtensa_ae_minabs64s((__ae_cmpp_v0), (__ae_cmpp_v1)); \ + }) + +#define AE_MOV(ae_to_dr_v0) \ + ({ \ + ae_int64 __ae_to_dr_v0 = (ae_to_dr_v0); \ + __builtin_xtensa_ae_mov((__ae_to_dr_v0)); \ + }) + +#define AE_MOVAD16_0(ae_dr_to_ar_v0) \ + ({ \ + ae_int16x4 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_movad16_0((__ae_dr_to_ar_v0)); \ + }) + +#define AE_MOVAD16_1(ae_dr_to_ar_v0) \ + ({ \ + ae_int16x4 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_movad16_1((__ae_dr_to_ar_v0)); \ + }) + +#define AE_MOVAD16_2(ae_dr_to_ar_v0) \ + ({ \ + ae_int16x4 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_movad16_2((__ae_dr_to_ar_v0)); \ + }) + +#define AE_MOVAD16_3(ae_dr_to_ar_v0) \ + ({ \ + ae_int16x4 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_movad16_3((__ae_dr_to_ar_v0)); \ + }) + +#define AE_MOVAD32_H(ae_dr_to_ar_v0) \ + ({ \ + ae_int32x2 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_movad32_h((__ae_dr_to_ar_v0)); \ + }) + +#define AE_MOVAD32_L(ae_dr_to_ar_v0) \ + ({ \ + ae_int32x2 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_movad32_l((__ae_dr_to_ar_v0)); \ + }) + +#define AE_MOVALIGN(ae_uu_v) \ + ({ \ + ae_valign __ae_uu_v = 
(ae_uu_v); \ + __builtin_xtensa_ae_movalign((__ae_uu_v)); \ + }) + +#define AE_MOVDA16(ars) \ + ({ \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_movda16((__ars)); \ + }) + +#define AE_MOVDA16X2(ars, art) \ + ({ \ + int __ars = (int)(ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_movda16x2((__ars), (__art)); \ + }) + +#define AE_MOVDA32(ars) \ + ({ \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_movda32((__ars)); \ + }) + +#define AE_MOVDA32X2(ars, art) \ + ({ \ + int __ars = (int)(ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_movda32x2((__ars), (__art)); \ + }) + +#define AE_MOVF16X4(ae_cmov_v, ae_cmov_v0, bt4) \ + ({ \ + ae_int16x4 __ae_cmov_v0 = (ae_cmov_v0); \ + xtbool4 __bt4 = (bt4); \ + __builtin_xtensa_ae_movf16x4((ae_int16x4 *)&(ae_cmov_v), (__ae_cmov_v0), \ + (__bt4)); \ + }) + +#define AE_MOVF32X2(ae_cmov_v, ae_cmov_v0, bt2) \ + ({ \ + ae_int32x2 __ae_cmov_v0 = (ae_cmov_v0); \ + xtbool2 __bt2 = (bt2); \ + __builtin_xtensa_ae_movf32x2((ae_int32x2 *)&(ae_cmov_v), (__ae_cmov_v0), \ + (__bt2)); \ + }) + +#define AE_MOVF64(ae_cmov_v, ae_cmov_v0, bt) \ + ({ \ + ae_int64 __ae_cmov_v0 = (ae_cmov_v0); \ + xtbool __bt = (bt); \ + __builtin_xtensa_ae_movf64((ae_int64 *)&(ae_cmov_v), (__ae_cmov_v0), \ + (__bt)); \ + }) + +#define AE_MOVI(movi_imm) ({ __builtin_xtensa_ae_movi((movi_imm)); }) + +#define AE_MOVT16X4(ae_cmov_v, ae_cmov_v0, bt4) \ + ({ \ + ae_int16x4 __ae_cmov_v0 = (ae_cmov_v0); \ + xtbool4 __bt4 = (bt4); \ + __builtin_xtensa_ae_movt16x4((ae_int16x4 *)&(ae_cmov_v), (__ae_cmov_v0), \ + (__bt4)); \ + }) + +#define AE_MOVT32X2(ae_cmov_v, ae_cmov_v0, bt2) \ + ({ \ + ae_int32x2 __ae_cmov_v0 = (ae_cmov_v0); \ + xtbool2 __bt2 = (bt2); \ + __builtin_xtensa_ae_movt32x2((ae_int32x2 *)&(ae_cmov_v), (__ae_cmov_v0), \ + (__bt2)); \ + }) + +#define AE_MOVT64(ae_cmov_v, ae_cmov_v0, bt) \ + ({ \ + ae_int64 __ae_cmov_v0 = (ae_cmov_v0); \ + xtbool __bt = (bt); \ + __builtin_xtensa_ae_movt64((ae_int64 *)&(ae_cmov_v), (__ae_cmov_v0), \ + 
(__bt)); \ + }) + +#define AE_MUL16X4(ae_mul_d1, ae_mul_d0) \ + ({ \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + __builtin_xtensa_ae_mul16x4((__ae_mul_d1), (__ae_mul_d0)); \ + }) + +#define AE_MUL32_HH(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32_hh((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32_LH(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32_lh((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32_LL(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32_ll((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mul32_ll_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MUL32U_LL(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32u_ll((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32X16_H0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = 
(opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32x16_h0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32X16_H0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mul32x16_h0_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MUL32X16_H1(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32x16_h1((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32X16_H1_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mul32x16_h1_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MUL32X16_H2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32x16_h2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32X16_H2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mul32x16_h2_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MUL32X16_H3(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32x16_h3((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32X16_H3_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + 
ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mul32x16_h3_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MUL32X16_L0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32x16_l0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32X16_L0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mul32x16_l0_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MUL32X16_L1(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32x16_l1((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32X16_L1_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mul32x16_l1_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MUL32X16_L2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32x16_l2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32X16_L2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mul32x16_l2_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MUL32X16_L3(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = 
(opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mul32x16_l3((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MUL32X16_L3_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mul32x16_l3_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULA16X4(ae_mul_q1, ae_mul_q0, ae_mul_d1, ae_mul_d0) \ + ({ \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + __builtin_xtensa_ae_mula16x4((ae_int32x2 *)&(ae_mul_q1), \ + (ae_int32x2 *)&(ae_mul_q0), (__ae_mul_d1), \ + (__ae_mul_d0)); \ + }) + +#define AE_MULA32_HH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32_hh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32_LH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32_lh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32_LL(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32_ll((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define 
AE_MULA32_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mula32_ll_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULA32U_LL(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32u_ll((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32X16_H0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32x16_h0((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32X16_H0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mula32x16_h0_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULA32X16_H1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32x16_h1((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32X16_H1_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + 
__builtin_xtensa_ae_mula32x16_h1_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULA32X16_H2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32x16_h2((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32X16_H2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mula32x16_h2_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULA32X16_H3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32x16_h3((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32X16_H3_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mula32x16_h3_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULA32X16_L0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32x16_l0((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32X16_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, 
ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mula32x16_l0_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULA32X16_L1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32x16_l1((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32X16_L1_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mula32x16_l1_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULA32X16_L2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32x16_l2((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32X16_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mula32x16_l2_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULA32X16_L3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mula32x16_l3((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + 
(__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULA32X16_L3_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mula32x16_l3_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAD24_HH_LL(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaad24_hh_ll((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAAD24_HH_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaad24_hh_ll_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAD24_HL_LH(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaad24_hl_lh((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAAD24_HL_LH_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaad24_hl_lh_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAD32X16_H0_L1(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaad32x16_h0_l1( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAD32X16_H0_L1_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + 
ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaad32x16_h0_l1_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaad32x16_h1_l0( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAD32X16_H1_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaad32x16_h1_l0_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAD32X16_H2_L3(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaad32x16_h2_l3( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAD32X16_H2_L3_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaad32x16_h2_l3_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaad32x16_h3_l2( \ + 
(ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAD32X16_H3_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaad32x16_h3_l2_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAFD16SS_11_00(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaafd16ss_11_00( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAFD16SS_11_00_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaafd16ss_11_00_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAFD16SS_13_02(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaafd16ss_13_02( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAFD16SS_13_02_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaafd16ss_13_02_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAFD16SS_33_22(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + 
opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaafd16ss_33_22( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAFD16SS_33_22_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaafd16ss_33_22_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAFD24_HH_LL(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaafd24_hh_ll((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAAFD24_HH_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaafd24_hh_ll_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAFD24_HL_LH(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaafd24_hl_lh((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAAFD24_HL_LH_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaafd24_hl_lh_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAFD32X16_H0_L1(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 
__opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaafd32x16_h0_l1( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAFD32X16_H0_L1_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaafd32x16_h0_l1_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAFD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaafd32x16_h1_l0( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAFD32X16_H1_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaafd32x16_h1_l0_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAAFD32X16_H2_L3(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaafd32x16_h2_l3( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAFD32X16_H2_L3_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaafd32x16_h2_l3_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + 
+#define AE_MULAAFD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaafd32x16_h3_l2( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAAFD32X16_H3_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaafd32x16_h3_l2_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAC24(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulac24((ae_int32x2 *)&(ae_mul_q0), (__ae_mul_d0), \ + (__ae_mul_d1)); \ + }) + +#define AE_MULAC32X16_H(opnd_ae_sem_mul_x4_q0, opnd_ae_sem_mul_x4_d0, \ + opnd_ae_sem_mul_x4_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x4_d0 = (opnd_ae_sem_mul_x4_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x4_d1 = (opnd_ae_sem_mul_x4_d1); \ + __builtin_xtensa_ae_mulac32x16_h((ae_int32x2 *)&(opnd_ae_sem_mul_x4_q0), \ + (__opnd_ae_sem_mul_x4_d0), \ + (__opnd_ae_sem_mul_x4_d1)); \ + }) + +#define AE_MULAC32X16_L(opnd_ae_sem_mul_x4_q0, opnd_ae_sem_mul_x4_d0, \ + opnd_ae_sem_mul_x4_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x4_d0 = (opnd_ae_sem_mul_x4_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x4_d1 = (opnd_ae_sem_mul_x4_d1); \ + __builtin_xtensa_ae_mulac32x16_l((ae_int32x2 *)&(opnd_ae_sem_mul_x4_q0), \ + (__opnd_ae_sem_mul_x4_d0), \ + (__opnd_ae_sem_mul_x4_d1)); \ + }) + +#define AE_MULAF16SS_00(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = 
(opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf16ss_00( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF16SS_00_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf16ss_00_s2((ae_int32x2 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF16SS_10(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaf16ss_10((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAF16SS_11(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaf16ss_11((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAF16SS_20(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaf16ss_20((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAF16SS_21(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaf16ss_21((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAF16SS_22(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaf16ss_22((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAF16SS_30(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaf16ss_30((ae_int32x2 *)&(ae_mul_q0), \ + 
(__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAF16SS_31(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaf16ss_31((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAF16SS_32(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaf16ss_32((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAF16SS_33(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulaf16ss_33((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULAF16X4SS(ae_mul_q1, ae_mul_q0, ae_mul_d1, ae_mul_d0) \ + ({ \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + __builtin_xtensa_ae_mulaf16x4ss((ae_int32x2 *)&(ae_mul_q1), \ + (ae_int32x2 *)&(ae_mul_q0), (__ae_mul_d1), \ + (__ae_mul_d0)); \ + }) + +#define AE_MULAF32R_HH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32r_hh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32R_LH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32r_lh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32R_LL(opnd_ae_sem_mul_x2_S1_q0, 
opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32r_ll((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32R_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf32r_ll_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF32S_HH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32s_hh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32S_LH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32s_lh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32S_LL(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32s_ll((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32S_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = 
(ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf32s_ll_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF32X16_H0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32x16_h0((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32X16_H0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf32x16_h0_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF32X16_H1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32x16_h1((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32X16_H1_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf32x16_h1_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF32X16_H2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32x16_h2((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + 
(__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32X16_H2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf32x16_h2_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF32X16_H3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32x16_h3((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32X16_H3_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf32x16_h3_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF32X16_L0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32x16_l0((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32X16_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf32x16_l0_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF32X16_L1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = 
(opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32x16_l1((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32X16_L1_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf32x16_l1_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF32X16_L2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32x16_l2((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32X16_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf32x16_l2_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF32X16_L3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf32x16_l3((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF32X16_L3_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf32x16_l3_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF48Q32SP16S_L(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + 
opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int64 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf48q32sp16s_l( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF48Q32SP16S_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf48q32sp16s_l_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAF48Q32SP16U_L(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int64 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulaf48q32sp16u_l( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAF48Q32SP16U_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaf48q32sp16u_l_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAFC24RA(opnd_ae_sem_mul_x4_q0, opnd_ae_sem_mul_x4_d0, \ + opnd_ae_sem_mul_x4_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x4_d0 = (opnd_ae_sem_mul_x4_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x4_d1 = (opnd_ae_sem_mul_x4_d1); \ + __builtin_xtensa_ae_mulafc24ra((ae_int32x2 *)&(opnd_ae_sem_mul_x4_q0), \ + (__opnd_ae_sem_mul_x4_d0), \ + (__opnd_ae_sem_mul_x4_d1)); \ + }) + +#define AE_MULAFC32X16RAS_H(opnd_ae_sem_mul_x4_q0, opnd_ae_sem_mul_x4_d0, \ + opnd_ae_sem_mul_x4_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x4_d0 = (opnd_ae_sem_mul_x4_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x4_d1 = (opnd_ae_sem_mul_x4_d1); \ + 
__builtin_xtensa_ae_mulafc32x16ras_h( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x4_q0), (__opnd_ae_sem_mul_x4_d0), \ + (__opnd_ae_sem_mul_x4_d1)); \ + }) + +#define AE_MULAFC32X16RAS_L(opnd_ae_sem_mul_x4_q0, opnd_ae_sem_mul_x4_d0, \ + opnd_ae_sem_mul_x4_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x4_d0 = (opnd_ae_sem_mul_x4_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x4_d1 = (opnd_ae_sem_mul_x4_d1); \ + __builtin_xtensa_ae_mulafc32x16ras_l( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x4_q0), (__opnd_ae_sem_mul_x4_d0), \ + (__opnd_ae_sem_mul_x4_d1)); \ + }) + +#define AE_MULAFD24X2_FIR_H(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, \ + ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int32x2 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulafd24x2_fir_h( \ + (ae_int64 *)&(ae_mul_q0), (ae_int64 *)&(ae_mul_q1), (__ae_mul_d0), \ + (__ae_mul_d1), (__ae_mul_d2)); \ + }) + +#define AE_MULAFD24X2_FIR_L(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, \ + ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int32x2 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulafd24x2_fir_l( \ + (ae_int64 *)&(ae_mul_q0), (ae_int64 *)&(ae_mul_q1), (__ae_mul_d0), \ + (__ae_mul_d1), (__ae_mul_d2)); \ + }) + +#define AE_MULAFD32X16X2_FIR_HH(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, \ + ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulafd32x16x2_fir_hh( \ + (ae_int64 *)&(ae_mul_q0), (ae_int64 *)&(ae_mul_q1), (__ae_mul_d0), \ + (__ae_mul_d1), (__ae_mul_d2)); \ + }) + +#define AE_MULAFD32X16X2_FIR_HL(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, \ + ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulafd32x16x2_fir_hl( \ + (ae_int64 *)&(ae_mul_q0), (ae_int64 
*)&(ae_mul_q1), (__ae_mul_d0), \ + (__ae_mul_d1), (__ae_mul_d2)); \ + }) + +#define AE_MULAFD32X16X2_FIR_LH(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, \ + ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulafd32x16x2_fir_lh( \ + (ae_int64 *)&(ae_mul_q0), (ae_int64 *)&(ae_mul_q1), (__ae_mul_d0), \ + (__ae_mul_d1), (__ae_mul_d2)); \ + }) + +#define AE_MULAFD32X16X2_FIR_LL(ae_mul_q0, ae_mul_q1, ae_mul_d0, ae_mul_d1, \ + ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulafd32x16x2_fir_ll( \ + (ae_int64 *)&(ae_mul_q0), (ae_int64 *)&(ae_mul_q1), (__ae_mul_d0), \ + (__ae_mul_d1), (__ae_mul_d2)); \ + }) + +#define AE_MULAFP24X2R(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulafp24x2r((ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAFP24X2R_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulafp24x2r_s2((ae_int32x2 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAFP24X2RA(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulafp24x2ra( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define 
AE_MULAFP24X2RA_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulafp24x2ra_s2((ae_int32x2 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAFP32X16X2RAS_H(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulafp32x16x2ras_h( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAFP32X16X2RAS_H_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulafp32x16x2ras_h_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAFP32X16X2RAS_L(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulafp32x16x2ras_l( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAFP32X16X2RAS_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulafp32x16x2ras_l_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAFP32X16X2RS_H(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 
__opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulafp32x16x2rs_h( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAFP32X16X2RS_H_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulafp32x16x2rs_h_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAFP32X16X2RS_L(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulafp32x16x2rs_l( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAFP32X16X2RS_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulafp32x16x2rs_l_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAFP32X2RAS(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulafp32x2ras( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAFP32X2RS(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulafp32x2rs( \ + (ae_int32x2 
*)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAFQ32SP24S_H_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulafq32sp24s_h_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAFQ32SP24S_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulafq32sp24s_l_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAP24X2(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulap24x2((ae_int32x2 *)&(ae_mul_q0), (__ae_mul_d0), \ + (__ae_mul_d1)); \ + }) + +#define AE_MULAP24X2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulap24x2_s2((ae_int32x2 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAP32X16X2_H(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulap32x16x2_h( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAP32X16X2_L(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulap32x16x2_l( \ + (ae_int32x2 
*)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAP32X2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulap32x2((ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAQ32SP16S_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaq32sp16s_l_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAQ32SP16U_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulaq32sp16u_l_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULARFQ32SP24S_H_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mularfq32sp24s_h_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULARFQ32SP24S_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mularfq32sp24s_l_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAS32F48P16S_HH(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + 
__builtin_xtensa_ae_mulas32f48p16s_hh( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAS32F48P16S_HH_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulas32f48p16s_hh_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAS32F48P16S_LH(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulas32f48p16s_lh( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAS32F48P16S_LH_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulas32f48p16s_lh_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULAS32F48P16S_LL(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulas32f48p16s_ll( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULAS32F48P16S_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulas32f48p16s_ll_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULASD24_HH_LL(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + 
ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulasd24_hh_ll((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULASD24_HH_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulasd24_hh_ll_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULASD24_HL_LH(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulasd24_hl_lh((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULASD24_HL_LH_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulasd24_hl_lh_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULASD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulasd32x16_h1_l0( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULASD32X16_H1_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulasd32x16_h1_l0_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULASD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 
__opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulasd32x16_h3_l2( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULASD32X16_H3_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulasd32x16_h3_l2_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULASFD24_HH_LL(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulasfd24_hh_ll((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULASFD24_HH_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulasfd24_hh_ll_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULASFD24_HL_LH(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulasfd24_hl_lh((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULASFD24_HL_LH_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulasfd24_hl_lh_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULASFD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulasfd32x16_h1_l0( \ + (ae_int64 
*)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULASFD32X16_H1_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulasfd32x16_h1_l0_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULASFD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulasfd32x16_h3_l2( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULASFD32X16_H3_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulasfd32x16_h3_l2_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULC24(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulc24((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULC32X16_H(opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x4_d0 = (opnd_ae_sem_mul_x4_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x4_d1 = (opnd_ae_sem_mul_x4_d1); \ + __builtin_xtensa_ae_mulc32x16_h((__opnd_ae_sem_mul_x4_d0), \ + (__opnd_ae_sem_mul_x4_d1)); \ + }) + +#define AE_MULC32X16_L(opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x4_d0 = (opnd_ae_sem_mul_x4_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x4_d1 = (opnd_ae_sem_mul_x4_d1); \ + __builtin_xtensa_ae_mulc32x16_l((__opnd_ae_sem_mul_x4_d0), \ + (__opnd_ae_sem_mul_x4_d1)); \ + }) + +#define 
AE_MULF16SS_00(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf16ss_00((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF16SS_00_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf16ss_00_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF16SS_10(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulf16ss_10((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULF16SS_11(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulf16ss_11((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULF16SS_20(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulf16ss_20((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULF16SS_21(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulf16ss_21((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULF16SS_22(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulf16ss_22((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULF16SS_30(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulf16ss_30((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULF16SS_31(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + 
__builtin_xtensa_ae_mulf16ss_31((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULF16SS_32(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulf16ss_32((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULF16SS_33(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulf16ss_33((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULF16X4SS(ae_mul_d1, ae_mul_d0) \ + ({ \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + __builtin_xtensa_ae_mulf16x4ss((__ae_mul_d1), (__ae_mul_d0)); \ + }) + +#define AE_MULF32R_HH(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32r_hh((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32R_LH(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32r_lh((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32R_LL(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32r_ll((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32R_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf32r_ll_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define 
AE_MULF32S_HH(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32s_hh((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32S_LH(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32s_lh((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32S_LL(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32s_ll((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32S_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf32s_ll_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF32X16_H0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32x16_h0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32X16_H0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf32x16_h0_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF32X16_H1(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 
__opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32x16_h1((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32X16_H1_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf32x16_h1_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF32X16_H2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32x16_h2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32X16_H2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf32x16_h2_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF32X16_H3(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32x16_h3((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32X16_H3_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf32x16_h3_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF32X16_L0(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32x16_l0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32X16_L0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 
__ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf32x16_l0_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF32X16_L1(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32x16_l1((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32X16_L1_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf32x16_l1_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF32X16_L2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32x16_l2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32X16_L2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf32x16_l2_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF32X16_L3(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf32x16_l3((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF32X16_L3_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf32x16_l3_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF48Q32SP16S_L(opnd_ae_sem_mul_x2_S1_d0, \ + 
opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int64 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf48q32sp16s_l((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF48Q32SP16S_L_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf48q32sp16s_l_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULF48Q32SP16U_L(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int64 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulf48q32sp16u_l((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULF48Q32SP16U_L_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulf48q32sp16u_l_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULFC24RA(opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x4_d0 = (opnd_ae_sem_mul_x4_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x4_d1 = (opnd_ae_sem_mul_x4_d1); \ + __builtin_xtensa_ae_mulfc24ra((__opnd_ae_sem_mul_x4_d0), \ + (__opnd_ae_sem_mul_x4_d1)); \ + }) + +#define AE_MULFC32X16RAS_H(opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x4_d0 = (opnd_ae_sem_mul_x4_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x4_d1 = (opnd_ae_sem_mul_x4_d1); \ + __builtin_xtensa_ae_mulfc32x16ras_h((__opnd_ae_sem_mul_x4_d0), \ + (__opnd_ae_sem_mul_x4_d1)); \ + }) + +#define AE_MULFC32X16RAS_L(opnd_ae_sem_mul_x4_d0, opnd_ae_sem_mul_x4_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x4_d0 = (opnd_ae_sem_mul_x4_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x4_d1 = (opnd_ae_sem_mul_x4_d1); \ + 
__builtin_xtensa_ae_mulfc32x16ras_l((__opnd_ae_sem_mul_x4_d0), \ + (__opnd_ae_sem_mul_x4_d1)); \ + }) + +#define AE_MULFD24X2_FIR_H(ae_mul_d0, ae_mul_d1, ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int32x2 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulfd24x2_fir_h((__ae_mul_d0), (__ae_mul_d1), \ + (__ae_mul_d2)); \ + }) + +#define AE_MULFD24X2_FIR_L(ae_mul_d0, ae_mul_d1, ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int32x2 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulfd24x2_fir_l((__ae_mul_d0), (__ae_mul_d1), \ + (__ae_mul_d2)); \ + }) + +#define AE_MULFD32X16X2_FIR_HH(ae_mul_d0, ae_mul_d1, ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulfd32x16x2_fir_hh((__ae_mul_d0), (__ae_mul_d1), \ + (__ae_mul_d2)); \ + }) + +#define AE_MULFD32X16X2_FIR_HL(ae_mul_d0, ae_mul_d1, ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulfd32x16x2_fir_hl((__ae_mul_d0), (__ae_mul_d1), \ + (__ae_mul_d2)); \ + }) + +#define AE_MULFD32X16X2_FIR_LH(ae_mul_d0, ae_mul_d1, ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulfd32x16x2_fir_lh((__ae_mul_d0), (__ae_mul_d1), \ + (__ae_mul_d2)); \ + }) + +#define AE_MULFD32X16X2_FIR_LL(ae_mul_d0, ae_mul_d1, ae_mul_d2) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d2 = (ae_mul_d2); \ + __builtin_xtensa_ae_mulfd32x16x2_fir_ll((__ae_mul_d0), (__ae_mul_d1), \ + (__ae_mul_d2)); \ + }) + +#define AE_MULFP16X4RAS(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + 
ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulfp16x4ras((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULFP16X4S(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulfp16x4s((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULFP24X2R(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulfp24x2r((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULFP24X2R_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulfp24x2r_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULFP24X2RA(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulfp24x2ra((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULFP24X2RA_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulfp24x2ra_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULFP32X16X2RAS_H(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulfp32x16x2ras_h((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULFP32X16X2RAS_H_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); 
\ + __builtin_xtensa_ae_mulfp32x16x2ras_h_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULFP32X16X2RAS_L(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulfp32x16x2ras_l((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULFP32X16X2RAS_L_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulfp32x16x2ras_l_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULFP32X16X2RS_H(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulfp32x16x2rs_h((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULFP32X16X2RS_H_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulfp32x16x2rs_h_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULFP32X16X2RS_L(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulfp32x16x2rs_l((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULFP32X16X2RS_L_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulfp32x16x2rs_l_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULFP32X2RAS(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) 
\ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulfp32x2ras((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULFP32X2RS(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulfp32x2rs((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULFQ32SP24S_H_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulfq32sp24s_h_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULFQ32SP24S_L_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulfq32sp24s_l_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULP24X2(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulp24x2((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULP24X2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulp24x2_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULP32X16X2_H(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulp32x16x2_h((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULP32X16X2_L(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 
__opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulp32x16x2_l((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULP32X2(opnd_ae_sem_mul_x2_S1_d0, opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulp32x2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULQ32SP16S_L_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulq32sp16s_l_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULQ32SP16U_L_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulq32sp16u_l_s2((__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULRFQ32SP24S_H_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulrfq32sp24s_h_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULRFQ32SP24S_L_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulrfq32sp24s_l_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS16X4(ae_mul_q1, ae_mul_q0, ae_mul_d1, ae_mul_d0) \ + ({ \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + __builtin_xtensa_ae_muls16x4((ae_int32x2 *)&(ae_mul_q1), \ + (ae_int32x2 *)&(ae_mul_q0), (__ae_mul_d1), \ + (__ae_mul_d0)); \ + }) + +#define AE_MULS32_HH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 
__opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32_hh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32_LH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32_lh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32_LL(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32_ll((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32F48P16S_HH(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32f48p16s_hh((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32F48P16S_HH_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32f48p16s_hh_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS32F48P16S_LH(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + 
__builtin_xtensa_ae_muls32f48p16s_lh((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32F48P16S_LH_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32f48p16s_lh_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS32F48P16S_LL(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32f48p16s_ll((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32F48P16S_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32f48p16s_ll_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS32U_LL(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32u_ll((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32X16_H0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32x16_h0((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32X16_H0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + 
__builtin_xtensa_ae_muls32x16_h0_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS32X16_H1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32x16_h1((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32X16_H1_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32x16_h1_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS32X16_H2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32x16_h2((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32X16_H2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32x16_h2_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS32X16_H3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32x16_h3((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32X16_H3_S2(ae_mul_S2_q0, ae_mul_S2_d0, 
ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32x16_h3_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS32X16_L0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32x16_l0((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32X16_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32x16_l0_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS32X16_L1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32x16_l1((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32X16_L1_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32x16_l1_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS32X16_L2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32x16_l2((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + 
(__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32X16_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32x16_l2_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULS32X16_L3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_muls32x16_l3((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULS32X16_L3_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_muls32x16_l3_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSAD24_HH_LL(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsad24_hh_ll((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSAD24_HH_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsad24_hh_ll_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSAD32X16_H1_L0(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsad32x16_h1_l0((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSAD32X16_H1_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 
__ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsad32x16_h1_l0_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSAD32X16_H3_L2(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsad32x16_h3_l2((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSAD32X16_H3_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsad32x16_h3_l2_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSAFD24_HH_LL(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsafd24_hh_ll((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSAFD24_HH_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsafd24_hh_ll_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSAFD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsafd32x16_h1_l0( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSAFD32X16_H1_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsafd32x16_h1_l0_s2( \ + 
(ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSAFD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsafd32x16_h3_l2( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSAFD32X16_H3_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsafd32x16_h3_l2_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF16SS_00(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf16ss_00( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF16SS_00_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf16ss_00_s2((ae_int32x2 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF16SS_10(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsf16ss_10((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSF16SS_11(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsf16ss_11((ae_int32x2 *)&(ae_mul_q0), \ + 
(__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSF16SS_20(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsf16ss_20((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSF16SS_21(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsf16ss_21((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSF16SS_22(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsf16ss_22((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSF16SS_30(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsf16ss_30((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSF16SS_31(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsf16ss_31((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSF16SS_32(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsf16ss_32((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSF16SS_33(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int16x4 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsf16ss_33((ae_int32x2 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSF16X4SS(ae_mul_q1, ae_mul_q0, ae_mul_d1, ae_mul_d0) \ + ({ \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + ae_int16x4 __ae_mul_d0 = 
(ae_mul_d0); \ + __builtin_xtensa_ae_mulsf16x4ss((ae_int32x2 *)&(ae_mul_q1), \ + (ae_int32x2 *)&(ae_mul_q0), (__ae_mul_d1), \ + (__ae_mul_d0)); \ + }) + +#define AE_MULSF32R_HH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32r_hh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32R_LH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32r_lh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32R_LL(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32r_ll((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32R_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf32r_ll_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF32S_HH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32s_hh((ae_int64 
*)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32S_LH(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32s_lh((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32S_LL(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32s_ll((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32X16_H0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32x16_h0((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32X16_H0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf32x16_h0_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF32X16_H1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32x16_h1((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + 
(__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32X16_H1_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf32x16_h1_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF32X16_H2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32x16_h2((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32X16_H2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf32x16_h2_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF32X16_H3(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32x16_h3((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32X16_H3_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf32x16_h3_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF32X16_L0(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 
__opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32x16_l0((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32X16_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf32x16_l0_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF32X16_L1(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32x16_l1((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32X16_L1_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf32x16_l1_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF32X16_L2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32x16_l2((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32X16_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf32x16_l2_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF32X16_L3(opnd_ae_sem_mul_x2_S1_q0, 
opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf32x16_l3((ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF32X16_L3_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf32x16_l3_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF48Q32SP16S_L(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int64 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf48q32sp16s_l( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF48Q32SP16S_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsf48q32sp16s_l_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSF48Q32SP16U_L(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int64 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsf48q32sp16u_l( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSF48Q32SP16U_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + 
__builtin_xtensa_ae_mulsf48q32sp16u_l_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSFP24X2R(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsfp24x2r((ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSFP24X2R_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsfp24x2r_s2((ae_int32x2 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSFP24X2RA(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsfp24x2ra( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSFP24X2RA_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsfp24x2ra_s2((ae_int32x2 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSFP32X16X2RAS_H(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsfp32x16x2ras_h( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define 
AE_MULSFP32X16X2RAS_H_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsfp32x16x2ras_h_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSFP32X16X2RAS_L(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsfp32x16x2ras_l( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSFP32X16X2RAS_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsfp32x16x2ras_l_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSFP32X16X2RS_H(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsfp32x16x2rs_h( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSFP32X16X2RS_H_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsfp32x16x2rs_h_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSFP32X16X2RS_L(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 
__opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsfp32x16x2rs_l( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSFP32X16X2RS_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsfp32x16x2rs_l_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSFP32X2RAS(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsfp32x2ras( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSFP32X2RS(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsfp32x2rs( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSFQ32SP24S_H_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsfq32sp24s_h_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSFQ32SP24S_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsfq32sp24s_l_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSP24X2(ae_mul_q0, ae_mul_d0, 
ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulsp24x2((ae_int32x2 *)&(ae_mul_q0), (__ae_mul_d0), \ + (__ae_mul_d1)); \ + }) + +#define AE_MULSP24X2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsp24x2_s2((ae_int32x2 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSP32X16X2_H(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsp32x16x2_h( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSP32X16X2_L(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsp32x16x2_l( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSP32X2(opnd_ae_sem_mul_x2_S1_q0, opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulsp32x2((ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSQ32SP16S_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsq32sp16s_l_s2((ae_int64 *)&(ae_mul_S2_q0), \ + 
(__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSQ32SP16U_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsq32sp16u_l_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSRFQ32SP24S_H_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsrfq32sp24s_h_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSRFQ32SP24S_L_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int64 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulsrfq32sp24s_l_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSS32F48P16S_HH(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulss32f48p16s_hh( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSS32F48P16S_HH_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulss32f48p16s_hh_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSS32F48P16S_LH(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulss32f48p16s_lh( \ + (ae_int64 
*)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSS32F48P16S_LH_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulss32f48p16s_lh_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSS32F48P16S_LL(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulss32f48p16s_ll( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSS32F48P16S_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulss32f48p16s_ll_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSD24_HH_LL(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulssd24_hh_ll((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSSD24_HH_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssd24_hh_ll_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSD24_HL_LH(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulssd24_hl_lh((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSSD24_HL_LH_S2(ae_mul_S2_q0, 
ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssd24_hl_lh_s2((ae_int64 *)&(ae_mul_S2_q0), \ + (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulssd32x16_h1_l0( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSSD32X16_H1_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssd32x16_h1_l0_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulssd32x16_h3_l2( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSSD32X16_H3_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssd32x16_h3_l2_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSFD16SS_11_00(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + 
__builtin_xtensa_ae_mulssfd16ss_11_00( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSSFD16SS_11_00_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssfd16ss_11_00_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSFD16SS_13_02(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulssfd16ss_13_02( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSSFD16SS_13_02_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssfd16ss_13_02_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSFD16SS_33_22(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulssfd16ss_33_22( \ + (ae_int32x2 *)&(opnd_ae_sem_mul_x2_S1_q0), \ + (__opnd_ae_sem_mul_x2_S1_d0), (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSSFD16SS_33_22_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssfd16ss_33_22_s2( \ + (ae_int32x2 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSFD24_HH_LL(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ 
+ ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulssfd24_hh_ll((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSSFD24_HH_LL_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssfd24_hh_ll_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSFD24_HL_LH(ae_mul_q0, ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulssfd24_hl_lh((ae_int64 *)&(ae_mul_q0), \ + (__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULSSFD24_HL_LH_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssfd24_hl_lh_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSFD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulssfd32x16_h1_l0( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSSFD32X16_H1_L0_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssfd32x16_h1_l0_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULSSFD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_q0, \ + opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + 
ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulssfd32x16_h3_l2( \ + (ae_int64 *)&(opnd_ae_sem_mul_x2_S1_q0), (__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULSSFD32X16_H3_L2_S2(ae_mul_S2_q0, ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulssfd32x16_h3_l2_s2( \ + (ae_int64 *)&(ae_mul_S2_q0), (__ae_mul_S2_d0), (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAD24_HH_LL(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzaad24_hh_ll((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZAAD24_HH_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaad24_hh_ll_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAD24_HL_LH(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzaad24_hl_lh((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZAAD24_HL_LH_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaad24_hl_lh_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAD32X16_H0_L1(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaad32x16_h0_l1((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAD32X16_H0_L1_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = 
(ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaad32x16_h0_l1_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaad32x16_h1_l0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAD32X16_H1_L0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaad32x16_h1_l0_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAD32X16_H2_L3(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaad32x16_h2_l3((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAD32X16_H2_L3_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaad32x16_h2_l3_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaad32x16_h3_l2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAD32X16_H3_L2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaad32x16_h3_l2_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define 
AE_MULZAAFD16SS_11_00(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaafd16ss_11_00((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAFD16SS_11_00_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaafd16ss_11_00_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAFD16SS_13_02(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaafd16ss_13_02((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAFD16SS_13_02_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaafd16ss_13_02_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAFD16SS_33_22(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaafd16ss_33_22((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAFD16SS_33_22_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaafd16ss_33_22_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAFD24_HH_LL(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + 
__builtin_xtensa_ae_mulzaafd24_hh_ll((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZAAFD24_HH_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaafd24_hh_ll_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAFD24_HL_LH(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzaafd24_hl_lh((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZAAFD24_HL_LH_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaafd24_hl_lh_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAFD32X16_H0_L1(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaafd32x16_h0_l1((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAFD32X16_H0_L1_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaafd32x16_h0_l1_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAFD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaafd32x16_h1_l0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAFD32X16_H1_L0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + 
__builtin_xtensa_ae_mulzaafd32x16_h1_l0_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAFD32X16_H2_L3(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaafd32x16_h2_l3((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAFD32X16_H2_L3_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaafd32x16_h2_l3_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZAAFD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzaafd32x16_h3_l2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZAAFD32X16_H3_L2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzaafd32x16_h3_l2_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZASD24_HH_LL(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzasd24_hh_ll((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZASD24_HH_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzasd24_hh_ll_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZASD24_HL_LH(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + 
__builtin_xtensa_ae_mulzasd24_hl_lh((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZASD24_HL_LH_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzasd24_hl_lh_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZASD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzasd32x16_h1_l0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZASD32X16_H1_L0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzasd32x16_h1_l0_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZASD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzasd32x16_h3_l2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZASD32X16_H3_L2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzasd32x16_h3_l2_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZASFD24_HH_LL(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzasfd24_hh_ll((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZASFD24_HH_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + 
__builtin_xtensa_ae_mulzasfd24_hh_ll_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZASFD24_HL_LH(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzasfd24_hl_lh((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZASFD24_HL_LH_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzasfd24_hl_lh_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZASFD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzasfd32x16_h1_l0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZASFD32X16_H1_L0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzasfd32x16_h1_l0_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZASFD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzasfd32x16_h3_l2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZASFD32X16_H3_L2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzasfd32x16_h3_l2_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSAD24_HH_LL(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + 
__builtin_xtensa_ae_mulzsad24_hh_ll((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZSAD24_HH_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzsad24_hh_ll_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSAD32X16_H1_L0(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzsad32x16_h1_l0((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZSAD32X16_H1_L0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzsad32x16_h1_l0_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSAD32X16_H3_L2(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int16x4 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzsad32x16_h3_l2((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZSAD32X16_H3_L2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzsad32x16_h3_l2_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSAFD24_HH_LL(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzsafd24_hh_ll((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZSAFD24_HH_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzsafd24_hh_ll_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSAFD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = 
(opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzsafd32x16_h1_l0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZSAFD32X16_H1_L0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzsafd32x16_h1_l0_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSAFD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzsafd32x16_h3_l2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZSAFD32X16_H3_L2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzsafd32x16_h3_l2_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSD24_HH_LL(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzssd24_hh_ll((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZSSD24_HH_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssd24_hh_ll_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSD24_HL_LH(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzssd24_hl_lh((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZSSD24_HL_LH_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssd24_hl_lh_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define 
AE_MULZSSD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzssd32x16_h1_l0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZSSD32X16_H1_L0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssd32x16_h1_l0_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzssd32x16_h3_l2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZSSD32X16_H3_L2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssd32x16_h3_l2_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSFD16SS_11_00(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzssfd16ss_11_00((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZSSFD16SS_11_00_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssfd16ss_11_00_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSFD16SS_13_02(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = 
(opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzssfd16ss_13_02((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZSSFD16SS_13_02_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssfd16ss_13_02_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSFD16SS_33_22(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzssfd16ss_33_22((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZSSFD16SS_33_22_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int16x4 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssfd16ss_33_22_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSFD24_HH_LL(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzssfd24_hh_ll((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZSSFD24_HH_LL_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssfd24_hh_ll_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSFD24_HL_LH(ae_mul_d0, ae_mul_d1) \ + ({ \ + ae_int32x2 __ae_mul_d0 = (ae_mul_d0); \ + ae_int32x2 __ae_mul_d1 = (ae_mul_d1); \ + __builtin_xtensa_ae_mulzssfd24_hl_lh((__ae_mul_d0), (__ae_mul_d1)); \ + }) + +#define AE_MULZSSFD24_HL_LH_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int32x2 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + 
__builtin_xtensa_ae_mulzssfd24_hl_lh_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSFD32X16_H1_L0(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzssfd32x16_h1_l0((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZSSFD32X16_H1_L0_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssfd32x16_h1_l0_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_MULZSSFD32X16_H3_L2(opnd_ae_sem_mul_x2_S1_d0, \ + opnd_ae_sem_mul_x2_S1_d1) \ + ({ \ + ae_int32x2 __opnd_ae_sem_mul_x2_S1_d0 = (opnd_ae_sem_mul_x2_S1_d0); \ + ae_int16x4 __opnd_ae_sem_mul_x2_S1_d1 = (opnd_ae_sem_mul_x2_S1_d1); \ + __builtin_xtensa_ae_mulzssfd32x16_h3_l2((__opnd_ae_sem_mul_x2_S1_d0), \ + (__opnd_ae_sem_mul_x2_S1_d1)); \ + }) + +#define AE_MULZSSFD32X16_H3_L2_S2(ae_mul_S2_d0, ae_mul_S2_d1) \ + ({ \ + ae_int32x2 __ae_mul_S2_d0 = (ae_mul_S2_d0); \ + ae_int16x4 __ae_mul_S2_d1 = (ae_mul_S2_d1); \ + __builtin_xtensa_ae_mulzssfd32x16_h3_l2_s2((__ae_mul_S2_d0), \ + (__ae_mul_S2_d1)); \ + }) + +#define AE_NAND(ae_dr_to_dr_v0, ae_dr_to_dr_v1) \ + ({ \ + ae_int64 __ae_dr_to_dr_v0 = (ae_dr_to_dr_v0); \ + ae_int64 __ae_dr_to_dr_v1 = (ae_dr_to_dr_v1); \ + __builtin_xtensa_ae_nand((__ae_dr_to_dr_v0), (__ae_dr_to_dr_v1)); \ + }) + +#define AE_NEG16S(ae_arth_v1) \ + ({ \ + ae_int16x4 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_neg16s((__ae_arth_v1)); \ + }) + +#define AE_NEG24S(ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_neg24s((__ae_arth_v1)); \ + }) + +#define AE_NEG32(ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_neg32((__ae_arth_v1)); \ + }) + +#define 
AE_NEG32S(ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_neg32s((__ae_arth_v1)); \ + }) + +#define AE_NEG64(ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_neg64((__ae_arth_v1)); \ + }) + +#define AE_NEG64S(ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_neg64s((__ae_arth_v1)); \ + }) + +#define AE_NSA64(ae_dr_to_ar_v0) \ + ({ \ + ae_int64 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_nsa64((__ae_dr_to_ar_v0)); \ + }) + +#define AE_NSAZ16_0(ae_dr_to_ar_v0) \ + ({ \ + ae_int16x4 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_nsaz16_0((__ae_dr_to_ar_v0)); \ + }) + +#define AE_NSAZ32_L(ae_dr_to_ar_v0) \ + ({ \ + ae_int32x2 __ae_dr_to_ar_v0 = (ae_dr_to_ar_v0); \ + __builtin_xtensa_ae_nsaz32_l((__ae_dr_to_ar_v0)); \ + }) + +#define AE_OR(ae_dr_to_dr_v0, ae_dr_to_dr_v1) \ + ({ \ + ae_int64 __ae_dr_to_dr_v0 = (ae_dr_to_dr_v0); \ + ae_int64 __ae_dr_to_dr_v1 = (ae_dr_to_dr_v1); \ + __builtin_xtensa_ae_or((__ae_dr_to_dr_v0), (__ae_dr_to_dr_v1)); \ + }) + +#define AE_PKSR24(ae_pks_d, ae_pks_s, ae_imm2) \ + ({ \ + ae_int64 __ae_pks_s = (ae_pks_s); \ + __builtin_xtensa_ae_pksr24((ae_int32x2 *)&(ae_pks_d), (__ae_pks_s), \ + (ae_imm2)); \ + }) + +#define AE_PKSR32(ae_pks_d, ae_pks_s, ae_imm2) \ + ({ \ + ae_int64 __ae_pks_s = (ae_pks_s); \ + __builtin_xtensa_ae_pksr32((ae_int32x2 *)&(ae_pks_d), (__ae_pks_s), \ + (ae_imm2)); \ + }) + +#define AE_ROUND16X4F32SASYM(ae_arth_v1, ae_arth_v0) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + __builtin_xtensa_ae_round16x4f32sasym((__ae_arth_v1), (__ae_arth_v0)); \ + }) + +#define AE_ROUND16X4F32SSYM(ae_arth_v1, ae_arth_v0) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + __builtin_xtensa_ae_round16x4f32ssym((__ae_arth_v1), (__ae_arth_v0)); \ + }) + +#define AE_ROUND24X2F48SASYM(ae_arth_v0, ae_arth_v1) 
\ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_round24x2f48sasym((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ROUND24X2F48SSYM(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_round24x2f48ssym((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ROUND32X2F48SASYM(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_round32x2f48sasym((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ROUND32X2F48SSYM(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_round32x2f48ssym((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ROUND32X2F64SASYM(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_round32x2f64sasym((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ROUND32X2F64SSYM(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_round32x2f64ssym((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ROUNDSP16F24ASYM(ae_arth_v0) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + __builtin_xtensa_ae_roundsp16f24asym((__ae_arth_v0)); \ + }) + +#define AE_ROUNDSP16F24SYM(ae_arth_v0) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + __builtin_xtensa_ae_roundsp16f24sym((__ae_arth_v0)); \ + }) + +#define AE_ROUNDSP16Q48X2ASYM(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_roundsp16q48x2asym((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ROUNDSP16Q48X2SYM(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = 
(ae_arth_v1); \ + __builtin_xtensa_ae_roundsp16q48x2sym((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_ROUNDSQ32F48ASYM(ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_roundsq32f48asym((__ae_arth_v1)); \ + }) + +#define AE_ROUNDSQ32F48SYM(ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_roundsq32f48sym((__ae_arth_v1)); \ + }) + +#define AE_S16_0_I(ae_ls_v, ars, ae_immls16) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + ae_int16 *__ars = (ars); \ + __builtin_xtensa_ae_s16_0_i((__ae_ls_v), (__ars), (ae_immls16)); \ + }) + +#define AE_S16_0_IP(ae_ls_v, ars, ae_immls16) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s16_0_ip((__ae_ls_v), (ae_int16 **)&(ars), \ + (ae_immls16)); \ + }) + +#define AE_S16_0_X(ae_ls_v, ars, art) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + ae_int16 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16_0_x((__ae_ls_v), (__ars), (__art)); \ + }) + +#define AE_S16_0_XC(ae_ls_v, ars, art) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16_0_xc((__ae_ls_v), (ae_int16 **)&(ars), (__art)); \ + }) + +#define AE_S16_0_XP(ae_ls_v, ars, art) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16_0_xp((__ae_ls_v), (ae_int16 **)&(ars), (__art)); \ + }) + +#define AE_S16M_L_I(ae_ls_v, ars, ae_immls16) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int16 *__ars = (ars); \ + __builtin_xtensa_ae_s16m_l_i((__ae_ls_v), (__ars), (ae_immls16)); \ + }) + +#define AE_S16M_L_IU(ae_ls_v, ars, ae_immls16) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s16m_l_iu((__ae_ls_v), (ae_int16 **)&(ars), \ + (ae_immls16)); \ + }) + +#define AE_S16M_L_X(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int16 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16m_l_x((__ae_ls_v), (__ars), 
(__art)); \ + }) + +#define AE_S16M_L_XC(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16m_l_xc((__ae_ls_v), (ae_int16 **)&(ars), (__art)); \ + }) + +#define AE_S16M_L_XU(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16m_l_xu((__ae_ls_v), (ae_int16 **)&(ars), (__art)); \ + }) + +#define AE_S16X2M_I(ae_ls_v, ars, ae_immls32) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int16x2 *__ars = (ars); \ + __builtin_xtensa_ae_s16x2m_i((__ae_ls_v), (__ars), (ae_immls32)); \ + }) + +#define AE_S16X2M_IU(ae_ls_v, ars, ae_immls32) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s16x2m_iu((__ae_ls_v), (ae_int16x2 **)&(ars), \ + (ae_immls32)); \ + }) + +#define AE_S16X2M_X(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int16x2 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16x2m_x((__ae_ls_v), (__ars), (__art)); \ + }) + +#define AE_S16X2M_XC(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16x2m_xc((__ae_ls_v), (ae_int16x2 **)&(ars), \ + (__art)); \ + }) + +#define AE_S16X2M_XU(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16x2m_xu((__ae_ls_v), (ae_int16x2 **)&(ars), \ + (__art)); \ + }) + +#define AE_S16X4_I(ae_ls_v, ars, ae_immls64) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + ae_int16x4 *__ars = (ars); \ + __builtin_xtensa_ae_s16x4_i((__ae_ls_v), (__ars), (ae_immls64)); \ + }) + +#define AE_S16X4_IP(ae_ls_v, ars, ae_immls64pos) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s16x4_ip((__ae_ls_v), (ae_int16x4 **)&(ars), \ + (ae_immls64pos)); \ + }) + +#define AE_S16X4_RIC(ae_ls_v, ars) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s16x4_ric((__ae_ls_v), (ae_int16x4 **)&(ars)); \ + }) 
+ +#define AE_S16X4_RIP(ae_ls_v, ars) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s16x4_rip((__ae_ls_v), (ae_int16x4 **)&(ars)); \ + }) + +#define AE_S16X4_X(ae_ls_v, ars, art) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + ae_int16x4 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16x4_x((__ae_ls_v), (__ars), (__art)); \ + }) + +#define AE_S16X4_XC(ae_ls_v, ars, art) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16x4_xc((__ae_ls_v), (ae_int16x4 **)&(ars), (__art)); \ + }) + +#define AE_S16X4_XP(ae_ls_v, ars, art) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s16x4_xp((__ae_ls_v), (ae_int16x4 **)&(ars), (__art)); \ + }) + +#define AE_S24RA64S_I(ae_ls_v1, ars, ae_immls32) \ + ({ \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + ae_int32 *__ars = (ars); \ + __builtin_xtensa_ae_s24ra64s_i((__ae_ls_v1), (__ars), (ae_immls32)); \ + }) + +#define AE_S24RA64S_IP(ae_ls_v1, ars, ae_immls32) \ + ({ \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + __builtin_xtensa_ae_s24ra64s_ip((__ae_ls_v1), (ae_int32 **)&(ars), \ + (ae_immls32)); \ + }) + +#define AE_S24RA64S_X(ae_ls_v1, ars, art) \ + ({ \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + ae_int32 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s24ra64s_x((__ae_ls_v1), (__ars), (__art)); \ + }) + +#define AE_S24RA64S_XC(ae_ls_v1, ars, art) \ + ({ \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s24ra64s_xc((__ae_ls_v1), (ae_int32 **)&(ars), \ + (__art)); \ + }) + +#define AE_S24RA64S_XP(ae_ls_v1, ars, art) \ + ({ \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s24ra64s_xp((__ae_ls_v1), (ae_int32 **)&(ars), \ + (__art)); \ + }) + +#define AE_S24X2RA64S_IP(ae_ls_v2, ae_ls_v1, ars) \ + ({ \ + ae_int64 __ae_ls_v2 = (ae_ls_v2); \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + 
__builtin_xtensa_ae_s24x2ra64s_ip((__ae_ls_v2), (__ae_ls_v1), \ + (ae_int32x2 **)&(ars)); \ + }) + +#define AE_S32_L_I(ae_ls_v, ars, ae_immls32) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int32 *__ars = (ars); \ + __builtin_xtensa_ae_s32_l_i((__ae_ls_v), (__ars), (ae_immls32)); \ + }) + +#define AE_S32_L_IP(ae_ls_v, ars, ae_immls32) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s32_l_ip((__ae_ls_v), (ae_int32 **)&(ars), \ + (ae_immls32)); \ + }) + +#define AE_S32_L_X(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int32 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32_l_x((__ae_ls_v), (__ars), (__art)); \ + }) + +#define AE_S32_L_XC(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32_l_xc((__ae_ls_v), (ae_int32 **)&(ars), (__art)); \ + }) + +#define AE_S32_L_XP(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32_l_xp((__ae_ls_v), (ae_int32 **)&(ars), (__art)); \ + }) + +#define AE_S32F24_L_I(ae_ls_v, ars, ae_immls32) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int32 *__ars = (ars); \ + __builtin_xtensa_ae_s32f24_l_i((__ae_ls_v), (__ars), (ae_immls32)); \ + }) + +#define AE_S32F24_L_IP(ae_ls_v, ars, ae_immls32) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s32f24_l_ip((__ae_ls_v), (ae_int32 **)&(ars), \ + (ae_immls32)); \ + }) + +#define AE_S32F24_L_X(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int32 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32f24_l_x((__ae_ls_v), (__ars), (__art)); \ + }) + +#define AE_S32F24_L_XC(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32f24_l_xc((__ae_ls_v), (ae_int32 **)&(ars), \ + (__art)); \ + }) + +#define AE_S32F24_L_XP(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = 
(ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32f24_l_xp((__ae_ls_v), (ae_int32 **)&(ars), \ + (__art)); \ + }) + +#define AE_S32M_I(ae_ls_v, ars, ae_immls32) \ + ({ \ + ae_int64 __ae_ls_v = (ae_ls_v); \ + ae_int32 *__ars = (ars); \ + __builtin_xtensa_ae_s32m_i((__ae_ls_v), (__ars), (ae_immls32)); \ + }) + +#define AE_S32M_IU(ae_ls_v, ars, ae_immls32) \ + ({ \ + ae_int64 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s32m_iu((__ae_ls_v), (ae_int32 **)&(ars), \ + (ae_immls32)); \ + }) + +#define AE_S32M_X(ae_ls_v, ars, art) \ + ({ \ + ae_int64 __ae_ls_v = (ae_ls_v); \ + ae_int32 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32m_x((__ae_ls_v), (__ars), (__art)); \ + }) + +#define AE_S32M_XC(ae_ls_v, ars, art) \ + ({ \ + ae_int64 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32m_xc((__ae_ls_v), (ae_int32 **)&(ars), (__art)); \ + }) + +#define AE_S32M_XU(ae_ls_v, ars, art) \ + ({ \ + ae_int64 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32m_xu((__ae_ls_v), (ae_int32 **)&(ars), (__art)); \ + }) + +#define AE_S32RA64S_I(ae_ls_v1, ars, ae_immls32) \ + ({ \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + ae_int32 *__ars = (ars); \ + __builtin_xtensa_ae_s32ra64s_i((__ae_ls_v1), (__ars), (ae_immls32)); \ + }) + +#define AE_S32RA64S_IP(ae_ls_v1, ars, ae_immls32) \ + ({ \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + __builtin_xtensa_ae_s32ra64s_ip((__ae_ls_v1), (ae_int32 **)&(ars), \ + (ae_immls32)); \ + }) + +#define AE_S32RA64S_X(ae_ls_v1, ars, art) \ + ({ \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + ae_int32 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32ra64s_x((__ae_ls_v1), (__ars), (__art)); \ + }) + +#define AE_S32RA64S_XC(ae_ls_v1, ars, art) \ + ({ \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32ra64s_xc((__ae_ls_v1), (ae_int32 **)&(ars), \ + (__art)); \ + }) + +#define AE_S32RA64S_XP(ae_ls_v1, ars, art) \ + ({ \ + 
ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32ra64s_xp((__ae_ls_v1), (ae_int32 **)&(ars), \ + (__art)); \ + }) + +#define AE_S32X2_I(ae_ls_v, ars, ae_immls64) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int32x2 *__ars = (ars); \ + __builtin_xtensa_ae_s32x2_i((__ae_ls_v), (__ars), (ae_immls64)); \ + }) + +#define AE_S32X2_IP(ae_ls_v, ars, ae_immls64pos) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s32x2_ip((__ae_ls_v), (ae_int32x2 **)&(ars), \ + (ae_immls64pos)); \ + }) + +#define AE_S32X2_RIC(ae_ls_v, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s32x2_ric((__ae_ls_v), (ae_int32x2 **)&(ars)); \ + }) + +#define AE_S32X2_RIP(ae_ls_v, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s32x2_rip((__ae_ls_v), (ae_int32x2 **)&(ars)); \ + }) + +#define AE_S32X2_X(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int32x2 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32x2_x((__ae_ls_v), (__ars), (__art)); \ + }) + +#define AE_S32X2_XC(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32x2_xc((__ae_ls_v), (ae_int32x2 **)&(ars), (__art)); \ + }) + +#define AE_S32X2_XP(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32x2_xp((__ae_ls_v), (ae_int32x2 **)&(ars), (__art)); \ + }) + +#define AE_S32X2F24_I(ae_ls_v, ars, ae_immls64) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int32x2 *__ars = (ars); \ + __builtin_xtensa_ae_s32x2f24_i((__ae_ls_v), (__ars), (ae_immls64)); \ + }) + +#define AE_S32X2F24_IP(ae_ls_v, ars, ae_immls64pos) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s32x2f24_ip((__ae_ls_v), (ae_int32x2 **)&(ars), \ + (ae_immls64pos)); \ + }) + +#define AE_S32X2F24_RIC(ae_ls_v, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + 
__builtin_xtensa_ae_s32x2f24_ric((__ae_ls_v), (ae_int32x2 **)&(ars)); \ + }) + +#define AE_S32X2F24_RIP(ae_ls_v, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s32x2f24_rip((__ae_ls_v), (ae_int32x2 **)&(ars)); \ + }) + +#define AE_S32X2F24_X(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + ae_int32x2 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32x2f24_x((__ae_ls_v), (__ars), (__art)); \ + }) + +#define AE_S32X2F24_XC(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32x2f24_xc((__ae_ls_v), (ae_int32x2 **)&(ars), \ + (__art)); \ + }) + +#define AE_S32X2F24_XP(ae_ls_v, ars, art) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s32x2f24_xp((__ae_ls_v), (ae_int32x2 **)&(ars), \ + (__art)); \ + }) + +#define AE_S32X2RA64S_IP(ae_ls_v2, ae_ls_v1, ars) \ + ({ \ + ae_int64 __ae_ls_v2 = (ae_ls_v2); \ + ae_int64 __ae_ls_v1 = (ae_ls_v1); \ + __builtin_xtensa_ae_s32x2ra64s_ip((__ae_ls_v2), (__ae_ls_v1), \ + (ae_int32x2 **)&(ars)); \ + }) + +#define AE_S64_I(ae_ls_v, ars, ae_immls64) \ + ({ \ + ae_int64 __ae_ls_v = (ae_ls_v); \ + ae_int64 *__ars = (ars); \ + __builtin_xtensa_ae_s64_i((__ae_ls_v), (__ars), (ae_immls64)); \ + }) + +#define AE_S64_IP(ae_ls_v, ars, ae_immls64) \ + ({ \ + ae_int64 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_s64_ip((__ae_ls_v), (ae_int64 **)&(ars), \ + (ae_immls64)); \ + }) + +#define AE_S64_X(ae_ls_v, ars, art) \ + ({ \ + ae_int64 __ae_ls_v = (ae_ls_v); \ + ae_int64 *__ars = (ars); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s64_x((__ae_ls_v), (__ars), (__art)); \ + }) + +#define AE_S64_XC(ae_ls_v, ars, art) \ + ({ \ + ae_int64 __ae_ls_v = (ae_ls_v); \ + int __art = (int)(art); \ + __builtin_xtensa_ae_s64_xc((__ae_ls_v), (ae_int64 **)&(ars), (__art)); \ + }) + +#define AE_S64_XP(ae_ls_v, ars, art) \ + ({ \ + ae_int64 __ae_ls_v = (ae_ls_v); \ + int __art = 
(int)(art); \ + __builtin_xtensa_ae_s64_xp((__ae_ls_v), (ae_int64 **)&(ars), (__art)); \ + }) + +#define AE_SA16X4_IC(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa16x4_ic((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int16x4 **)&(ars)); \ + }) + +#define AE_SA16X4_IP(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa16x4_ip((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int16x4 **)&(ars)); \ + }) + +#define AE_SA16X4_RIC(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa16x4_ric((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int16x4 **)&(ars)); \ + }) + +#define AE_SA16X4_RIP(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int16x4 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa16x4_rip((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int16x4 **)&(ars)); \ + }) + +#define AE_SA24_L_IC(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa24_l_ic((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (void **)&(ars)); \ + }) + +#define AE_SA24_L_IP(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa24_l_ip((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (void **)&(ars)); \ + }) + +#define AE_SA24_L_RIC(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa24_l_ric((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (void **)&(ars)); \ + }) + +#define AE_SA24_L_RIP(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa24_l_rip((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (void **)&(ars)); \ + }) + +#define AE_SA24X2_IC(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa24x2_ic((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (void **)&(ars)); \ + }) + +#define AE_SA24X2_IP(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + 
__builtin_xtensa_ae_sa24x2_ip((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (void **)&(ars)); \ + }) + +#define AE_SA24X2_RIC(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa24x2_ric((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (void **)&(ars)); \ + }) + +#define AE_SA24X2_RIP(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa24x2_rip((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (void **)&(ars)); \ + }) + +#define AE_SA32X2_IC(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa32x2_ic((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int32x2 **)&(ars)); \ + }) + +#define AE_SA32X2_IP(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa32x2_ip((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int32x2 **)&(ars)); \ + }) + +#define AE_SA32X2_RIC(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa32x2_ric((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int32x2 **)&(ars)); \ + }) + +#define AE_SA32X2_RIP(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa32x2_rip((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int32x2 **)&(ars)); \ + }) + +#define AE_SA32X2F24_IC(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa32x2f24_ic((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int32x2 **)&(ars)); \ + }) + +#define AE_SA32X2F24_IP(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa32x2f24_ip((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int32x2 **)&(ars)); \ + }) + +#define AE_SA32X2F24_RIC(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 __ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa32x2f24_ric((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int32x2 **)&(ars)); \ + }) + +#define AE_SA32X2F24_RIP(ae_ls_v, ae_ls_su, ars) \ + ({ \ + ae_int32x2 
__ae_ls_v = (ae_ls_v); \ + __builtin_xtensa_ae_sa32x2f24_rip((__ae_ls_v), (ae_valign *)&(ae_ls_su), \ + (ae_int32x2 **)&(ars)); \ + }) + +#define AE_SA64NEG_FP(ae_ls_su, ars) \ + ({ \ + void *__ars = (void *)(ars); \ + __builtin_xtensa_ae_sa64neg_fp((ae_valign *)&(ae_ls_su), (__ars)); \ + }) + +#define AE_SA64POS_FP(ae_ls_su, ars) \ + ({ \ + void *__ars = (void *)(ars); \ + __builtin_xtensa_ae_sa64pos_fp((ae_valign *)&(ae_ls_su), (__ars)); \ + }) + +#define AE_SALIGN64_I(ae_ls_su, ars, ae_immls64) \ + ({ \ + ae_valign __ae_ls_su = (ae_ls_su); \ + ae_valign *__ars = (ars); \ + __builtin_xtensa_ae_salign64_i((__ae_ls_su), (__ars), (ae_immls64)); \ + }) + +#define AE_SAT16X4(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_sat16x4((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_SAT24S(ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_sat24s((__ae_arth_v1)); \ + }) + +#define AE_SAT48S(ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_sat48s((__ae_arth_v1)); \ + }) + +#define AE_SATQ56S(ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_satq56s((__ae_arth_v1)); \ + }) + +#define AE_SB(ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_sb((short **)&(ars), (__art)); \ + }) + +#define AE_SB_IC(ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_sb_ic((short **)&(ars), (__art)); \ + }) + +#define AE_SB_IP(ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_sb_ip((short **)&(ars), (__art)); \ + }) + +#define AE_SBF(ars) ({ __builtin_xtensa_ae_sbf((short **)&(ars)); }) + +#define AE_SBF_IC(ars) ({ __builtin_xtensa_ae_sbf_ic((short **)&(ars)); }) + +#define AE_SBF_IP(ars) ({ __builtin_xtensa_ae_sbf_ip((short **)&(ars)); }) + +#define AE_SBI(ars, art, ae_ohba2) \ + ({ \ + int __art = (int)(art); \ + 
__builtin_xtensa_ae_sbi((short **)&(ars), (__art), (ae_ohba2)); \ + }) + +#define AE_SBI_IC(ars, art, ae_ohba2) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_sbi_ic((short **)&(ars), (__art), (ae_ohba2)); \ + }) + +#define AE_SBI_IP(ars, art, ae_ohba2) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_sbi_ip((short **)&(ars), (__art), (ae_ohba2)); \ + }) + +#define AE_SEL16I(ae_dr_to_dr_v0, ae_dr_to_dr_v1, ae_selimm) \ + ({ \ + ae_int16x4 __ae_dr_to_dr_v0 = (ae_dr_to_dr_v0); \ + ae_int16x4 __ae_dr_to_dr_v1 = (ae_dr_to_dr_v1); \ + __builtin_xtensa_ae_sel16i((__ae_dr_to_dr_v0), (__ae_dr_to_dr_v1), \ + (ae_selimm)); \ + }) + +#define AE_SEL16I_N(ae_dr_to_dr_v0, ae_dr_to_dr_v1, ae_selimm_N) \ + ({ \ + ae_int16x4 __ae_dr_to_dr_v0 = (ae_dr_to_dr_v0); \ + ae_int16x4 __ae_dr_to_dr_v1 = (ae_dr_to_dr_v1); \ + __builtin_xtensa_ae_sel16i_n((__ae_dr_to_dr_v0), (__ae_dr_to_dr_v1), \ + (ae_selimm_N)); \ + }) + +#define AE_SEXT32(ae_dr_to_dr_v0, ae_opnd_tp7) \ + ({ \ + ae_int32x2 __ae_dr_to_dr_v0 = (ae_dr_to_dr_v0); \ + __builtin_xtensa_ae_sext32((__ae_dr_to_dr_v0), (ae_opnd_tp7)); \ + }) + +#define AE_SEXT32X2D16_10(ae_to_dr_v0) \ + ({ \ + ae_int16x4 __ae_to_dr_v0 = (ae_to_dr_v0); \ + __builtin_xtensa_ae_sext32x2d16_10((__ae_to_dr_v0)); \ + }) + +#define AE_SEXT32X2D16_32(ae_to_dr_v0) \ + ({ \ + ae_int16x4 __ae_to_dr_v0 = (ae_to_dr_v0); \ + __builtin_xtensa_ae_sext32x2d16_32((__ae_to_dr_v0)); \ + }) + +#define AE_SHA32(ars) \ + ({ \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_sha32((__ars)); \ + }) + +#define AE_SHORTSWAP(ae_to_dr_v0) \ + ({ \ + ae_int16x4 __ae_to_dr_v0 = (ae_to_dr_v0); \ + __builtin_xtensa_ae_shortswap((__ae_to_dr_v0)); \ + }) + +#define AE_SLAA16S(ae_shift_d0, ars) \ + ({ \ + ae_int16x4 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_slaa16s((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SLAA32(ae_shift_d0, ars) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + 
__builtin_xtensa_ae_slaa32((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SLAA32S(ae_shift_d0, ars) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_slaa32s((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SLAA64(ae_shift_d0, ars) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_slaa64((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SLAA64S(ae_shift_d0, ars) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_slaa64s((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SLAAQ56(ae_shift_d0, ars) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_slaaq56((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SLAI16S(ae_shift_d0, ae_osa16) \ + ({ \ + ae_int16x4 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slai16s((__ae_shift_d0), (ae_osa16)); \ + }) + +#define AE_SLAI24(ae_shift_d0, ae_osa32) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slai24((__ae_shift_d0), (ae_osa32)); \ + }) + +#define AE_SLAI24S(ae_shift_d0, ae_osa32) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slai24s((__ae_shift_d0), (ae_osa32)); \ + }) + +#define AE_SLAI32(ae_shift_d0, ae_osa32) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slai32((__ae_shift_d0), (ae_osa32)); \ + }) + +#define AE_SLAI32S(ae_shift_d0, ae_osa32) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slai32s((__ae_shift_d0), (ae_osa32)); \ + }) + +#define AE_SLAI64(ae_shift_d0, ae_osa64) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slai64((__ae_shift_d0), (ae_osa64)); \ + }) + +#define AE_SLAI64S(ae_shift_d0, ae_osa64) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slai64s((__ae_shift_d0), (ae_osa64)); \ + }) + +#define AE_SLAISQ56S(ae_shift_d0, 
ae_osa64) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slaisq56s((__ae_shift_d0), (ae_osa64)); \ + }) + +#define AE_SLAS24(ae_shift_d0) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slas24((__ae_shift_d0)); \ + }) + +#define AE_SLAS24S(ae_shift_d0) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slas24s((__ae_shift_d0)); \ + }) + +#define AE_SLAS32(ae_shift_d0) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slas32((__ae_shift_d0)); \ + }) + +#define AE_SLAS32S(ae_shift_d0) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slas32s((__ae_shift_d0)); \ + }) + +#define AE_SLAS64(ae_shift_d0) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slas64((__ae_shift_d0)); \ + }) + +#define AE_SLAS64S(ae_shift_d0) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slas64s((__ae_shift_d0)); \ + }) + +#define AE_SLASQ56(ae_shift_d0) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slasq56((__ae_shift_d0)); \ + }) + +#define AE_SLASSQ56S(ae_shift_d0) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_slassq56s((__ae_shift_d0)); \ + }) + +#define AE_SRA64_32(ae_shift_d0, ars) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_sra64_32((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SRAA16RS(ae_shift_d0, ars) \ + ({ \ + ae_int16x4 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_sraa16rs((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SRAA16S(ae_shift_d0, ars) \ + ({ \ + ae_int16x4 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_sraa16s((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SRAA32(ae_shift_d0, ars) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + 
__builtin_xtensa_ae_sraa32((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SRAA32RS(ae_shift_d0, ars) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_sraa32rs((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SRAA32S(ae_shift_d0, ars) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_sraa32s((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SRAA64(ae_shift_d0, ars) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_sraa64((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SRAI16(ae_shift_d0, ae_osa16) \ + ({ \ + ae_int16x4 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srai16((__ae_shift_d0), (ae_osa16)); \ + }) + +#define AE_SRAI16R(ae_shift_d0, ae_osa16) \ + ({ \ + ae_int16x4 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srai16r((__ae_shift_d0), (ae_osa16)); \ + }) + +#define AE_SRAI24(ae_shift_d0, ae_osa32) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srai24((__ae_shift_d0), (ae_osa32)); \ + }) + +#define AE_SRAI32(ae_shift_d0, ae_osa32) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srai32((__ae_shift_d0), (ae_osa32)); \ + }) + +#define AE_SRAI32R(ae_shift_d0, ae_osa32) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srai32r((__ae_shift_d0), (ae_osa32)); \ + }) + +#define AE_SRAI64(ae_shift_d0, ae_osa64) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srai64((__ae_shift_d0), (ae_osa64)); \ + }) + +#define AE_SRAS24(ae_shift_d0) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_sras24((__ae_shift_d0)); \ + }) + +#define AE_SRAS32(ae_shift_d0) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_sras32((__ae_shift_d0)); \ + }) + +#define AE_SRAS64(ae_shift_d0) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + 
__builtin_xtensa_ae_sras64((__ae_shift_d0)); \ + }) + +#define AE_SRLA32(ae_shift_d0, ars) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_srla32((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SRLA64(ae_shift_d0, ars) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_srla64((__ae_shift_d0), (__ars)); \ + }) + +#define AE_SRLI24(ae_shift_d0, ae_osa32) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srli24((__ae_shift_d0), (ae_osa32)); \ + }) + +#define AE_SRLI32(ae_shift_d0, ae_osa32) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srli32((__ae_shift_d0), (ae_osa32)); \ + }) + +#define AE_SRLI64(ae_shift_d0, ae_osa64) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srli64((__ae_shift_d0), (ae_osa64)); \ + }) + +#define AE_SRLS24(ae_shift_d0) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srls24((__ae_shift_d0)); \ + }) + +#define AE_SRLS32(ae_shift_d0) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srls32((__ae_shift_d0)); \ + }) + +#define AE_SRLS64(ae_shift_d0) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + __builtin_xtensa_ae_srls64((__ae_shift_d0)); \ + }) + +#define AE_SUB16(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int16x4 __ae_arth_v0 = (ae_arth_v0); \ + ae_int16x4 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_sub16((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_SUB16S(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int16x4 __ae_arth_v0 = (ae_arth_v0); \ + ae_int16x4 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_sub16s((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_SUB24S(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_sub24s((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define 
AE_SUB32(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_sub32((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_SUB32S(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_sub32s((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_SUB64(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_sub64((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_SUB64S(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int64 __ae_arth_v0 = (ae_arth_v0); \ + ae_int64 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_sub64s((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_SUBADD32(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_subadd32((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_SUBADD32S(ae_arth_v0, ae_arth_v1) \ + ({ \ + ae_int32x2 __ae_arth_v0 = (ae_arth_v0); \ + ae_int32x2 __ae_arth_v1 = (ae_arth_v1); \ + __builtin_xtensa_ae_subadd32s((__ae_arth_v0), (__ae_arth_v1)); \ + }) + +#define AE_TRUNCA32F64S_L(ae_shift_d0, ae_shift_sd, ars) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + ae_int64 __ae_shift_sd = (ae_shift_sd); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_trunca32f64s_l((__ae_shift_d0), (__ae_shift_sd), \ + (__ars)); \ + }) + +#define AE_TRUNCA32X2F64S(ae_shift_d0, ae_shift_sd, ars) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + ae_int64 __ae_shift_sd = (ae_shift_sd); \ + int __ars = (int)(ars); \ + __builtin_xtensa_ae_trunca32x2f64s((__ae_shift_d0), (__ae_shift_sd), \ + (__ars)); \ + }) + +#define AE_TRUNCI32F64S_L(ae_shift_d0, ae_shift_sd, ae_osa16) \ + ({ \ + ae_int32x2 __ae_shift_d0 = (ae_shift_d0); \ + ae_int64 __ae_shift_sd = (ae_shift_sd); \ + 
__builtin_xtensa_ae_trunci32f64s_l((__ae_shift_d0), (__ae_shift_sd), \ + (ae_osa16)); \ + }) + +#define AE_TRUNCI32X2F64S(ae_shift_d0, ae_shift_sd, ae_osa16) \ + ({ \ + ae_int64 __ae_shift_d0 = (ae_shift_d0); \ + ae_int64 __ae_shift_sd = (ae_shift_sd); \ + __builtin_xtensa_ae_trunci32x2f64s((__ae_shift_d0), (__ae_shift_sd), \ + (ae_osa16)); \ + }) + +#define AE_VLDL16C(ars) \ + ({ __builtin_xtensa_ae_vldl16c((const short **)&(ars)); }) + +#define AE_VLDL16C_IC(ars) \ + ({ __builtin_xtensa_ae_vldl16c_ic((const short **)&(ars)); }) + +#define AE_VLDL16C_IP(ars) \ + ({ __builtin_xtensa_ae_vldl16c_ip((const short **)&(ars)); }) + +#define AE_VLDL16T(ars) \ + ({ \ + short *__ars = (short *)(ars); \ + __builtin_xtensa_ae_vldl16t((__ars)); \ + }) + +#define AE_VLDL32T(ars) \ + ({ \ + int *__ars = (int *)(ars); \ + __builtin_xtensa_ae_vldl32t((__ars)); \ + }) + +#define AE_VLDSHT(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_ae_vldsht((__art)); \ + }) + +#define AE_VLEL16T(br, art, ars) \ + ({ \ + short *__ars = (short *)(ars); \ + __builtin_xtensa_ae_vlel16t((xtbool *)&(br), (int *)&(art), (__ars)); \ + }) + +#define AE_VLEL32T(br, art, ars) \ + ({ \ + int *__ars = (int *)(ars); \ + __builtin_xtensa_ae_vlel32t((xtbool *)&(br), (int *)&(art), (__ars)); \ + }) + +#define AE_VLES16C(ars) ({ __builtin_xtensa_ae_vles16c((short **)&(ars)); }) + +#define AE_VLES16C_IC(ars) \ + ({ __builtin_xtensa_ae_vles16c_ic((short **)&(ars)); }) + +#define AE_VLES16C_IP(ars) \ + ({ __builtin_xtensa_ae_vles16c_ip((short **)&(ars)); }) + +#define AE_XOR(ae_dr_to_dr_v0, ae_dr_to_dr_v1) \ + ({ \ + ae_int64 __ae_dr_to_dr_v0 = (ae_dr_to_dr_v0); \ + ae_int64 __ae_dr_to_dr_v1 = (ae_dr_to_dr_v1); \ + __builtin_xtensa_ae_xor((__ae_dr_to_dr_v0), (__ae_dr_to_dr_v1)); \ + }) + +#define AE_ZALIGN64() ({ __builtin_xtensa_ae_zalign64(); }) + +#define RUR_AE_BITHEAD() ({ __builtin_xtensa_rur_ae_bithead(); }) + +#define RUR_AE_BITPTR() ({ __builtin_xtensa_rur_ae_bitptr(); }) + +#define 
RUR_AE_BITSUSED() ({ __builtin_xtensa_rur_ae_bitsused(); }) + +#define RUR_AE_CBEGIN0() ({ __builtin_xtensa_rur_ae_cbegin0(); }) + +#define RUR_AE_CEND0() ({ __builtin_xtensa_rur_ae_cend0(); }) + +#define RUR_AE_CW_SD_NO() ({ __builtin_xtensa_rur_ae_cw_sd_no(); }) + +#define RUR_AE_CWRAP() ({ __builtin_xtensa_rur_ae_cwrap(); }) + +#define RUR_AE_FIRST_TS() ({ __builtin_xtensa_rur_ae_first_ts(); }) + +#define RUR_AE_NEXTOFFSET() ({ __builtin_xtensa_rur_ae_nextoffset(); }) + +#define RUR_AE_OVERFLOW() ({ __builtin_xtensa_rur_ae_overflow(); }) + +#define RUR_AE_OVF_SAR() ({ __builtin_xtensa_rur_ae_ovf_sar(); }) + +#define RUR_AE_SAR() ({ __builtin_xtensa_rur_ae_sar(); }) + +#define RUR_AE_SEARCHDONE() ({ __builtin_xtensa_rur_ae_searchdone(); }) + +#define RUR_AE_TABLESIZE() ({ __builtin_xtensa_rur_ae_tablesize(); }) + +#define RUR_AE_TS_FTS_BU_BP() ({ __builtin_xtensa_rur_ae_ts_fts_bu_bp(); }) + +#define WUR_AE_BITHEAD(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_bithead((__art)); \ + }) + +#define WUR_AE_BITPTR(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_bitptr((__art)); \ + }) + +#define WUR_AE_BITSUSED(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_bitsused((__art)); \ + }) + +#define WUR_AE_CBEGIN0(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_cbegin0((__art)); \ + }) + +#define WUR_AE_CEND0(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_cend0((__art)); \ + }) + +#define WUR_AE_CW_SD_NO(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_cw_sd_no((__art)); \ + }) + +#define WUR_AE_CWRAP(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_cwrap((__art)); \ + }) + +#define WUR_AE_FIRST_TS(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_first_ts((__art)); \ + }) + +#define WUR_AE_NEXTOFFSET(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_nextoffset((__art)); \ + }) + +#define 
WUR_AE_OVERFLOW(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_overflow((__art)); \ + }) + +#define WUR_AE_OVF_SAR(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_ovf_sar((__art)); \ + }) + +#define WUR_AE_SAR(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_sar((__art)); \ + }) + +#define WUR_AE_SEARCHDONE(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_searchdone((__art)); \ + }) + +#define WUR_AE_TABLESIZE(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_tablesize((__art)); \ + }) + +#define WUR_AE_TS_FTS_BU_BP(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_wur_ae_ts_fts_bu_bp((__art)); \ + }) + +#define XT_ABS_S(frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_abs_s((__frs)); \ + }) + +#define XT_ADD_S(frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_add_s((__frs), (__frt)); \ + }) + +#define XT_ADDEXP_S(frr, frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_addexp_s((xtfloat *)&(frr), (__frs)); \ + }) + +#define XT_ADDEXPM_S(frr, frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_addexpm_s((xtfloat *)&(frr), (__frs)); \ + }) + +#define XT_CEIL_S(frs, imm_t) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_ceil_s((__frs), (imm_t)); \ + }) + +#define XT_DIV0_S(frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_div0_s((__frs)); \ + }) + +#define XT_DIVN_S(frr, frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_divn_s((xtfloat *)&(frr), (__frs), (__frt)); \ + }) + +#define XT_FLOAT_S(ars, imm_t) \ + ({ \ + int __ars = (int)(ars); \ + __builtin_xtensa_xt_float_s((__ars), (imm_t)); \ + }) + +#define XT_FLOOR_S(frs, imm_t) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_floor_s((__frs), (imm_t)); \ + }) + 
+#define XT_LSI(ars, imm8x4) \ + ({ \ + xtfloat *__ars = (xtfloat *)(ars); \ + __builtin_xtensa_xt_lsi((__ars), (imm8x4)); \ + }) + +#define XT_LSIP(frt, ars, imm8x4) \ + ({ \ + __builtin_xtensa_xt_lsip((xtfloat *)&(frt), (const xtfloat **)&(ars), \ + (imm8x4)); \ + }) + +#define XT_LSX(ars, art) \ + ({ \ + xtfloat *__ars = (xtfloat *)(ars); \ + int __art = (int)(art); \ + __builtin_xtensa_xt_lsx((__ars), (__art)); \ + }) + +#define XT_LSXP(frr, ars, art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_xt_lsxp((xtfloat *)&(frr), (const xtfloat **)&(ars), \ + (__art)); \ + }) + +#define XT_MADD_S(frr, frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_madd_s((xtfloat *)&(frr), (__frs), (__frt)); \ + }) + +#define XT_MADDN_S(frr, frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_maddn_s((xtfloat *)&(frr), (__frs), (__frt)); \ + }) + +#define XT_MKDADJ_S(frr, frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_mkdadj_s((xtfloat *)&(frr), (__frs)); \ + }) + +#define XT_MKSADJ_S(frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_mksadj_s((__frs)); \ + }) + +#define XT_MOV_S(frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_mov_s((__frs)); \ + }) + +#define XT_MOVEQZ_S(frr, frs, art) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + int __art = (int)(art); \ + __builtin_xtensa_xt_moveqz_s((xtfloat *)&(frr), (__frs), (__art)); \ + }) + +#define XT_MOVF_S(frr, frs, bt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtbool __bt = (bt); \ + __builtin_xtensa_xt_movf_s((xtfloat *)&(frr), (__frs), (__bt)); \ + }) + +#define XT_MOVGEZ_S(frr, frs, art) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + int __art = (int)(art); \ + __builtin_xtensa_xt_movgez_s((xtfloat *)&(frr), (__frs), (__art)); \ + }) + +#define XT_MOVLTZ_S(frr, frs, art) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + int 
__art = (int)(art); \ + __builtin_xtensa_xt_movltz_s((xtfloat *)&(frr), (__frs), (__art)); \ + }) + +#define XT_MOVNEZ_S(frr, frs, art) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + int __art = (int)(art); \ + __builtin_xtensa_xt_movnez_s((xtfloat *)&(frr), (__frs), (__art)); \ + }) + +#define XT_MOVT_S(frr, frs, bt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtbool __bt = (bt); \ + __builtin_xtensa_xt_movt_s((xtfloat *)&(frr), (__frs), (__bt)); \ + }) + +#define XT_MSUB_S(frr, frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_msub_s((xtfloat *)&(frr), (__frs), (__frt)); \ + }) + +#define XT_MUL_S(frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_mul_s((__frs), (__frt)); \ + }) + +#define XT_NEG_S(frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_neg_s((__frs)); \ + }) + +#define XT_NEXP01_S(frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_nexp01_s((__frs)); \ + }) + +#define XT_OEQ_S(frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_oeq_s((__frs), (__frt)); \ + }) + +#define XT_OLE_S(frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_ole_s((__frs), (__frt)); \ + }) + +#define XT_OLT_S(frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_olt_s((__frs), (__frt)); \ + }) + +#define XT_RECIP0_S(frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_recip0_s((__frs)); \ + }) + +#define XT_RFR(frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_rfr((__frs)); \ + }) + +#define XT_ROUND_S(frs, imm_t) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_round_s((__frs), (imm_t)); \ + }) + +#define XT_RSQRT0_S(frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + 
__builtin_xtensa_xt_rsqrt0_s((__frs)); \ + }) + +#define XT_RUR_FCR() ({ __builtin_xtensa_xt_rur_fcr(); }) + +#define XT_RUR_FSR() ({ __builtin_xtensa_xt_rur_fsr(); }) + +#define XT_SQRT0_S(frs) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_sqrt0_s((__frs)); \ + }) + +#define XT_SSI(frt, ars, imm8x4) \ + ({ \ + xtfloat __frt = (xtfloat)(frt); \ + xtfloat *__ars = (xtfloat *)(ars); \ + __builtin_xtensa_xt_ssi((__frt), (__ars), (imm8x4)); \ + }) + +#define XT_SSIP(frt, ars, imm8x4) \ + ({ \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_ssip((__frt), (xtfloat **)&(ars), (imm8x4)); \ + }) + +#define XT_SSX(frr, ars, art) \ + ({ \ + xtfloat __frr = (xtfloat)(frr); \ + xtfloat *__ars = (xtfloat *)(ars); \ + int __art = (int)(art); \ + __builtin_xtensa_xt_ssx((__frr), (__ars), (__art)); \ + }) + +#define XT_SSXP(frr, ars, art) \ + ({ \ + xtfloat __frr = (xtfloat)(frr); \ + int __art = (int)(art); \ + __builtin_xtensa_xt_ssxp((__frr), (xtfloat **)&(ars), (__art)); \ + }) + +#define XT_SUB_S(frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_sub_s((__frs), (__frt)); \ + }) + +#define XT_TRUNC_S(frs, imm_t) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_trunc_s((__frs), (imm_t)); \ + }) + +#define XT_UEQ_S(frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_ueq_s((__frs), (__frt)); \ + }) + +#define XT_UFLOAT_S(ars, imm_t) \ + ({ \ + int __ars = (int)(ars); \ + __builtin_xtensa_xt_ufloat_s((__ars), (imm_t)); \ + }) + +#define XT_ULE_S(frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_ule_s((__frs), (__frt)); \ + }) + +#define XT_ULT_S(frs, frt) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_ult_s((__frs), (__frt)); \ + }) + +#define XT_UN_S(frs, frt) \ + ({ \ + xtfloat __frs = 
(xtfloat)(frs); \ + xtfloat __frt = (xtfloat)(frt); \ + __builtin_xtensa_xt_un_s((__frs), (__frt)); \ + }) + +#define XT_UTRUNC_S(frs, imm_t) \ + ({ \ + xtfloat __frs = (xtfloat)(frs); \ + __builtin_xtensa_xt_utrunc_s((__frs), (imm_t)); \ + }) + +#define XT_WFR(ars) \ + ({ \ + int __ars = (int)(ars); \ + __builtin_xtensa_xt_wfr((__ars)); \ + }) + +#define XT_WUR_FCR(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_xt_wur_fcr((__art)); \ + }) + +#define XT_WUR_FSR(art) \ + ({ \ + int __art = (int)(art); \ + __builtin_xtensa_xt_wur_fsr((__art)); \ + }) + +#endif /* __XTENSAHIFI3INTRIN_H */ From 8d3dd40e14539b8b2cf2c5b1b86620a1c7121b3c Mon Sep 17 00:00:00 2001 From: Maciej Czekaj Date: Mon, 16 Oct 2023 10:42:09 +0000 Subject: [PATCH 216/289] [Xtensa] Add support for decoding from HIFI namespace --- .../Disassembler/XtensaDisassembler.cpp | 52 +++++++++++++++++-- 1 file changed, 48 insertions(+), 4 deletions(-) diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index 2265dd99b609d..64ce0e4ab9b4b 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -44,6 +44,11 @@ class XtensaDisassembler : public MCDisassembler { bool hasESP32S3Ops() const { return STI.getFeatureBits()[Xtensa::FeatureESP32S3Ops]; + + } + + bool hasHIFI3() const { + return STI.getFeatureBits()[Xtensa::FeatureHIFI3]; } DecodeStatus getInstruction(MCInst &Instr, uint64_t &Size, @@ -832,7 +837,7 @@ static DecodeStatus decodeMem32nOperand(MCInst &Inst, uint64_t Imm, /// Read two bytes from the ArrayRef and return 16 bit data sorted /// according to the given endianness. static DecodeStatus readInstruction16(ArrayRef Bytes, uint64_t Address, - uint64_t &Size, uint32_t &Insn, + uint64_t &Size, uint64_t &Insn, bool IsLittleEndian) { // We want to read exactly 2 Bytes of data. 
if (Bytes.size() < 2) { @@ -851,7 +856,7 @@ static DecodeStatus readInstruction16(ArrayRef Bytes, uint64_t Address, /// Read three bytes from the ArrayRef and return 24 bit data static DecodeStatus readInstruction24(ArrayRef Bytes, uint64_t Address, - uint64_t &Size, uint32_t &Insn, + uint64_t &Size, uint64_t &Insn, bool IsLittleEndian) { // We want to read exactly 3 Bytes of data. if (Bytes.size() < 3) { @@ -870,7 +875,7 @@ static DecodeStatus readInstruction24(ArrayRef Bytes, uint64_t Address, /// Read three bytes from the ArrayRef and return 32 bit data static DecodeStatus readInstruction32(ArrayRef Bytes, uint64_t Address, - uint64_t &Size, uint32_t &Insn, + uint64_t &Size, uint64_t &Insn, bool IsLittleEndian) { // We want to read exactly 4 Bytes of data. if (Bytes.size() < 4) { @@ -887,13 +892,37 @@ static DecodeStatus readInstruction32(ArrayRef Bytes, uint64_t Address, return MCDisassembler::Success; } +/// Read InstSize bytes from the ArrayRef and return 24 bit data +static DecodeStatus readInstructionN(ArrayRef Bytes, uint64_t Address, + unsigned InstSize, + uint64_t &Size, uint64_t &Insn, + bool IsLittleEndian) { + // We want to read exactly 3 Bytes of data. 
+ if (Bytes.size() < InstSize) { + Size = 0; + return MCDisassembler::Fail; + } + + if (!IsLittleEndian) { + report_fatal_error("Big-endian mode currently is not supported!"); + } else { + Insn = 0; + for (unsigned i = 0; i < InstSize; i++) + Insn |= (Bytes[i] << 8*i); + } + + Size = InstSize; + return MCDisassembler::Success; +} + + #include "XtensaGenDisassemblerTables.inc" DecodeStatus XtensaDisassembler::getInstruction(MCInst &MI, uint64_t &Size, ArrayRef Bytes, uint64_t Address, raw_ostream &CS) const { - uint32_t Insn; + uint64_t Insn; DecodeStatus Result; // Parse 16-bit instructions @@ -946,5 +975,20 @@ DecodeStatus XtensaDisassembler::getInstruction(MCInst &MI, uint64_t &Size, } } + if (hasHIFI3()) { + LLVM_DEBUG(dbgs() << "Trying Xtensa HIFI3 24-bit instruction table :\n"); + Result = decodeInstruction(DecoderTableHIFI324, MI, Insn, Address, this, STI); + if(Result != MCDisassembler::Fail) + return Result; + + Result = readInstructionN(Bytes, Address, 48, Size, Insn, IsLittleEndian); + if (Result == MCDisassembler::Fail) + return MCDisassembler::Fail; + + LLVM_DEBUG(dbgs() << "Trying Xtensa HIFI3 48-bit instruction table :\n"); + Result = decodeInstruction(DecoderTableHIFI348, MI, Insn, Address, this, STI); + if(Result != MCDisassembler::Fail) + return Result; + } return Result; } From 2192436fcadb9c333ae71755b3013a5ef0040d20 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 1 Oct 2024 22:21:01 +0300 Subject: [PATCH 217/289] [Xtensa] Implement support for `__attribute__((short__call))` and `__attribute__((near))` --- clang/include/clang/Basic/Attr.td | 16 ++++++++++++++-- clang/include/clang/Basic/AttrDocs.td | 13 +++++++------ clang/include/clang/Sema/SemaMIPS.h | 1 + clang/include/clang/Sema/SemaXtensa.h | 3 +++ clang/lib/CodeGen/Targets/Xtensa.cpp | 10 ++++++++++ clang/lib/Sema/SemaDeclAttr.cpp | 19 +++++++++++++++++++ clang/lib/Sema/SemaMIPS.cpp | 13 +++++++++++++ clang/lib/Sema/SemaXtensa.cpp | 17 +++++++++++++++++ 
clang/test/CodeGen/Xtensa/xtensa-short-call.c | 16 ++++++++++++++++ ...a-attribute-supported-attributes-list.test | 2 +- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 9 ++++++++- 11 files changed, 109 insertions(+), 10 deletions(-) create mode 100644 clang/test/CodeGen/Xtensa/xtensa-short-call.c diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td index 46d0a66d59c37..77f088142617e 100644 --- a/clang/include/clang/Basic/Attr.td +++ b/clang/include/clang/Basic/Attr.td @@ -459,6 +459,7 @@ def TargetX86 : TargetArch<["x86"]>; def TargetAnyX86 : TargetArch<["x86", "x86_64"]>; def TargetWebAssembly : TargetArch<["wasm32", "wasm64"]>; def TargetNVPTX : TargetArch<["nvptx", "nvptx64"]>; +def TargetXtensa : TargetArch<["xtensa"]>; def TargetWindows : TargetSpec { let OSes = ["Win32"]; } @@ -1921,11 +1922,22 @@ def MipsLongCall : InheritableAttr, TargetSpecificAttr { def MipsShortCall : InheritableAttr, TargetSpecificAttr { let Spellings = [GCC<"short_call">, GCC<"near">]; let Subjects = SubjectList<[Function]>; - let Documentation = [MipsShortCallStyleDocs]; - let SimpleHandler = 1; + let Documentation = [ShortCallStyleDocs]; + let ParseKind = "ShortCall"; + let HasCustomParsing = 1; + let SemaHandler = 1; } def : MutualExclusions<[MipsLongCall, MipsShortCall]>; +def XtensaShortCall : InheritableAttr, TargetSpecificAttr { + let Spellings = [GCC<"short_call">, GCC<"near">]; + let Subjects = SubjectList<[Function]>; + let Documentation = [ShortCallStyleDocs]; + let ParseKind = "ShortCall"; + let HasCustomParsing = 1; + let SemaHandler = 1; +} + def M68kInterrupt : InheritableAttr, TargetSpecificAttr { // NOTE: If you add any additional spellings, ARMInterrupt's, MipsInterrupt's // MSP430Interrupt's and AnyX86Interrupt's spellings must match. 
diff --git a/clang/include/clang/Basic/AttrDocs.td b/clang/include/clang/Basic/AttrDocs.td index b5d468eb5ec95..f66d88ace8634 100644 --- a/clang/include/clang/Basic/AttrDocs.td +++ b/clang/include/clang/Basic/AttrDocs.td @@ -2392,18 +2392,19 @@ as ``-mlong-calls`` and ``-mno-long-calls``. }]; } -def MipsShortCallStyleDocs : Documentation { +def ShortCallStyleDocs : Documentation { let Category = DocCatFunction; let Heading = "short_call, near"; let Content = [{ Clang supports the ``__attribute__((long_call))``, ``__attribute__((far))``, ``__attribute__((short__call))``, and ``__attribute__((near))`` attributes -on MIPS targets. These attributes may only be added to function declarations -and change the code generated by the compiler when directly calling +on MIPS and Xtensa targets. These attributes may only be added to function +declarations and change the code generated by the compiler when directly calling the function. The ``short_call`` and ``near`` attributes are synonyms and -allow calls to the function to be made using the ``jal`` instruction, which -requires the function to be located in the same naturally aligned 256MB segment -as the caller. The ``long_call`` and ``far`` attributes are synonyms and +allow calls to the function to be made using the ``jal`` instruction for MIPS and +``calln`` instruction for Xtensa, which requires the function to be located +in the same naturally aligned 256MB segment as the caller. +The ``long_call`` and ``far`` attributes are synonyms and require the use of a different call sequence that works regardless of the distance between the functions. 
diff --git a/clang/include/clang/Sema/SemaMIPS.h b/clang/include/clang/Sema/SemaMIPS.h index 6366dce57626a..fc9198c1c8bf9 100644 --- a/clang/include/clang/Sema/SemaMIPS.h +++ b/clang/include/clang/Sema/SemaMIPS.h @@ -31,6 +31,7 @@ class SemaMIPS : public SemaBase { CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); void handleInterruptAttr(Decl *D, const ParsedAttr &AL); + void handleMipsShortCall(Decl *D, const ParsedAttr &AL); }; } // namespace clang diff --git a/clang/include/clang/Sema/SemaXtensa.h b/clang/include/clang/Sema/SemaXtensa.h index cadd7705bfe81..2f5b935c1a327 100644 --- a/clang/include/clang/Sema/SemaXtensa.h +++ b/clang/include/clang/Sema/SemaXtensa.h @@ -15,6 +15,7 @@ #include "clang/AST/Expr.h" #include "clang/Basic/TargetInfo.h" +#include "clang/Sema/Attr.h" #include "clang/Sema/SemaBase.h" namespace clang { @@ -26,6 +27,8 @@ class SemaXtensa : public SemaBase { CallExpr *TheCall); bool SemaBuiltinXtensaConversion(unsigned BuiltinID, CallExpr *TheCall); + + void handleXtensaShortCall(Decl *D, const ParsedAttr &AL); }; } // namespace clang diff --git a/clang/lib/CodeGen/Targets/Xtensa.cpp b/clang/lib/CodeGen/Targets/Xtensa.cpp index 4ad07648fef40..d5c9cd6f7d8c8 100644 --- a/clang/lib/CodeGen/Targets/Xtensa.cpp +++ b/clang/lib/CodeGen/Targets/Xtensa.cpp @@ -257,6 +257,16 @@ class XtensaTargetCodeGenInfo : public TargetCodeGenInfo { public: XtensaTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) : TargetCodeGenInfo(std::make_unique(CGT)) {} + + void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, + CodeGen::CodeGenModule &CGM) const override { + const FunctionDecl *FD = dyn_cast_or_null(D); + if (!FD) + return; + llvm::Function *Fn = cast(GV); + if (FD->hasAttr()) + Fn->addFnAttr("short-call"); + } }; } // namespace diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp index e2eada24f9fcc..60e37bcb31cdc 100644 --- a/clang/lib/Sema/SemaDeclAttr.cpp +++ 
b/clang/lib/Sema/SemaDeclAttr.cpp @@ -59,6 +59,7 @@ #include "clang/Sema/SemaSwift.h" #include "clang/Sema/SemaWasm.h" #include "clang/Sema/SemaX86.h" +#include "clang/Sema/SemaXtensa.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/STLForwardCompat.h" #include "llvm/ADT/StringExtras.h" @@ -6247,6 +6248,20 @@ static bool MustDelayAttributeArguments(const ParsedAttr &AL) { return false; } +static void handleShortCallAttr(Sema &S, Decl *D, const ParsedAttr &AL) { + switch (S.Context.getTargetInfo().getTriple().getArch()) { + case llvm::Triple::xtensa: + S.Xtensa().handleXtensaShortCall(D, AL); + break; + case llvm::Triple::mips64: + case llvm::Triple::mips: + S.MIPS().handleMipsShortCall(D, AL); + break; + default: + break; + } +} + /// ProcessDeclAttribute - Apply the specific attribute to the specified decl if /// the attribute applies to decls. If the attribute is a type attribute, just /// silently ignore it if a GNU attribute. @@ -7100,6 +7115,10 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL, case ParsedAttr::AT_VTablePointerAuthentication: handleVTablePointerAuthentication(S, D, AL); break; + + case ParsedAttr::AT_ShortCall: + handleShortCallAttr(S, D, AL); + break; } } diff --git a/clang/lib/Sema/SemaMIPS.cpp b/clang/lib/Sema/SemaMIPS.cpp index 269d927903c5d..75f35f6f166d4 100644 --- a/clang/lib/Sema/SemaMIPS.cpp +++ b/clang/lib/Sema/SemaMIPS.cpp @@ -297,4 +297,17 @@ void SemaMIPS::handleInterruptAttr(Decl *D, const ParsedAttr &AL) { MipsInterruptAttr(getASTContext(), AL, Kind)); } +void SemaMIPS::handleMipsShortCall(Decl *D, const ParsedAttr &AL) { + if (!isFuncOrMethodForAttrSubject(D)) { + Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type) + << "'short_call'" << ExpectedFunction; + return; + } + + if (!AL.checkExactlyNumArgs(SemaRef, 0)) + return; + + handleSimpleAttribute(*this, D, AL); +} + } // namespace clang diff --git a/clang/lib/Sema/SemaXtensa.cpp b/clang/lib/Sema/SemaXtensa.cpp index 
4138c7fb7a670..c81f273985cc4 100644 --- a/clang/lib/Sema/SemaXtensa.cpp +++ b/clang/lib/Sema/SemaXtensa.cpp @@ -11,7 +11,11 @@ //===----------------------------------------------------------------------===// #include "clang/Sema/SemaXtensa.h" +#include "clang/AST/DeclBase.h" +#include "clang/Basic/DiagnosticSema.h" #include "clang/Basic/TargetBuiltins.h" +#include "clang/Sema/Attr.h" +#include "clang/Sema/ParsedAttr.h" #include "clang/Sema/Sema.h" namespace clang { @@ -387,4 +391,17 @@ bool SemaXtensa::SemaBuiltinXtensaConversion(unsigned BuiltinID, CallExpr *TheCa return false; } +void SemaXtensa::handleXtensaShortCall(Decl *D, const ParsedAttr &AL){ + if (!isFuncOrMethodForAttrSubject(D)) { + Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type) + << "'short_call'" << ExpectedFunction; + return; + } + + if (!AL.checkExactlyNumArgs(SemaRef, 0)) + return; + + handleSimpleAttribute(*this, D, AL); +} + } // namespace clang diff --git a/clang/test/CodeGen/Xtensa/xtensa-short-call.c b/clang/test/CodeGen/Xtensa/xtensa-short-call.c new file mode 100644 index 0000000000000..54858ff5d4ec1 --- /dev/null +++ b/clang/test/CodeGen/Xtensa/xtensa-short-call.c @@ -0,0 +1,16 @@ +// RUN: %clang_cc1 -triple xtensa -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple xtensa -S -o - %s | FileCheck %s --check-prefix=ASM + +void foo1 (void); +void __attribute__((short_call)) foo (void); +void __attribute__((near)) bar (void) { foo1(); foo(); } + +// CHECK: define{{.*}} void @bar() [[NEAR:#[0-9]+]] + +// CHECK: declare void @foo() [[SHORTDECL:#[0-9]+]] + +// CHECK: attributes [[NEAR]] = { {{.*}} "short-call" {{.*}} } +// CHECK: attributes [[SHORTDECL]] = { {{.*}} "short-call" {{.*}} } + +// ASM: callx8 a8 +// ASM: call8 foo diff --git a/clang/test/Misc/pragma-attribute-supported-attributes-list.test b/clang/test/Misc/pragma-attribute-supported-attributes-list.test index e082db698ef0c..7aa38e1cfaac6 100644 --- 
a/clang/test/Misc/pragma-attribute-supported-attributes-list.test +++ b/clang/test/Misc/pragma-attribute-supported-attributes-list.test @@ -101,7 +101,7 @@ // CHECK-NEXT: MinVectorWidth (SubjectMatchRule_function) // CHECK-NEXT: Mips16 (SubjectMatchRule_function) // CHECK-NEXT: MipsLongCall (SubjectMatchRule_function) -// CHECK-NEXT: MipsShortCall (SubjectMatchRule_function) +// CHECK-NEXT: ShortCall (SubjectMatchRule_function) // CHECK-NEXT: NSConsumed (SubjectMatchRule_variable_is_parameter) // CHECK-NEXT: NSConsumesSelf (SubjectMatchRule_objc_method) // CHECK-NEXT: NSErrorDomain (SubjectMatchRule_enum) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index cdf5771e3c1aa..3abef2fc97710 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -1343,6 +1343,7 @@ XtensaTargetLowering::LowerCall(CallLoweringInfo &CLI, } std::string name; unsigned char TF = 0; + bool HasShortCallAttr = false; // Accept direct calls by converting symbolic call addresses to the // associated Target* opcodes. 
@@ -1356,9 +1357,15 @@ XtensaTargetLowering::LowerCall(CallLoweringInfo &CLI, } else if (GlobalAddressSDNode *G = dyn_cast(Callee)) { const GlobalValue *GV = G->getGlobal(); name = GV->getName().str(); + if (auto *F = dyn_cast(GV)) + if (F->hasFnAttribute("short-call")) { + HasShortCallAttr = true; + Callee = DAG.getTargetGlobalAddress( + G->getGlobal(), DL, Callee.getValueType(), 0, 0 /* TargetFlags */); + } } - if ((!name.empty()) && isLongCall(name.c_str())) { + if (!name.empty() && isLongCall(name.c_str()) && !HasShortCallAttr) { // Create a constant pool entry for the callee address XtensaCP::XtensaCPModifier Modifier = XtensaCP::no_modifier; From d25b79026d7ad8b8ff32246254418601c8203d10 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Thu, 19 Oct 2023 18:06:44 +0300 Subject: [PATCH 218/289] [RISCV] Add 'tcontrol' CSR register --- llvm/lib/Target/RISCV/RISCVSystemOperands.td | 1 + 1 file changed, 1 insertion(+) diff --git a/llvm/lib/Target/RISCV/RISCVSystemOperands.td b/llvm/lib/Target/RISCV/RISCVSystemOperands.td index a836227e18957..5f51775ea64a9 100644 --- a/llvm/lib/Target/RISCV/RISCVSystemOperands.td +++ b/llvm/lib/Target/RISCV/RISCVSystemOperands.td @@ -323,6 +323,7 @@ def : SysReg<"tselect", 0x7A0>; def : SysReg<"tdata1", 0x7A1>; def : SysReg<"tdata2", 0x7A2>; def : SysReg<"tdata3", 0x7A3>; +def : SysReg<"tcontrol", 0x7A5>; def : SysReg<"mcontext", 0x7A8>; //===----------------------------------------------------------------------===// From 6da7a2e30bf6c4a2019bf95f74b5e9885d773768 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 2 Oct 2024 02:19:21 +0300 Subject: [PATCH 219/289] [Toolchain][RISCV][Xtensa] Add Espressif baremetal. 
--- clang/include/clang/Driver/ToolChain.h | 2 +- clang/lib/Driver/CMakeLists.txt | 1 + clang/lib/Driver/Driver.cpp | 8 +- clang/lib/Driver/ToolChains/BareMetal.cpp | 50 +- clang/lib/Driver/ToolChains/BareMetal.h | 10 +- clang/lib/Driver/ToolChains/EspBareMetal.cpp | 302 +++++++++ clang/lib/Driver/ToolChains/EspBareMetal.h | 94 +++ .../CodeGen/Xtensa/xtensa-ee-intrinsics.c | 2 +- .../bin/ld.lld} | 0 .../bin/riscv32-esp-elf-as} | 0 .../bin/riscv32-esp-elf-ld} | 0 .../lib/clang-runtimes/multilib.yaml | 248 +++++++ .../include/c++/11.2.0}/.keep | 0 .../include/c++/v1}/.keep | 0 .../include/c++/11.2.0/.keep} | 0 .../include/c++/v1/.keep} | 0 .../lib/libclang_rt.builtins.a | 0 .../include/c++/11.2.0/.keep} | 0 .../include/c++/v1/.keep} | 0 .../lib/libclang_rt.builtins.a | 0 .../include/c++/11.2.0/.keep} | 0 .../include/c++/v1/.keep} | 0 .../lib/libclang_rt.builtins.a | 0 .../include/c++/11.2.0/.keep} | 0 .../include/c++/v1/.keep} | 0 .../lib/libclang_rt.builtins.a | 0 .../include/c++/11.2.0/.keep} | 0 .../include/c++/v1/.keep} | 0 .../lib/libclang_rt.builtins.a | 0 .../include/c++/11.2.0/.keep} | 0 .../include/c++/v1/.keep} | 0 .../lib/libclang_rt.builtins.a | 0 .../include/c++/11.2.0/.keep} | 0 .../include/c++/v1/.keep} | 0 .../lib/libclang_rt.builtins.a | 0 .../include/c++/11.2.0/.keep} | 0 .../include/c++/v1/.keep} | 0 .../lib/libclang_rt.builtins.a | 0 .../bin/ld.lld} | 0 .../bin/xtensa-esp32-elf-as} | 0 .../bin/xtensa-esp32-elf-ld | 1 + .../bin/xtensa-esp32s2-elf-as | 1 + .../bin/xtensa-esp32s2-elf-ld | 1 + .../bin/xtensa-esp32s3-elf-as | 1 + .../bin/xtensa-esp32s3-elf-ld | 1 + .../lib/clang-runtimes/multilib.yaml | 236 +++++++ .../esp32/include/c++/11.2.0/.keep} | 0 .../esp32/include/c++/v1/.keep} | 0 .../esp32}/lib/libclang_rt.builtins.a | 0 .../esp32_no-rtti/include/c++/11.2.0/.keep} | 0 .../esp32_no-rtti/include/c++/v1/.keep} | 0 .../esp32_no-rtti}/lib/libclang_rt.builtins.a | 0 .../esp32_psram/include/c++/11.2.0/.keep} | 0 .../esp32_psram/include/c++/v1/.keep} 
| 0 .../esp32_psram}/lib/libclang_rt.builtins.a | 0 .../include/c++/11.2.0/.keep} | 0 .../esp32_psram_no-rtti/include/c++/v1/.keep} | 0 .../lib/libclang_rt.builtins.a | 0 .../esp32s2/include/c++/11.2.0/.keep} | 0 .../esp32s2/include/c++/v1/.keep} | 0 .../esp32s2}/lib/libclang_rt.builtins.a | 0 .../esp32s2_no-rtti/include/c++/11.2.0/.keep} | 0 .../esp32s2_no-rtti/include/c++/v1/.keep} | 0 .../lib/libclang_rt.builtins.a | 0 .../esp32s3/include/c++/11.2.0/.keep} | 0 .../esp32s3/include/c++/v1/.keep} | 0 .../esp32s3}/lib/libclang_rt.builtins.a | 0 .../esp32s3_no-rtti/include/c++/11.2.0/.keep} | 0 .../esp32s3_no-rtti/include/c++/v1/.keep} | 0 .../lib/libclang_rt.builtins.a | 0 .../include/c++/11.2.0/.keep} | 0 .../8.4.0/rv32imc/ilp32/crtend.o | 0 .../8.4.0/rv32imc/ilp32/no-rtti/crtbegin.o | 0 .../8.4.0/rv32imc/ilp32/no-rtti/crtend.o | 0 .../riscv32-esp-elf/lib/crt0.o | 0 .../riscv32-esp-elf/lib/rv32i/ilp32/crt0.o | 0 .../lib/rv32i/ilp32/no-rtti/crt0.o | 0 .../riscv32-esp-elf/lib/rv32imac/ilp32/crt0.o | 0 .../lib/rv32imac/ilp32/no-rtti/crt0.o | 0 .../lib/rv32imafc/ilp32f/crt0.o | 0 .../lib/rv32imafc/ilp32f/no-rtti/crt0.o | 0 .../riscv32-esp-elf/lib/rv32imc/ilp32/crt0.o | 0 .../lib/rv32imc/ilp32/no-rtti/crt0.o | 0 .../esp32/esp32-psram/lib/clang_rt.crtbegin.o | 0 .../esp32/esp32-psram/lib/clang_rt.crtend.o | 0 .../no-rtti/lib/clang_rt.crtbegin.o | 0 .../esp32-psram/no-rtti/lib/clang_rt.crtend.o | 0 .../esp32/lib/clang_rt.crtbegin.o | 0 .../esp32/lib/clang_rt.crtend.o | 0 .../esp32/no-rtti/lib/clang_rt.crtbegin.o | 0 .../esp32/no-rtti/lib/clang_rt.crtend.o | 0 .../esp32s2/lib/clang_rt.crtbegin.o | 0 .../esp32s2/lib/clang_rt.crtend.o | 0 .../esp32s2/no-rtti/lib/clang_rt.crtbegin.o | 0 .../esp32s2/no-rtti/lib/clang_rt.crtend.o | 0 .../esp32s3/lib/clang_rt.crtbegin.o | 0 .../esp32s3/lib/clang_rt.crtend.o | 0 .../esp32s3/no-rtti/lib/clang_rt.crtbegin.o | 0 .../esp32s3/no-rtti/lib/clang_rt.crtend.o | 0 .../no-rtti/lib/libclang_rt.builtins.a | 0 
.../lib/gcc/xtensa-esp32-elf/8.4.0/crtbegin.o | 0 .../lib/gcc/xtensa-esp32-elf/8.4.0/crtend.o | 0 .../8.4.0/esp32-psram/crtbegin.o | 0 .../8.4.0/esp32-psram/crtend.o | 0 .../8.4.0/esp32-psram/no-rtti/crtbegin.o | 0 .../8.4.0/esp32-psram/no-rtti/crtend.o | 0 .../xtensa-esp32-elf/8.4.0/no-rtti/crtbegin.o | 0 .../xtensa-esp32-elf/8.4.0/no-rtti/crtend.o | 0 .../gcc/xtensa-esp32s2-elf/8.4.0/crtbegin.o | 0 .../lib/gcc/xtensa-esp32s2-elf/8.4.0/crtend.o | 0 .../8.4.0/no-rtti/crtbegin.o | 0 .../xtensa-esp32s2-elf/8.4.0/no-rtti/crtend.o | 0 .../gcc/xtensa-esp32s3-elf/8.4.0/crtbegin.o | 0 .../lib/gcc/xtensa-esp32s3-elf/8.4.0/crtend.o | 0 .../8.4.0/no-rtti/crtbegin.o | 0 .../xtensa-esp32s3-elf/8.4.0/no-rtti/crtend.o | 0 .../xtensa-esp32-elf/lib/crt0.o | 0 .../xtensa-esp32-elf/lib/esp32-psram/crt0.o | 0 .../lib/esp32-psram/no-rtti/crt0.o | 0 .../xtensa-esp32-elf/lib/no-rtti/crt0.o | 0 .../xtensa-esp32s2-elf/lib/crt0.o | 0 .../xtensa-esp32s2-elf/lib/no-rtti/crt0.o | 0 .../xtensa-esp32s3-elf/lib/crt0.o | 0 .../xtensa-esp32s3-elf/lib/no-rtti/crt0.o | 0 clang/test/Driver/baremetal-esp.cpp | 618 ++++++++++++++++++ clang/test/Driver/baremetal-sysroot.cpp | 55 ++ .../test/Driver/riscv32-esp-toolchain-extra.c | 115 ---- clang/test/Driver/riscv32-esp-toolchain.c | 325 --------- .../test/Driver/xtensa-esp-toolchain-extra.c | 111 ---- clang/test/Driver/xtensa-toolchain.c | 125 ---- compiler-rt/test/builtins/Unit/lit.cfg.py | 2 +- llvm/include/llvm/TargetParser/Triple.h | 3 + .../Xtensa/AsmParser/XtensaAsmParser.cpp | 12 +- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 19 + llvm/test/MC/Xtensa/Core/processor-control.s | 4 + 135 files changed, 1638 insertions(+), 709 deletions(-) create mode 100644 clang/lib/Driver/ToolChains/EspBareMetal.cpp create mode 100644 clang/lib/Driver/ToolChains/EspBareMetal.h rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/bin/riscv32-esp-elf-as => basic_riscv32_esp_tree/bin/ld.lld} (100%) rename 
clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/bin/riscv32-esp-elf-ld => basic_riscv32_esp_tree/bin/riscv32-esp-elf-as} (100%) rename clang/test/Driver/Inputs/{multilib_xtensa_tree/bin/xtensa-esp32-elf-ld => basic_riscv32_esp_tree/bin/riscv32-esp-elf-ld} (100%) create mode 100644 clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/multilib.yaml rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/riscv32-esp-elf/include/c++/8.4.0 => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/include/c++/11.2.0}/.keep (100%) rename clang/test/Driver/Inputs/{multilib_xtensa_tree/xtensa-esp32-elf/include/c++/8.4.0 => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/include/c++/v1}/.keep (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/lib/clang_rt.crtbegin.o => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32/include/c++/11.2.0/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/lib/clang_rt.crtend.o => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32/include/c++/v1/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32 => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32}/lib/libclang_rt.builtins.a (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/clang_rt.crtbegin.o => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32_no-rtti/include/c++/11.2.0/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/clang_rt.crtend.o => 
basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32_no-rtti/include/c++/v1/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32 => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32_no-rtti}/lib/libclang_rt.builtins.a (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/clang_rt.crtbegin.o => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32/include/c++/11.2.0/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/clang_rt.crtend.o => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32/include/c++/v1/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32}/lib/libclang_rt.builtins.a (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/clang_rt.crtbegin.o => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32_no-rtti/include/c++/11.2.0/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/clang_rt.crtend.o => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32_no-rtti/include/c++/v1/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32 => 
basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32_no-rtti}/lib/libclang_rt.builtins.a (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/clang_rt.crtbegin.o => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f/include/c++/11.2.0/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/clang_rt.crtend.o => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f/include/c++/v1/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f}/lib/libclang_rt.builtins.a (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/clang_rt.crtbegin.o => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f_no-rtti/include/c++/11.2.0/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/clang_rt.crtend.o => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f_no-rtti/include/c++/v1/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f_no-rtti}/lib/libclang_rt.builtins.a (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/clang_rt.crtbegin.o => 
basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32/include/c++/11.2.0/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/clang_rt.crtend.o => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32/include/c++/v1/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32}/lib/libclang_rt.builtins.a (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/clang_rt.crtbegin.o => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32_no-rtti/include/c++/11.2.0/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/clang_rt.crtend.o => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32_no-rtti/include/c++/v1/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32 => basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32_no-rtti}/lib/libclang_rt.builtins.a (100%) rename clang/test/Driver/Inputs/{multilib_xtensa_tree/bin/xtensa-esp32s2-elf-ld => basic_xtensa_esp_tree/bin/ld.lld} (100%) rename clang/test/Driver/Inputs/{multilib_xtensa_tree/bin/xtensa-esp32s3-elf-ld => basic_xtensa_esp_tree/bin/xtensa-esp32-elf-as} (100%) create mode 100755 clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-ld create mode 100755 clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-as create mode 100755 
clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-ld create mode 100755 clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-as create mode 100755 clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-ld create mode 100644 clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/multilib.yaml rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/clang_rt.crtbegin.o => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32/include/c++/11.2.0/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/clang_rt.crtend.o => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32/include/c++/v1/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32}/lib/libclang_rt.builtins.a (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/crtbegin.o => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_no-rtti/include/c++/11.2.0/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/crtend.o => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_no-rtti/include/c++/v1/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_no-rtti}/lib/libclang_rt.builtins.a (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/crtbegin.o => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_psram/include/c++/11.2.0/.keep} (100%) rename 
clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/crtend.o => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_psram/include/c++/v1/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_psram}/lib/libclang_rt.builtins.a (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/no-rtti/crtbegin.o => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_psram_no-rtti/include/c++/11.2.0/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/no-rtti/crtend.o => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_psram_no-rtti/include/c++/v1/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32 => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_psram_no-rtti}/lib/libclang_rt.builtins.a (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/crtbegin.o => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s2/include/c++/11.2.0/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/crtend.o => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s2/include/c++/v1/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s2}/lib/libclang_rt.builtins.a (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/no-rtti/crtbegin.o => 
basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s2_no-rtti/include/c++/11.2.0/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/no-rtti/crtend.o => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s2_no-rtti/include/c++/v1/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2 => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s2_no-rtti}/lib/libclang_rt.builtins.a (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/crtbegin.o => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s3/include/c++/11.2.0/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/crtend.o => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s3/include/c++/v1/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s3}/lib/libclang_rt.builtins.a (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/no-rtti/crtbegin.o => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s3_no-rtti/include/c++/11.2.0/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/no-rtti/crtend.o => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s3_no-rtti/include/c++/v1/.keep} (100%) rename clang/test/Driver/Inputs/{multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3 => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s3_no-rtti}/lib/libclang_rt.builtins.a (100%) rename 
clang/test/Driver/Inputs/{multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/crtbegin.o => basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/include/c++/11.2.0/.keep} (100%) delete mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/crtend.o delete mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/no-rtti/crtbegin.o delete mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/no-rtti/crtend.o delete mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/crt0.o delete mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32i/ilp32/crt0.o delete mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32i/ilp32/no-rtti/crt0.o delete mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imac/ilp32/crt0.o delete mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imac/ilp32/no-rtti/crt0.o delete mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imafc/ilp32f/crt0.o delete mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imafc/ilp32f/no-rtti/crt0.o delete mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imc/ilp32/crt0.o delete mode 100644 clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imc/ilp32/no-rtti/crt0.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/clang_rt.crtbegin.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/clang_rt.crtend.o delete mode 100644 
clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/clang_rt.crtbegin.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/clang_rt.crtend.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/clang_rt.crtbegin.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/clang_rt.crtend.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/clang_rt.crtbegin.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/clang_rt.crtend.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/clang_rt.crtbegin.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/clang_rt.crtend.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/clang_rt.crtbegin.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/clang_rt.crtend.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/clang_rt.crtbegin.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/clang_rt.crtend.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/clang_rt.crtbegin.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/clang_rt.crtend.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/libclang_rt.builtins.a delete 
mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/crtbegin.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/crtend.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/crtbegin.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/crtend.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/no-rtti/crtbegin.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/no-rtti/crtend.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/no-rtti/crtbegin.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/no-rtti/crtend.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/crtbegin.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/crtend.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/no-rtti/crtbegin.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/no-rtti/crtend.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/crtbegin.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/crtend.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/no-rtti/crtbegin.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/no-rtti/crtend.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/crt0.o delete mode 100644 
clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/esp32-psram/crt0.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/esp32-psram/no-rtti/crt0.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/no-rtti/crt0.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s2-elf/lib/crt0.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s2-elf/lib/no-rtti/crt0.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s3-elf/lib/crt0.o delete mode 100644 clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s3-elf/lib/no-rtti/crt0.o create mode 100644 clang/test/Driver/baremetal-esp.cpp delete mode 100644 clang/test/Driver/riscv32-esp-toolchain-extra.c delete mode 100644 clang/test/Driver/riscv32-esp-toolchain.c delete mode 100644 clang/test/Driver/xtensa-esp-toolchain-extra.c delete mode 100644 clang/test/Driver/xtensa-toolchain.c diff --git a/clang/include/clang/Driver/ToolChain.h b/clang/include/clang/Driver/ToolChain.h index ece1384d5d3c0..73ae7d72666db 100644 --- a/clang/include/clang/Driver/ToolChain.h +++ b/clang/include/clang/Driver/ToolChain.h @@ -314,7 +314,7 @@ class ToolChain { /// function should be extended. /// To allow users to find out what flags are returned, clang accepts a /// -print-multi-flags-experimental argument. 
- Multilib::flags_list getMultilibFlags(const llvm::opt::ArgList &) const; + virtual Multilib::flags_list getMultilibFlags(const llvm::opt::ArgList &) const; SanitizerArgs getSanitizerArgs(const llvm::opt::ArgList &JobArgs) const; diff --git a/clang/lib/Driver/CMakeLists.txt b/clang/lib/Driver/CMakeLists.txt index 421e07798a920..1a6d89040d886 100644 --- a/clang/lib/Driver/CMakeLists.txt +++ b/clang/lib/Driver/CMakeLists.txt @@ -53,6 +53,7 @@ add_clang_library(clangDriver ToolChains/Cuda.cpp ToolChains/Darwin.cpp ToolChains/DragonFly.cpp + ToolChains/EspBareMetal.cpp ToolChains/Flang.cpp ToolChains/FreeBSD.cpp ToolChains/Fuchsia.cpp diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp index 3e2f02b27bf84..7ea5a434b5259 100644 --- a/clang/lib/Driver/Driver.cpp +++ b/clang/lib/Driver/Driver.cpp @@ -19,6 +19,7 @@ #include "ToolChains/Cuda.h" #include "ToolChains/Darwin.h" #include "ToolChains/DragonFly.h" +#include "ToolChains/EspBareMetal.h" #include "ToolChains/FreeBSD.h" #include "ToolChains/Fuchsia.h" #include "ToolChains/Gnu.h" @@ -6495,6 +6496,8 @@ const ToolChain &Driver::getToolChain(const ArgList &Args, if (toolchains::RISCVToolChain::hasGCCToolchain(*this, Args)) TC = std::make_unique(*this, Target, Args); + else if (Target.getVendor() == llvm::Triple::Espressif) + TC = std::make_unique(*this, Target, Args); else TC = std::make_unique(*this, Target, Args); break; @@ -6509,7 +6512,10 @@ const ToolChain &Driver::getToolChain(const ArgList &Args, TC = std::make_unique(*this, Target, Args); break; case llvm::Triple::xtensa: - TC = std::make_unique(*this, Target, Args); + if (Target.getVendor() == llvm::Triple::Espressif) + TC = std::make_unique(*this, Target, Args); + else + TC = std::make_unique(*this, Target, Args); break; default: if (toolchains::BareMetal::handlesTarget(Target)) diff --git a/clang/lib/Driver/ToolChains/BareMetal.cpp b/clang/lib/Driver/ToolChains/BareMetal.cpp index 852e0442f50a2..f7b191f88d4a2 100644 --- 
a/clang/lib/Driver/ToolChains/BareMetal.cpp +++ b/clang/lib/Driver/ToolChains/BareMetal.cpp @@ -98,13 +98,14 @@ static bool findRISCVMultilibs(const Driver &D, } BareMetal::BareMetal(const Driver &D, const llvm::Triple &Triple, - const ArgList &Args) + const ArgList &Args, bool detectMultilibs) : ToolChain(D, Triple, Args) { getProgramPaths().push_back(getDriver().Dir); - findMultilibs(D, Triple, Args); + if (detectMultilibs) + findMultilibs(D, Triple, Args); SmallString<128> SysRoot(computeSysRoot()); - if (!SysRoot.empty()) { + if (!SysRoot.empty() && detectMultilibs) { for (const Multilib &M : getOrderedMultilibs()) { SmallString<128> Dir(SysRoot); llvm::sys::path::append(Dir, M.osSuffix(), "lib"); @@ -293,6 +294,28 @@ void BareMetal::addClangTargetOptions(const ArgList &DriverArgs, CC1Args.push_back("-nostdsysteminc"); } +void BareMetal::DetectAndAppendGCCVersion(const Driver &D, + SmallString<128> &Dir) const { + std::error_code EC; + Generic_GCC::GCCVersion Version = {"", -1, -1, -1, "", "", ""}; + + // Walk the subdirs, and find the one with the newest gcc version: + for (llvm::vfs::directory_iterator LI = D.getVFS().dir_begin(Dir.str(), EC), + LE; + !EC && LI != LE; LI = LI.increment(EC)) { + StringRef VersionText = llvm::sys::path::filename(LI->path()); + auto CandidateVersion = Generic_GCC::GCCVersion::Parse(VersionText); + if (CandidateVersion.Major == -1) + continue; + if (CandidateVersion <= Version) + continue; + Version = CandidateVersion; + } + if (Version.Major == -1) + return; // no GCC version found, do not append it + llvm::sys::path::append(Dir, Version.Text); +} + void BareMetal::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs, ArgStringList &CC1Args) const { if (DriverArgs.hasArg(options::OPT_nostdinc, options::OPT_nostdlibinc, @@ -357,25 +380,8 @@ void BareMetal::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs, } case ToolChain::CST_Libstdcxx: { llvm::sys::path::append(Dir, "include", "c++"); - std::error_code EC; - 
Generic_GCC::GCCVersion Version = {"", -1, -1, -1, "", "", ""}; - // Walk the subdirs, and find the one with the newest gcc version: - for (llvm::vfs::directory_iterator - LI = D.getVFS().dir_begin(Dir.str(), EC), - LE; - !EC && LI != LE; LI = LI.increment(EC)) { - StringRef VersionText = llvm::sys::path::filename(LI->path()); - auto CandidateVersion = Generic_GCC::GCCVersion::Parse(VersionText); - if (CandidateVersion.Major == -1) - continue; - if (CandidateVersion <= Version) - continue; - Version = CandidateVersion; - } - if (Version.Major != -1) { - llvm::sys::path::append(Dir, Version.Text); - addSystemInclude(DriverArgs, CC1Args, Dir.str()); - } + DetectAndAppendGCCVersion(D, Dir); + addSystemInclude(DriverArgs, CC1Args, Dir.str()); break; } } diff --git a/clang/lib/Driver/ToolChains/BareMetal.h b/clang/lib/Driver/ToolChains/BareMetal.h index 67b5aa5998fc3..f915bff8f4907 100644 --- a/clang/lib/Driver/ToolChains/BareMetal.h +++ b/clang/lib/Driver/ToolChains/BareMetal.h @@ -22,7 +22,7 @@ namespace toolchains { class LLVM_LIBRARY_VISIBILITY BareMetal : public ToolChain { public: BareMetal(const Driver &D, const llvm::Triple &Triple, - const llvm::opt::ArgList &Args); + const llvm::opt::ArgList &Args, bool detectMultilibs = true); ~BareMetal() override = default; static bool handlesTarget(const llvm::Triple &Triple); @@ -31,6 +31,8 @@ class LLVM_LIBRARY_VISIBILITY BareMetal : public ToolChain { const llvm::opt::ArgList &Args); protected: + void DetectAndAppendGCCVersion(const Driver &D, + SmallString<128> &Dir) const; Tool *buildLinker() const override; Tool *buildStaticLibTool() const override; @@ -64,17 +66,17 @@ class LLVM_LIBRARY_VISIBILITY BareMetal : public ToolChain { addClangTargetOptions(const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args, Action::OffloadKind DeviceOffloadKind) const override; - void AddClangCXXStdlibIncludeArgs( + virtual void AddClangCXXStdlibIncludeArgs( const llvm::opt::ArgList &DriverArgs, 
llvm::opt::ArgStringList &CC1Args) const override; - void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args, + virtual void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CmdArgs) const override; void AddLinkRuntimeLib(const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CmdArgs) const; std::string computeSysRoot() const override; SanitizerMask getSupportedSanitizers() const override; -private: +protected: using OrderedMultilibs = llvm::iterator_range::const_reverse_iterator>; OrderedMultilibs getOrderedMultilibs() const; diff --git a/clang/lib/Driver/ToolChains/EspBareMetal.cpp b/clang/lib/Driver/ToolChains/EspBareMetal.cpp new file mode 100644 index 0000000000000..a3badac5dd3a7 --- /dev/null +++ b/clang/lib/Driver/ToolChains/EspBareMetal.cpp @@ -0,0 +1,302 @@ +//===-- BareMetal.cpp - Bare Metal ToolChain --------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "EspBareMetal.h" + +#include "CommonArgs.h" +#include "Gnu.h" +#include "clang/Driver/InputInfo.h" + +#include "Arch/RISCV.h" +#include "clang/Driver/Compilation.h" +#include "clang/Driver/Driver.h" +#include "clang/Driver/DriverDiagnostic.h" +#include "clang/Driver/Options.h" +#include "clang/Driver/MultilibBuilder.h" +#include "llvm/ADT/StringSet.h" +#include "llvm/Option/ArgList.h" +#include "llvm/Support/Path.h" +#include "llvm/Support/VirtualFileSystem.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; +using namespace llvm::opt; +using namespace clang; +using namespace clang::driver; +using namespace clang::driver::tools; +using namespace clang::driver::toolchains; + + +EspBareMetal::EspBareMetal(const Driver &D, const llvm::Triple &Triple, + const ArgList &Args) + : BareMetal(D, Triple, Args, false) { + + findMultilibs(D, Triple, Args); + SmallString<128> SysRoot(computeSysRoot()); + if (!SysRoot.empty()) { + for (const Multilib &M : getOrderedMultilibs()) { + SmallString<128> Dir(SysRoot); + llvm::sys::path::append(Dir, M.osSuffix(), "lib"); + getFilePaths().push_back(std::string(Dir)); + getLibraryPaths().push_back(std::string(Dir)); + } + } + + // TODO: Add full support for Xtensa to integrated asm + // LLVM-290, LLVM-291 + if (Triple.getArch() == llvm::Triple::xtensa) { + for (auto *A : Args) { + std::string Str = A->getAsString(Args); + // Currently don't use integrated assembler for assembler input files + if ((IsIntegratedAsm) && (Str.length() > 2)) { + std::string ExtSubStr = Str.substr(Str.length() - 2); + if (!ExtSubStr.compare(".s")) + IsIntegratedAsm = false; + if (!ExtSubStr.compare(".S")) + IsIntegratedAsm = false; + } + } + if (IsIntegratedAsm) { + if (Args.getLastArgValue(options::OPT_x) == "assembler") + IsIntegratedAsm = false; + + if 
(Args.getLastArgValue(options::OPT_x) == "assembler-with-cpp") + IsIntegratedAsm = false; + } + } +} + +void EspBareMetal::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs, + ArgStringList &CC1Args) const { + if (DriverArgs.hasArg(options::OPT_nostdinc) || + DriverArgs.hasArg(options::OPT_nostdlibinc) || + DriverArgs.hasArg(options::OPT_nostdincxx)) + return; + + const Driver &D = getDriver(); + std::string SysRoot(computeSysRoot()); + if (SysRoot.empty()) + return; + + BareMetal::AddClangCXXStdlibIncludeArgs(DriverArgs, CC1Args); + + // add include path to non-multilib libstdcxx headers + if(GetCXXStdlibType(DriverArgs) == ToolChain::CST_Libstdcxx) { + // computed SysRoot points to clang-runtimes + SmallString<128> Dir(SysRoot); + llvm::sys::path::append(Dir, getTriple().str(), "include", "c++"); + DetectAndAppendGCCVersion(D, Dir); + addSystemInclude(DriverArgs, CC1Args, Dir.str()); + } +} + +void EspBareMetal::AddCXXStdlibLibArgs(const ArgList &Args, + ArgStringList &CmdArgs) const { + switch (GetCXXStdlibType(Args)) { + case ToolChain::CST_Libcxx: + CmdArgs.push_back("-lc++"); + if (Args.hasArg(options::OPT_fexperimental_library)) + CmdArgs.push_back("-lc++experimental"); + CmdArgs.push_back("-lc++abi"); + CmdArgs.push_back("-lunwind"); + break; + case ToolChain::CST_Libstdcxx: + CmdArgs.push_back("-lstdc++"); + break; + } +} + +static void getRISCV32MultilibFlags(const llvm::Triple &Triple, + const llvm::opt::ArgList &Args, + Multilib::flags_list &Result) { + + Result.push_back("-march=" + riscv::getRISCVArch(Args, Triple)); + Result.push_back("-mabi=" + riscv::getRISCVABI(Args, Triple).str()); +} + +static void getXtensaMultilibFlags(const llvm::Triple &Triple, + const llvm::opt::ArgList &Args, + Multilib::flags_list &Result) { + + if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) + Result.push_back(A->getAsString(Args)); + else + Result.push_back("-mcpu=esp32"); + + if (Args.hasArg(options::OPT_mfix_esp32_psram_cache_issue)) + 
Result.push_back("-mfix-esp32-psram-cache-issue"); +} + +Multilib::flags_list +EspBareMetal::getMultilibFlags(const llvm::opt::ArgList &Args) const { + + std::vector Result; + const llvm::Triple Triple(ComputeEffectiveClangTriple(Args)); + + if (Triple.getVendor() != llvm::Triple::Espressif) + return BareMetal::getMultilibFlags(Args); + + Result.push_back("--target=" + Triple.str()); + + if (Args.hasArg(options::OPT_fno_rtti)) + Result.push_back("-fno-rtti"); + + switch (Triple.getArch()) { + case llvm::Triple::riscv32: + getRISCV32MultilibFlags(Triple, Args, Result); + break; + case llvm::Triple::xtensa: + getXtensaMultilibFlags(Triple, Args, Result); + break; + default: + break; + } + + // Sort and remove duplicates. + std::sort(Result.begin(), Result.end()); + Result.erase(std::unique(Result.begin(), Result.end()), Result.end()); + return Result; +} + +Tool *EspBareMetal::buildLinker() const { + return new tools::baremetal::esp::Linker(*this); +} + +Tool *EspBareMetal::buildAssembler() const { + return new tools::baremetal::esp::Assembler(*this); +} + +void baremetal::esp::Linker::ConstructJob(Compilation &C, const JobAction &JA, + const InputInfo &Output, + const InputInfoList &Inputs, + const ArgList &Args, + const char *LinkingOutput) const { + auto &ToolChain = static_cast(getToolChain()); + const Driver &D = ToolChain.getDriver(); + ArgStringList CmdArgs; + + if (!D.SysRoot.empty()) + CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot)); + + if (ToolChain.getArch() == llvm::Triple::riscv32) { + CmdArgs.push_back("-m"); + CmdArgs.push_back("elf32lriscv"); + } + + CmdArgs.push_back("-o"); + CmdArgs.push_back(Output.getFilename()); + + CmdArgs.push_back("-X"); + + std::string Linker = ToolChain.GetLinkerPath(); + + bool WantCRTs = + !Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles); + + if (WantCRTs) { + /* Espressif toolchain uses newlib. crt0.o from it refers to 'main' symbol. 
+ In 'freestanding' mode 'main' is not marked as special symbol by clang, + so when compiling C++ program with 'clang++' 'main' gets mangled + (if not declared as 'extern "C"' ) and linker can not resolve it. + The problem can happen, for example, when cmake checks C++ compiler by building simple C++ code, + unfortunately 'main' function in that code is not declared as 'extern "C"'. */ + bool Freestanding = + Args.hasFlag(options::OPT_ffreestanding, options::OPT_fhosted, false); + if (!Freestanding) { + CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o"))); + } + } + + AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA); + + Args.AddAllArgs(CmdArgs, options::OPT_L); + Args.AddAllArgs(CmdArgs, options::OPT_u); + ToolChain.AddFilePathLibArgs(Args, CmdArgs); + Args.AddAllArgs(CmdArgs, options::OPT_T_Group); + Args.AddAllArgs(CmdArgs, options::OPT_e); + Args.AddAllArgs(CmdArgs, options::OPT_s); + Args.AddAllArgs(CmdArgs, options::OPT_t); + Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag); + Args.AddAllArgs(CmdArgs, options::OPT_r); + + // TODO: add C++ includes and libs if compiling C++. 
+ + if (!Args.hasArg(options::OPT_nostdlib) && + !Args.hasArg(options::OPT_nodefaultlibs)) { + if (ToolChain.ShouldLinkCXXStdlib(Args)) + ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs); + CmdArgs.push_back("-lm"); + CmdArgs.push_back("--start-group"); + CmdArgs.push_back("-lc"); + CmdArgs.push_back("-lgloss"); + CmdArgs.push_back("-lnosys"); + CmdArgs.push_back("--end-group"); + ToolChain.AddLinkRuntimeLib(Args, CmdArgs); + } + + C.addCommand(std::make_unique( + JA, *this, ResponseFileSupport::AtFileCurCP(), Args.MakeArgString(Linker), + CmdArgs, Inputs, Output)); +} + +void baremetal::esp::Assembler::ConstructJob(Compilation &C, const JobAction &JA, + const InputInfo &Output, + const InputInfoList &Inputs, + const ArgList &Args, + const char *LinkingOutput) const { + const auto &TC = static_cast(getToolChain()); + + claimNoWarnArgs(Args); + ArgStringList CmdArgs; + + CmdArgs.push_back("-o"); + CmdArgs.push_back(Output.getFilename()); + + CmdArgs.push_back("-c"); + + if (Args.hasArg(options::OPT_v)) + CmdArgs.push_back("-v"); + + if (Arg *A = Args.getLastArg(options::OPT_g_Group)) + if (!A->getOption().matches(options::OPT_g0)) + CmdArgs.push_back("-g"); + + if (Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm, + false)) + CmdArgs.push_back("-fverbose-asm"); + + Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler); + + for (const auto &II : Inputs) + CmdArgs.push_back(II.getFilename()); + + std::string AsmPrefix; + if (TC.getTriple().getArch() == llvm::Triple::xtensa) { + StringRef cpu = Args.getLastArgValue(options::OPT_mcpu_EQ, "esp32"); + // xtensa-esp32-elf + AsmPrefix = TC.getTriple().getArchName().str() + "-" + cpu.str() + "-" + + TC.getTriple().getEnvironmentName().str(); + } else { + // riscv32-esp-elf + if (Args.hasArg(options::OPT_march_EQ)) + Args.AddAllArgs(CmdArgs, options::OPT_march_EQ); + else + CmdArgs.push_back("-march=rv32imac"); + if (Args.hasArg(options::OPT_mabi_EQ)) + Args.AddAllArgs(CmdArgs, 
options::OPT_mabi_EQ); + else + CmdArgs.push_back("-mabi=ilp32"); + AsmPrefix = TC.getTriple().getArchName().str() + "-" + + TC.getTriple().getVendorName().str() + "-" + + TC.getTriple().getEnvironmentName().str(); + } + SmallString<128> Asm(AsmPrefix + "-" + getShortName()); + C.addCommand( + std::make_unique(JA, *this, ResponseFileSupport::AtFileCurCP(), + Args.MakeArgString(TC.GetProgramPath(Asm.str().str().c_str())), CmdArgs, Inputs)); +} diff --git a/clang/lib/Driver/ToolChains/EspBareMetal.h b/clang/lib/Driver/ToolChains/EspBareMetal.h new file mode 100644 index 0000000000000..ea074937dbb82 --- /dev/null +++ b/clang/lib/Driver/ToolChains/EspBareMetal.h @@ -0,0 +1,94 @@ +//===--- EspBareMetal.h - Espressif Bare Metal Tool and ToolChain ------------*- C++-*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ESPBAREMETAL_H +#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ESPBAREMETAL_H + +#include "BareMetal.h" + +namespace clang { +namespace driver { + +namespace toolchains { + +class LLVM_LIBRARY_VISIBILITY EspBareMetal : public BareMetal { +public: + EspBareMetal(const Driver &D, const llvm::Triple &Triple, + const llvm::opt::ArgList &Args); + + ~EspBareMetal() override = default; + +protected: + Tool *buildLinker() const override; + Tool *buildAssembler() const override; + +public: + // BareMetal forces to use integrated ASM, we want to be more flexible + // and allow users to switch to GNU ASM using command line + bool useIntegratedAs() const override { return ToolChain::useIntegratedAs(); } + bool IsIntegratedAssemblerDefault() const override { return IsIntegratedAsm; } + + RuntimeLibType GetDefaultRuntimeLibType() const override { + return ToolChain::RLT_CompilerRT; + } + 
CXXStdlibType GetDefaultCXXStdlibType() const override { + return ToolChain::CST_Libstdcxx; + } + + void AddClangCXXStdlibIncludeArgs( + const llvm::opt::ArgList &DriverArgs, + llvm::opt::ArgStringList &CC1Args) const override; + + const char *getDefaultLinker() const override { return "ld.lld"; } + + void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args, + llvm::opt::ArgStringList &CmdArgs) const override; + + virtual Multilib::flags_list getMultilibFlags(const llvm::opt::ArgList &) const override; + +private: + bool IsIntegratedAsm = true; +}; + +} // namespace toolchains + +namespace tools { +namespace baremetal { +namespace esp { + +class LLVM_LIBRARY_VISIBILITY Linker : public Tool { +public: + Linker(const ToolChain &TC) : Tool("baremetal::esp::Linker", "ld", TC) {} + bool isLinkJob() const override { return true; } + bool hasIntegratedCPP() const override { return false; } + void ConstructJob(Compilation &C, const JobAction &JA, + const InputInfo &Output, const InputInfoList &Inputs, + const llvm::opt::ArgList &TCArgs, + const char *LinkingOutput) const override; +}; + +class LLVM_LIBRARY_VISIBILITY Assembler : public Tool { +public: + Assembler(const ToolChain &TC) + : Tool("baremetal::esp::Assembler", "as", TC) {} + + bool hasIntegratedCPP() const override { return false; } + void ConstructJob(Compilation &C, const JobAction &JA, + const InputInfo &Output, const InputInfoList &Inputs, + const llvm::opt::ArgList &TCArgs, + const char *LinkingOutput) const override; +}; + +} // namespace esp +} // namespace baremetal +} // namespace tools + +} // namespace driver +} // namespace clang + +#endif diff --git a/clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c b/clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c index c3ce2e107d886..3624bff2b318a 100644 --- a/clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c +++ b/clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -no-opaque-pointers -triple xtensa -S -emit-llvm -O0 -o - %s \ +// 
RUN: %clang_cc1 -triple xtensa -S -emit-llvm -O0 -o - %s \ // RUN: | FileCheck %s #include diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/bin/riscv32-esp-elf-as b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/bin/ld.lld similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/bin/riscv32-esp-elf-as rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/bin/ld.lld diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/bin/riscv32-esp-elf-ld b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-as similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/bin/riscv32-esp-elf-ld rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-as diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/bin/xtensa-esp32-elf-ld b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-ld similarity index 100% rename from clang/test/Driver/Inputs/multilib_xtensa_tree/bin/xtensa-esp32-elf-ld rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-ld diff --git a/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/multilib.yaml b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/multilib.yaml new file mode 100644 index 0000000000000..c4123d3cfc039 --- /dev/null +++ b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/multilib.yaml @@ -0,0 +1,248 @@ +# +# Copyright (c) 2023, Arm Limited and affiliates. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# If you're reading this file under the name 'multilib.yaml.in' in the +# LLVM-embedded-toolchain-for-Arm source tree, then it's not valid +# YAML in its own right: it's a template that CMakeLists.txt will +# expand into a real 'multilib.yaml' containing a list of library +# variants and the flags that will select them. +# +# If you're reading it under the name 'multilib.yaml' in the build or +# install directory, then that substitution has been done. +# +# Comments in this file mostly make more sense from the +# multilib.yaml.in point of view. + +MultilibVersion: '1.0' + +# The list of library variants is substituted in by CMakeLists.txt, so +# that it can respect the LLVM_TOOLCHAIN_LIBRARY_VARIANTS setting and +# only include the set of libraries actually included in this build. 
+ +Variants: +- Dir: riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32 + Flags: + - --target=riscv32-esp-unknown-elf + - -march=rv32i_zicsr_zifencei + - -mabi=ilp32 +- Dir: riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32_no-rtti + Flags: + - --target=riscv32-esp-unknown-elf + - -march=rv32i_zicsr_zifencei + - -mabi=ilp32 + - -fno-rtti +- Dir: riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32 + Flags: + - --target=riscv32-esp-unknown-elf + - -march=rv32imc_zicsr_zifencei + - -mabi=ilp32 +- Dir: riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32_no-rtti + Flags: + - --target=riscv32-esp-unknown-elf + - -march=rv32imc_zicsr_zifencei + - -mabi=ilp32 + - -fno-rtti +- Dir: riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32 + Flags: + - --target=riscv32-esp-unknown-elf + - -march=rv32imac_zicsr_zifencei + - -mabi=ilp32 +- Dir: riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32_no-rtti + Flags: + - --target=riscv32-esp-unknown-elf + - -march=rv32imac_zicsr_zifencei + - -mabi=ilp32 + - -fno-rtti +- Dir: riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f + Flags: + - --target=riscv32-esp-unknown-elf + - -march=rv32imafc_zicsr_zifencei + - -mabi=ilp32f +- Dir: riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f_no-rtti + Flags: + - --target=riscv32-esp-unknown-elf + - -march=rv32imafc_zicsr_zifencei + - -mabi=ilp32f + - -fno-rtti +- Dir: xtensa-esp-unknown-elf/esp32 + Flags: + - --target=xtensa-esp-unknown-elf + - -mcpu=esp32 +- Dir: xtensa-esp-unknown-elf/esp32_no-rtti + Flags: + - --target=xtensa-esp-unknown-elf + - -mcpu=esp32 + - -fno-rtti +- Dir: xtensa-esp-unknown-elf/esp32_psram + Flags: + - --target=xtensa-esp-unknown-elf + - -mcpu=esp32 + - -mfix-esp32-psram-cache-issue +- Dir: xtensa-esp-unknown-elf/esp32_psram_no-rtti + Flags: + - --target=xtensa-esp-unknown-elf + - -mcpu=esp32 + - -mfix-esp32-psram-cache-issue + - -fno-rtti +- Dir: xtensa-esp-unknown-elf/esp32s2 + Flags: + - --target=xtensa-esp-unknown-elf + - -mcpu=esp32s2 +- Dir: 
xtensa-esp-unknown-elf/esp32s2_no-rtti + Flags: + - --target=xtensa-esp-unknown-elf + - -mcpu=esp32s2 + - -fno-rtti +- Dir: xtensa-esp-unknown-elf/esp32s3 + Flags: + - --target=xtensa-esp-unknown-elf + - -mcpu=esp32s3 +- Dir: xtensa-esp-unknown-elf/esp32s3_no-rtti + Flags: + - --target=xtensa-esp-unknown-elf + - -mcpu=esp32s3 + - -fno-rtti + + +Mappings: + +# Map higher architecture versions to subsets of them, so that a +# compatible library can be found even for architectures we don't have +# specific variants for. + +# v8-M Baseline is a superset of v6-M +- Match: --target=thumbv8m\.base-none-unknown-eabi + Flags: + - --target=thumbv6m-none-unknown-eabi + +# v8.2-M Mainline is a superset of v8.1-M Mainline, in both hard and +# soft float variants. +# +# Also, v8.1-M Mainline is also a superset of v8-M Mainline, which in +# turn is a superset of v7E-M, and then of plain v7-M. We have +# libraries for all those architecture versions, but not for every +# combination of them with FPUs, so in some cases it might be +# necessary to fall back to a lower architecture in order to provide +# the needed FPU support. 
+- Match: --target=thumbv8\.[2-9]m\.main-none-unknown-eabi + Flags: + - --target=thumbv8.1m.main-none-unknown-eabi + - --target=thumbv8m.main-none-unknown-eabi + - --target=thumbv7em-none-unknown-eabi + - --target=thumbv7m-none-unknown-eabi +- Match: --target=thumbv8\.[2-9]m\.main-none-unknown-eabihf + Flags: + - --target=thumbv8.1m.main-none-unknown-eabihf + - --target=thumbv8m.main-none-unknown-eabihf + - --target=thumbv7em-none-unknown-eabihf + - --target=thumbv7m-none-unknown-eabihf +- Match: --target=thumbv8\.1m\.main-none-unknown-eabi + Flags: + - --target=thumbv8m.main-none-unknown-eabi + - --target=thumbv7em-none-unknown-eabi + - --target=thumbv7m-none-unknown-eabi +- Match: --target=thumbv8\.1m\.main-none-unknown-eabihf + Flags: + - --target=thumbv8m.main-none-unknown-eabihf + - --target=thumbv7em-none-unknown-eabihf + - --target=thumbv7m-none-unknown-eabihf +- Match: --target=thumbv8m\.main-none-unknown-eabi + Flags: + - --target=thumbv7em-none-unknown-eabi + - --target=thumbv7m-none-unknown-eabi +- Match: --target=thumbv8m\.main-none-unknown-eabihf + Flags: + - --target=thumbv7em-none-unknown-eabihf + - --target=thumbv7m-none-unknown-eabihf +- Match: --target=thumbv7em-none-unknown-eabi + Flags: + - --target=thumbv7m-none-unknown-eabi +- Match: --target=thumbv7em-none-unknown-eabihf + Flags: + - --target=thumbv7m-none-unknown-eabihf + +# Higher versions of v8-A, and v9-A, are all supersets of v8-A. (And +# of each other, in the obvious way, but we don't have any libraries +# for those at present, so there's no need to generate all their +# flags.) 
+- Match: --target=armv(8\.[1-9]|9|9\.[1-9])a-none-unknown-eabi + Flags: + - --target=armv8a-none-unknown-eabi + +# -march extensions +- Match: -march=thumbv8\.[1-9]m\.main.*\+fp16.* + Flags: + - -march=thumbv8.1m.main+fp16 +- Match: -march=thumbv8\.[1-9]m\.main.*\+dsp.*\+mve.* + Flags: + - -march=thumbv8.1m.main+dsp+mve +- Match: -march=thumbv8\.[1-9]m\.main.*\+mve\.fp.*\+fp16.*\+lob.* + Flags: + - -march=thumbv8.1m.main+fp16+lob+mve.fp + +# Hierarchy among FPUs: fpvN-d16 is a superset of fpvN-sp-d16, and +# fpvN-d16 is a superset of fpv[N-1]-d16, for all N. +# +# We don't consider any hardware FP configuration to be compatible +# with -mfpu=none. It would work in most cases to cross-call between +# code compiled for an FPU or no FPU, if you were using the soft float +# ABI. But it wouldn't work in all cases: setjmp needs to know whether +# to save FP registers in the jmp_buf, so a non-FPU-aware setjmp would +# not behave correctly if linked into an otherwise FPU-using +# application. Similarly for exception unwinding. So we don't permit +# selecting an -mfpu=none library as a fallback for any hard-FP +# library. 
+- Match: -mfpu=fpv5-d16 + Flags: + - -mfpu=fpv4-d16 + - -mfpu=fpv5-sp-d16 + - -mfpu=fpv4-sp-d16 +- Match: -mfpu=fpv5-sp-d16 + Flags: + - -mfpu=fpv4-sp-d16 +- Match: -mfpu=fpv4-d16 + Flags: + - -mfpu=fpv4-sp-d16 +- Match: -mfpu=fp-armv8-fullfp16-d16 + Flags: + - -mfpu=fp-armv8-fullfp16-sp-d16 + +# RISCV -march extensions and mappings +- Match: -march=rv32i + Flags: + - -march=rv32i_zicsr_zifencei +- Match: -march=rv32ic + Flags: + - -march=rv32i_zicsr_zifencei +- Match: -march=rv32im + Flags: + - -march=rv32imc_zicsr_zifencei +- Match: -march=rv32imc + Flags: + - -march=rv32imc_zicsr_zifencei +- Match: -march=rv32imac + Flags: + - -march=rv32imac_zicsr_zifencei +- Match: -march=rv32imafc + Flags: + - -march=rv32imafc_zicsr_zifencei +- Match: -march=rv32imafdc + Flags: + - -march=rv32imafc_zicsr_zifencei +- Match: -march=rv32gc + Flags: + - -march=rv32imafc_zicsr_zifencei diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/include/c++/8.4.0/.keep b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/include/c++/11.2.0/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/include/c++/8.4.0/.keep rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/include/c++/11.2.0/.keep diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/include/c++/8.4.0/.keep b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/include/c++/v1/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/include/c++/8.4.0/.keep rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/include/c++/v1/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/lib/clang_rt.crtbegin.o 
b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32/include/c++/11.2.0/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/lib/clang_rt.crtbegin.o rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32/include/c++/11.2.0/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32/include/c++/v1/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/lib/clang_rt.crtend.o rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32/include/c++/v1/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32/lib/libclang_rt.builtins.a similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/lib/libclang_rt.builtins.a rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32/lib/libclang_rt.builtins.a diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32_no-rtti/include/c++/11.2.0/.keep similarity index 100% rename from 
clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/clang_rt.crtbegin.o rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32_no-rtti/include/c++/11.2.0/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32_no-rtti/include/c++/v1/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/clang_rt.crtend.o rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32_no-rtti/include/c++/v1/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32_no-rtti/lib/libclang_rt.builtins.a similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/libclang_rt.builtins.a rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32_no-rtti/lib/libclang_rt.builtins.a diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32/include/c++/11.2.0/.keep similarity index 100% rename from 
clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/clang_rt.crtbegin.o rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32/include/c++/11.2.0/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32/include/c++/v1/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/clang_rt.crtend.o rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32/include/c++/v1/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32/lib/libclang_rt.builtins.a similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/libclang_rt.builtins.a rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32/lib/libclang_rt.builtins.a diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32_no-rtti/include/c++/11.2.0/.keep similarity index 100% rename from 
clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/clang_rt.crtbegin.o rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32_no-rtti/include/c++/11.2.0/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32_no-rtti/include/c++/v1/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/clang_rt.crtend.o rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32_no-rtti/include/c++/v1/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32_no-rtti/lib/libclang_rt.builtins.a similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/libclang_rt.builtins.a rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32_no-rtti/lib/libclang_rt.builtins.a diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f/include/c++/11.2.0/.keep similarity index 100% rename from 
clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/clang_rt.crtbegin.o rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f/include/c++/11.2.0/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f/include/c++/v1/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/clang_rt.crtend.o rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f/include/c++/v1/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f/lib/libclang_rt.builtins.a similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/libclang_rt.builtins.a rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f/lib/libclang_rt.builtins.a diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f_no-rtti/include/c++/11.2.0/.keep similarity index 100% rename from 
clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/clang_rt.crtbegin.o rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f_no-rtti/include/c++/11.2.0/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f_no-rtti/include/c++/v1/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/clang_rt.crtend.o rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f_no-rtti/include/c++/v1/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f_no-rtti/lib/libclang_rt.builtins.a similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/libclang_rt.builtins.a rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f_no-rtti/lib/libclang_rt.builtins.a diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32/include/c++/11.2.0/.keep similarity index 100% rename from 
clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/clang_rt.crtbegin.o rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32/include/c++/11.2.0/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32/include/c++/v1/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/clang_rt.crtend.o rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32/include/c++/v1/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32/lib/libclang_rt.builtins.a similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/libclang_rt.builtins.a rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32/lib/libclang_rt.builtins.a diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32_no-rtti/include/c++/11.2.0/.keep similarity index 100% rename from 
clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/clang_rt.crtbegin.o rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32_no-rtti/include/c++/11.2.0/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32_no-rtti/include/c++/v1/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/clang_rt.crtend.o rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32_no-rtti/include/c++/v1/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32_no-rtti/lib/libclang_rt.builtins.a similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/libclang_rt.builtins.a rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32_no-rtti/lib/libclang_rt.builtins.a diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/bin/xtensa-esp32s2-elf-ld b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/ld.lld similarity index 100% rename from clang/test/Driver/Inputs/multilib_xtensa_tree/bin/xtensa-esp32s2-elf-ld rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/ld.lld diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/bin/xtensa-esp32s3-elf-ld 
b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-as similarity index 100% rename from clang/test/Driver/Inputs/multilib_xtensa_tree/bin/xtensa-esp32s3-elf-ld rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-as diff --git a/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-ld b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-ld new file mode 100755 index 0000000000000..b23e55619b2ff --- /dev/null +++ b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-ld @@ -0,0 +1 @@ +#!/bin/true diff --git a/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-as b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-as new file mode 100755 index 0000000000000..b23e55619b2ff --- /dev/null +++ b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-as @@ -0,0 +1 @@ +#!/bin/true diff --git a/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-ld b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-ld new file mode 100755 index 0000000000000..b23e55619b2ff --- /dev/null +++ b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-ld @@ -0,0 +1 @@ +#!/bin/true diff --git a/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-as b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-as new file mode 100755 index 0000000000000..b23e55619b2ff --- /dev/null +++ b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-as @@ -0,0 +1 @@ +#!/bin/true diff --git a/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-ld b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-ld new file mode 100755 index 0000000000000..b23e55619b2ff --- /dev/null +++ b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-ld @@ -0,0 +1 @@ +#!/bin/true diff --git 
a/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/multilib.yaml b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/multilib.yaml new file mode 100644 index 0000000000000..a03c10799b2d4 --- /dev/null +++ b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/multilib.yaml @@ -0,0 +1,236 @@ +# +# Copyright (c) 2023, Arm Limited and affiliates. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# If you're reading this file under the name 'multilib.yaml.in' in the +# LLVM-embedded-toolchain-for-Arm source tree, then it's not valid +# YAML in its own right: it's a template that CMakeLists.txt will +# expand into a real 'multilib.yaml' containing a list of library +# variants and the flags that will select them. +# +# If you're reading it under the name 'multilib.yaml' in the build or +# install directory, then that substitution has been done. +# +# Comments in this file mostly make more sense from the +# multilib.yaml.in point of view. + +MultilibVersion: '1.0' + +# The list of library variants is substituted in by CMakeLists.txt, so +# that it can respect the LLVM_TOOLCHAIN_LIBRARY_VARIANTS setting and +# only include the set of libraries actually included in this build. 
+ +Variants: +- Dir: riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32 + Flags: + - --target=riscv32-esp-unknown-elf + - -march=rv32i_zicsr_zifencei + - -mabi=ilp32 +- Dir: riscv32-esp-unknown-elf/rv32i-zicsr-zifencei_ilp32_no-rtti + Flags: + - --target=riscv32-esp-unknown-elf + - -march=rv32i_zicsr_zifencei + - -mabi=ilp32 + - -fno-rtti +- Dir: riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32 + Flags: + - --target=riscv32-esp-unknown-elf + - -march=rv32imc_zicsr_zifencei + - -mabi=ilp32 +- Dir: riscv32-esp-unknown-elf/rv32imc-zicsr-zifencei_ilp32_no-rtti + Flags: + - --target=riscv32-esp-unknown-elf + - -march=rv32imc_zicsr_zifencei + - -mabi=ilp32 + - -fno-rtti +- Dir: riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32 + Flags: + - --target=riscv32-esp-unknown-elf + - -march=rv32imac_zicsr_zifencei + - -mabi=ilp32 +- Dir: riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32_no-rtti + Flags: + - --target=riscv32-esp-unknown-elf + - -march=rv32imac_zicsr_zifencei + - -mabi=ilp32 + - -fno-rtti +- Dir: riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f + Flags: + - --target=riscv32-esp-unknown-elf + - -march=rv32imafc_zicsr_zifencei + - -mabi=ilp32f +- Dir: riscv32-esp-unknown-elf/rv32imafc-zicsr-zifencei_ilp32f_no-rtti + Flags: + - --target=riscv32-esp-unknown-elf + - -march=rv32imafc_zicsr_zifencei + - -mabi=ilp32f + - -fno-rtti +- Dir: xtensa-esp-unknown-elf/esp32 + Flags: + - --target=xtensa-esp-unknown-elf + - -mcpu=esp32 +- Dir: xtensa-esp-unknown-elf/esp32_no-rtti + Flags: + - --target=xtensa-esp-unknown-elf + - -mcpu=esp32 + - -fno-rtti +- Dir: xtensa-esp-unknown-elf/esp32_psram + Flags: + - --target=xtensa-esp-unknown-elf + - -mcpu=esp32 + - -mfix-esp32-psram-cache-issue +- Dir: xtensa-esp-unknown-elf/esp32_psram_no-rtti + Flags: + - --target=xtensa-esp-unknown-elf + - -mcpu=esp32 + - -mfix-esp32-psram-cache-issue + - -fno-rtti +- Dir: xtensa-esp-unknown-elf/esp32s2 + Flags: + - --target=xtensa-esp-unknown-elf + - -mcpu=esp32s2 +- Dir: 
xtensa-esp-unknown-elf/esp32s2_no-rtti + Flags: + - --target=xtensa-esp-unknown-elf + - -mcpu=esp32s2 + - -fno-rtti +- Dir: xtensa-esp-unknown-elf/esp32s3 + Flags: + - --target=xtensa-esp-unknown-elf + - -mcpu=esp32s3 +- Dir: xtensa-esp-unknown-elf/esp32s3_no-rtti + Flags: + - --target=xtensa-esp-unknown-elf + - -mcpu=esp32s3 + - -fno-rtti + + +Mappings: + +# Map higher architecture versions to subsets of them, so that a +# compatible library can be found even for architectures we don't have +# specific variants for. + +# v8-M Baseline is a superset of v6-M +- Match: --target=thumbv8m\.base-none-unknown-eabi + Flags: + - --target=thumbv6m-none-unknown-eabi + +# v8.2-M Mainline is a superset of v8.1-M Mainline, in both hard and +# soft float variants. +# +# Also, v8.1-M Mainline is also a superset of v8-M Mainline, which in +# turn is a superset of v7E-M, and then of plain v7-M. We have +# libraries for all those architecture versions, but not for every +# combination of them with FPUs, so in some cases it might be +# necessary to fall back to a lower architecture in order to provide +# the needed FPU support. 
+- Match: --target=thumbv8\.[2-9]m\.main-none-unknown-eabi + Flags: + - --target=thumbv8.1m.main-none-unknown-eabi + - --target=thumbv8m.main-none-unknown-eabi + - --target=thumbv7em-none-unknown-eabi + - --target=thumbv7m-none-unknown-eabi +- Match: --target=thumbv8\.[2-9]m\.main-none-unknown-eabihf + Flags: + - --target=thumbv8.1m.main-none-unknown-eabihf + - --target=thumbv8m.main-none-unknown-eabihf + - --target=thumbv7em-none-unknown-eabihf + - --target=thumbv7m-none-unknown-eabihf +- Match: --target=thumbv8\.1m\.main-none-unknown-eabi + Flags: + - --target=thumbv8m.main-none-unknown-eabi + - --target=thumbv7em-none-unknown-eabi + - --target=thumbv7m-none-unknown-eabi +- Match: --target=thumbv8\.1m\.main-none-unknown-eabihf + Flags: + - --target=thumbv8m.main-none-unknown-eabihf + - --target=thumbv7em-none-unknown-eabihf + - --target=thumbv7m-none-unknown-eabihf +- Match: --target=thumbv8m\.main-none-unknown-eabi + Flags: + - --target=thumbv7em-none-unknown-eabi + - --target=thumbv7m-none-unknown-eabi +- Match: --target=thumbv8m\.main-none-unknown-eabihf + Flags: + - --target=thumbv7em-none-unknown-eabihf + - --target=thumbv7m-none-unknown-eabihf +- Match: --target=thumbv7em-none-unknown-eabi + Flags: + - --target=thumbv7m-none-unknown-eabi +- Match: --target=thumbv7em-none-unknown-eabihf + Flags: + - --target=thumbv7m-none-unknown-eabihf + +# Higher versions of v8-A, and v9-A, are all supersets of v8-A. (And +# of each other, in the obvious way, but we don't have any libraries +# for those at present, so there's no need to generate all their +# flags.) 
+- Match: --target=armv(8\.[1-9]|9|9\.[1-9])a-none-unknown-eabi + Flags: + - --target=armv8a-none-unknown-eabi + +# -march extensions +- Match: -march=thumbv8\.[1-9]m\.main.*\+fp16.* + Flags: + - -march=thumbv8.1m.main+fp16 +- Match: -march=thumbv8\.[1-9]m\.main.*\+dsp.*\+mve.* + Flags: + - -march=thumbv8.1m.main+dsp+mve +- Match: -march=thumbv8\.[1-9]m\.main.*\+mve\.fp.*\+fp16.*\+lob.* + Flags: + - -march=thumbv8.1m.main+fp16+lob+mve.fp + +# Hierarchy among FPUs: fpvN-d16 is a superset of fpvN-sp-d16, and +# fpvN-d16 is a superset of fpv[N-1]-d16, for all N. +# +# We don't consider any hardware FP configuration to be compatible +# with -mfpu=none. It would work in most cases to cross-call between +# code compiled for an FPU or no FPU, if you were using the soft float +# ABI. But it wouldn't work in all cases: setjmp needs to know whether +# to save FP registers in the jmp_buf, so a non-FPU-aware setjmp would +# not behave correctly if linked into an otherwise FPU-using +# application. Similarly for exception unwinding. So we don't permit +# selecting an -mfpu=none library as a fallback for any hard-FP +# library. 
+- Match: -mfpu=fpv5-d16 + Flags: + - -mfpu=fpv4-d16 + - -mfpu=fpv5-sp-d16 + - -mfpu=fpv4-sp-d16 +- Match: -mfpu=fpv5-sp-d16 + Flags: + - -mfpu=fpv4-sp-d16 +- Match: -mfpu=fpv4-d16 + Flags: + - -mfpu=fpv4-sp-d16 +- Match: -mfpu=fp-armv8-fullfp16-d16 + Flags: + - -mfpu=fp-armv8-fullfp16-sp-d16 + +# RISCV -march extensions +- Match: -march=rv32i + Flags: + - -march=rv32i_zicsr_zifencei +- Match: -march=rv32imc + Flags: + - -march=rv32imc_zicsr_zifencei +- Match: -march=rv32imac + Flags: + - -march=rv32imac_zicsr_zifencei +- Match: -march=rv32imafc + Flags: + - -march=rv32imafc_zicsr_zifencei diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32/include/c++/11.2.0/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/clang_rt.crtbegin.o rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32/include/c++/11.2.0/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32/include/c++/v1/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/clang_rt.crtend.o rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32/include/c++/v1/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/libclang_rt.builtins.a 
b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32/lib/libclang_rt.builtins.a similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/libclang_rt.builtins.a rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32/lib/libclang_rt.builtins.a diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/crtbegin.o b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_no-rtti/include/c++/11.2.0/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/crtbegin.o rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_no-rtti/include/c++/11.2.0/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/crtend.o b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_no-rtti/include/c++/v1/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/crtend.o rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_no-rtti/include/c++/v1/.keep diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_no-rtti/lib/libclang_rt.builtins.a similarity index 100% rename from clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/libclang_rt.builtins.a rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_no-rtti/lib/libclang_rt.builtins.a diff 
--git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/crtbegin.o b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_psram/include/c++/11.2.0/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/crtbegin.o rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_psram/include/c++/11.2.0/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/crtend.o b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_psram/include/c++/v1/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/crtend.o rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_psram/include/c++/v1/.keep diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_psram/lib/libclang_rt.builtins.a similarity index 100% rename from clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/libclang_rt.builtins.a rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_psram/lib/libclang_rt.builtins.a diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/no-rtti/crtbegin.o b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_psram_no-rtti/include/c++/11.2.0/.keep similarity index 100% rename from 
clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/no-rtti/crtbegin.o rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_psram_no-rtti/include/c++/11.2.0/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/no-rtti/crtend.o b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_psram_no-rtti/include/c++/v1/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32/no-rtti/crtend.o rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_psram_no-rtti/include/c++/v1/.keep diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_psram_no-rtti/lib/libclang_rt.builtins.a similarity index 100% rename from clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/libclang_rt.builtins.a rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32_psram_no-rtti/lib/libclang_rt.builtins.a diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/crtbegin.o b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s2/include/c++/11.2.0/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/crtbegin.o rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s2/include/c++/11.2.0/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/crtend.o 
b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s2/include/c++/v1/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/crtend.o rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s2/include/c++/v1/.keep diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s2/lib/libclang_rt.builtins.a similarity index 100% rename from clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/libclang_rt.builtins.a rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s2/lib/libclang_rt.builtins.a diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/no-rtti/crtbegin.o b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s2_no-rtti/include/c++/11.2.0/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/no-rtti/crtbegin.o rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s2_no-rtti/include/c++/11.2.0/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/no-rtti/crtend.o b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s2_no-rtti/include/c++/v1/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/no-rtti/crtend.o rename to 
clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s2_no-rtti/include/c++/v1/.keep diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s2_no-rtti/lib/libclang_rt.builtins.a similarity index 100% rename from clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/libclang_rt.builtins.a rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s2_no-rtti/lib/libclang_rt.builtins.a diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/crtbegin.o b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s3/include/c++/11.2.0/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/crtbegin.o rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s3/include/c++/11.2.0/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/crtend.o b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s3/include/c++/v1/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/crtend.o rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s3/include/c++/v1/.keep diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s3/lib/libclang_rt.builtins.a similarity index 100% rename 
from clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/libclang_rt.builtins.a rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s3/lib/libclang_rt.builtins.a diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/no-rtti/crtbegin.o b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s3_no-rtti/include/c++/11.2.0/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/no-rtti/crtbegin.o rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s3_no-rtti/include/c++/11.2.0/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/no-rtti/crtend.o b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s3_no-rtti/include/c++/v1/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f/no-rtti/crtend.o rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s3_no-rtti/include/c++/v1/.keep diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s3_no-rtti/lib/libclang_rt.builtins.a similarity index 100% rename from clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/libclang_rt.builtins.a rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32s3_no-rtti/lib/libclang_rt.builtins.a diff --git 
a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/crtbegin.o b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/include/c++/11.2.0/.keep similarity index 100% rename from clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/crtbegin.o rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/lib/clang-runtimes/xtensa-esp-unknown-elf/include/c++/11.2.0/.keep diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/crtend.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/crtend.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/no-rtti/crtbegin.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/no-rtti/crtbegin.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/no-rtti/crtend.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32/no-rtti/crtend.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/crt0.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/crt0.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32i/ilp32/crt0.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32i/ilp32/crt0.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32i/ilp32/no-rtti/crt0.o 
b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32i/ilp32/no-rtti/crt0.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imac/ilp32/crt0.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imac/ilp32/crt0.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imac/ilp32/no-rtti/crt0.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imac/ilp32/no-rtti/crt0.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imafc/ilp32f/crt0.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imafc/ilp32f/crt0.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imafc/ilp32f/no-rtti/crt0.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imafc/ilp32f/no-rtti/crt0.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imc/ilp32/crt0.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imc/ilp32/crt0.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imc/ilp32/no-rtti/crt0.o b/clang/test/Driver/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib/rv32imc/ilp32/no-rtti/crt0.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/clang_rt.crtbegin.o 
b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/clang_rt.crtbegin.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/clang_rt.crtend.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/clang_rt.crtbegin.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/clang_rt.crtend.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/clang_rt.crtbegin.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/clang_rt.crtend.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/clang_rt.crtbegin.o deleted file mode 
100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/clang_rt.crtend.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/clang_rt.crtbegin.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/clang_rt.crtend.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/clang_rt.crtbegin.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/clang_rt.crtend.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/clang_rt.crtbegin.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/clang_rt.crtend.o 
b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/clang_rt.crtend.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/clang_rt.crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/clang_rt.crtbegin.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/clang_rt.crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/clang_rt.crtend.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/libclang_rt.builtins.a b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/libclang_rt.builtins.a deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/crtbegin.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/crtend.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/crtbegin.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/crtend.o 
b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/crtend.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/no-rtti/crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/no-rtti/crtbegin.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/no-rtti/crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/esp32-psram/no-rtti/crtend.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/no-rtti/crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/no-rtti/crtbegin.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/no-rtti/crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/no-rtti/crtend.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/crtbegin.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/crtend.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/no-rtti/crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/no-rtti/crtbegin.o deleted file 
mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/no-rtti/crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/no-rtti/crtend.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/crtbegin.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/crtend.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/no-rtti/crtbegin.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/no-rtti/crtbegin.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/no-rtti/crtend.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/no-rtti/crtend.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/crt0.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/crt0.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/esp32-psram/crt0.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/esp32-psram/crt0.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/esp32-psram/no-rtti/crt0.o 
b/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/esp32-psram/no-rtti/crt0.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/no-rtti/crt0.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32-elf/lib/no-rtti/crt0.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s2-elf/lib/crt0.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s2-elf/lib/crt0.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s2-elf/lib/no-rtti/crt0.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s2-elf/lib/no-rtti/crt0.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s3-elf/lib/crt0.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s3-elf/lib/crt0.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s3-elf/lib/no-rtti/crt0.o b/clang/test/Driver/Inputs/multilib_xtensa_tree/xtensa-esp32s3-elf/lib/no-rtti/crt0.o deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/clang/test/Driver/baremetal-esp.cpp b/clang/test/Driver/baremetal-esp.cpp new file mode 100644 index 0000000000000..18c1888daad48 --- /dev/null +++ b/clang/test/Driver/baremetal-esp.cpp @@ -0,0 +1,618 @@ +// UNSUPPORTED: system-windows + +//////////////////// RISCV ///////////////////////// + +// IMAC is default and it re-uses IMC sub-dirs + +// RUN: rm -rf %t +// RUN: mkdir -p %t/basic_riscv32_esp_tree/bin +// RUN: ln -s %clang %t/basic_riscv32_esp_tree/bin/clang +// RUN: ln -s %S/Inputs/basic_riscv32_esp_tree/bin/ld.lld %t/basic_riscv32_esp_tree/bin/ld.lld +// RUN: ln -s %S/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-as 
%t/basic_riscv32_esp_tree/bin/riscv32-esp-elf-as +// RUN: ln -s %S/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-ld %t/basic_riscv32_esp_tree/bin/riscv32-esp-elf-ld +// RUN: ln -s %S/Inputs/basic_riscv32_esp_tree/lib %t/basic_riscv32_esp_tree/lib + +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf \ +// RUN: -L some/directory/user/asked/for \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAC %s +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf \ +// RUN: -march=rv32imac -mabi=ilp32 \ +// RUN: -L some/directory/user/asked/for \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAC %s +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf \ +// RUN: -rtlib=compiler-rt \ +// RUN: -L some/directory/user/asked/for \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAC %s +// CHECK-ESP-RV32IMAC: "-cc1" "-triple" "riscv32-esp-unknown-elf" +// CHECK-ESP-RV32IMAC-SAME: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-RV32IMAC-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-RV32IMAC-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-RV32IMAC-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-RV32IMAC-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-RV32IMAC-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}include" +// CHECK-ESP-RV32IMAC-SAME: "-x" "c++" "{{.*}}baremetal-esp.cpp" +// CHECK-ESP-RV32IMAC-NEXT: ld.lld{{(.exe)?}}" +// CHECK-ESP-RV32IMAC-SAME: 
"--sysroot=[[SYSROOT]]" +// CHECK-ESP-RV32IMAC-SAME: "-m" "elf32lriscv" +// CHECK-ESP-RV32IMAC-SAME: "-o" "a.out" +// CHECK-ESP-RV32IMAC-SAME: "-X" "{{.*}}.o" +// CHECK-ESP-RV32IMAC-SAME: "-Lsome{{[/\\]+}}directory{{[/\\]+}}user{{[/\\]+}}asked{{[/\\]+}}for" +// CHECK-ESP-RV32IMAC-SAME: "-L[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}lib" +// CHECK-ESP-RV32IMAC-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" +// CHECK-ESP-RV32IMAC-SAME: "-lclang_rt.builtins" + +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf -fno-integrated-as \ +// RUN: -L some/directory/user/asked/for \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAC-FORCEAS %s +// CHECK-ESP-RV32IMAC-FORCEAS: "-cc1" "-triple" "riscv32-esp-unknown-elf" +// CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}include" +// CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-x" "c++" "{{.*}}baremetal-esp.cpp" +// CHECK-ESP-RV32IMAC-FORCEAS-NEXT: riscv32-esp-elf-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" "-march=rv32imac" "-mabi=ilp32" +// CHECK-ESP-RV32IMAC-FORCEAS-NEXT: ld.lld{{(.exe)?}}" +// CHECK-ESP-RV32IMAC-FORCEAS-SAME: "--sysroot=[[SYSROOT]]" +// CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-m" 
"elf32lriscv" +// CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-o" "a.out" +// CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-X" "{{.*}}.o" +// CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-Lsome{{[/\\]+}}directory{{[/\\]+}}user{{[/\\]+}}asked{{[/\\]+}}for" +// CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-L[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}lib" +// CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" +// CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-lclang_rt.builtins" + +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf --ld-path=riscv32-esp-elf-ld \ +// RUN: -L some/directory/user/asked/for \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAC-FORCELD %s +// CHECK-ESP-RV32IMAC-FORCELD: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-RV32IMAC-FORCELD: riscv32-esp-elf-ld{{(.exe)?}}" +// CHECK-ESP-RV32IMAC-FORCELD-SAME: "--sysroot=[[SYSROOT]]" +// CHECK-ESP-RV32IMAC-FORCELD-SAME: "-m" "elf32lriscv" +// CHECK-ESP-RV32IMAC-FORCELD-SAME: "-o" "a.out" +// CHECK-ESP-RV32IMAC-FORCELD-SAME: "-X" "{{.*}}.o" +// CHECK-ESP-RV32IMAC-FORCELD-SAME: "-Lsome{{[/\\]+}}directory{{[/\\]+}}user{{[/\\]+}}asked{{[/\\]+}}for" +// CHECK-ESP-RV32IMAC-FORCELD-SAME: "-L[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}lib" +// CHECK-ESP-RV32IMAC-FORCELD-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" +// CHECK-ESP-RV32IMAC-FORCELD-SAME: "-lclang_rt.builtins" + +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf \ +// RUN: -nostdlibinc -nobuiltininc \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAC-LIBINC %s +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf \ +// RUN: -nostdinc \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: 
| FileCheck --check-prefix=CHECK-ESP-RV32IMAC-LIBINC %s +// CHECK-ESP-RV32IMAC-LIBINC-NOT: "-internal-isystem" + +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf \ +// RUN: -rtlib=libgcc \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAC-LIBGCC %s +// CHECK-ESP-RV32IMAC-LIBGCC-NOT: "-lclang_rt.builtins" +// CHECK-ESP-RV32IMAC-LIBGCC: "-lgcc" + +// RUN: %t/basic_riscv32_esp_tree/bin/clang --driver-mode=g++ %s -### 2>&1 --target=riscv32-esp-elf \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAC-DEFAULTSTDCXX %s +// RUN: %t/basic_riscv32_esp_tree/bin/clang --driver-mode=g++ %s -### 2>&1 --target=riscv32-esp-elf \ +// RUN: -stdlib=libstdc++ \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAC-DEFAULTSTDCXX %s +// CHECK-ESP-RV32IMAC-DEFAULTSTDCXX: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-RV32IMAC-DEFAULTSTDCXX-NOT: "-internal-isystem" "{{[^"]+}}{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}v1" +// CHECK-ESP-RV32IMAC-DEFAULTSTDCXX-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-RV32IMAC-DEFAULTSTDCXX-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-RV32IMAC-DEFAULTSTDCXX-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-RV32IMAC-DEFAULTSTDCXX-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-RV32IMAC-DEFAULTSTDCXX-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}include" +// CHECK-ESP-RV32IMAC-DEFAULTSTDCXX: ld.lld{{(.exe)?}}" +// CHECK-ESP-RV32IMAC-DEFAULTSTDCXX-SAME: "--sysroot=[[SYSROOT]]" +// 
CHECK-ESP-RV32IMAC-DEFAULTSTDCXX-SAME: "-m" "elf32lriscv" +// CHECK-ESP-RV32IMAC-DEFAULTSTDCXX-SAME: "-o" "a.out" +// CHECK-ESP-RV32IMAC-DEFAULTSTDCXX-SAME: "-X" "{{.*}}.o" +// CHECK-ESP-RV32IMAC-DEFAULTSTDCXX-SAME: "-L[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}lib" +// CHECK-ESP-RV32IMAC-DEFAULTSTDCXX-SAME: "-lstdc++" +// CHECK-ESP-RV32IMAC-DEFAULTSTDCXX-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" +// CHECK-ESP-RV32IMAC-DEFAULTSTDCXX-SAME: "-lclang_rt.builtins" + +// RUN: %t/basic_riscv32_esp_tree/bin/clang --driver-mode=g++ %s -### 2>&1 --target=riscv32-esp-elf \ +// RUN: -stdlib=libc++ \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAC-LIBCXX %s +// CHECK-ESP-RV32IMAC-LIBCXX: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-RV32IMAC-LIBCXX-NOT: "-internal-isystem" "{{[^"]+}}{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}{{[^v].*}}" +// CHECK-ESP-RV32IMAC-LIBCXX-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-RV32IMAC-LIBCXX-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}v1" +// CHECK-ESP-RV32IMAC-LIBCXX-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-RV32IMAC-LIBCXX-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}include" +// CHECK-ESP-RV32IMAC-LIBCXX: ld.lld{{(.exe)?}}" +// CHECK-ESP-RV32IMAC-LIBCXX-SAME: "--sysroot=[[SYSROOT]]" +// CHECK-ESP-RV32IMAC-LIBCXX-SAME: "-m" "elf32lriscv" +// CHECK-ESP-RV32IMAC-LIBCXX-SAME: "-o" "a.out" +// CHECK-ESP-RV32IMAC-LIBCXX-SAME: "-X" "{{.*}}.o" +// CHECK-ESP-RV32IMAC-LIBCXX-SAME: "-L[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}lib" +// CHECK-ESP-RV32IMAC-LIBCXX-SAME: "-lc++" "-lc++abi" "-lunwind" +// CHECK-ESP-RV32IMAC-LIBCXX-SAME: 
"-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" +// CHECK-ESP-RV32IMAC-LIBCXX-SAME: "-lclang_rt.builtins" + +// RUN: %t/basic_riscv32_esp_tree/bin/clang --driver-mode=g++ %s -### 2>&1 --target=riscv32-esp-elf \ +// RUN: -nodefaultlibs \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAC-NDL %s +// CHECK-ESP-RV32IMAC-NDL: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-RV32IMAC-NDL-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-RV32IMAC-NDL: ld.lld{{(.exe)?}}" +// CHECK-ESP-RV32IMAC-NDL-SAME: "-L[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}lib" + +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf \ +// RUN: -march=rv32i -mabi=ilp32 \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32I %s +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf \ +// RUN: -march=rv32ic -mabi=ilp32 \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32I %s +// CHECK-ESP-RV32I: "-cc1" "-triple" "riscv32-esp-unknown-elf" +// CHECK-ESP-RV32I-SAME: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-RV32I-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-RV32I-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32i-zicsr-zifencei_ilp32{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-RV32I-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-RV32I-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-RV32I-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32i-zicsr-zifencei_ilp32{{[/\\]+}}include" +// CHECK-ESP-RV32I-NEXT: ld.lld{{(.exe)?}}" +// CHECK-ESP-RV32I-SAME: 
"--sysroot=[[SYSROOT]]" +// CHECK-ESP-RV32I-SAME: "-L[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32i-zicsr-zifencei_ilp32{{[/\\]+}}lib" + +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf -fno-integrated-as \ +// RUN: -march=rv32i -mabi=ilp32 \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32I-FORCEAS %s +// CHECK-ESP-RV32I-FORCEAS: riscv32-esp-elf-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" "-march=rv32i" "-mabi=ilp32" + +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf \ +// RUN: -march=rv32i -mabi=ilp32 \ +// RUN: -fno-rtti \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32I-NORTTI %s +// CHECK-ESP-RV32I-NORTTI: "-cc1" "-triple" "riscv32-esp-unknown-elf" +// CHECK-ESP-RV32I-NORTTI-SAME: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-RV32I-NORTTI-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-RV32I-NORTTI-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32i-zicsr-zifencei_ilp32_no-rtti{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-RV32I-NORTTI-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-RV32I-NORTTI-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-RV32I-NORTTI-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32i-zicsr-zifencei_ilp32_no-rtti{{[/\\]+}}include" +// CHECK-ESP-RV32I-NORTTI-NEXT: ld.lld{{(.exe)?}}" +// CHECK-ESP-RV32I-NORTTI-SAME: "-L[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32i-zicsr-zifencei_ilp32_no-rtti{{[/\\]+}}lib" + +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf \ +// RUN: -march=rv32im -mabi=ilp32 \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | 
FileCheck --check-prefix=CHECK-ESP-RV32IMC %s +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf \ +// RUN: -march=rv32imc -mabi=ilp32 \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMC %s +// CHECK-ESP-RV32IMC: "-cc1" "-triple" "riscv32-esp-unknown-elf" +// CHECK-ESP-RV32IMC-SAME: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-RV32IMC-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-RV32IMC-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imc-zicsr-zifencei_ilp32{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-RV32IMC-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-RV32IMC-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-RV32IMC-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imc-zicsr-zifencei_ilp32{{[/\\]+}}include" +// CHECK-ESP-RV32IMC-NEXT: ld.lld{{(.exe)?}}" +// CHECK-ESP-RV32IMC-SAME: "-L[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imc-zicsr-zifencei_ilp32{{[/\\]+}}lib" + +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf \ +// RUN: -march=rv32imc -mabi=ilp32 \ +// RUN: -fno-rtti \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMC-NORTTI %s +// CHECK-ESP-RV32IMC-NORTTI: "-cc1" "-triple" "riscv32-esp-unknown-elf" +// CHECK-ESP-RV32IMC-NORTTI-SAME: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-RV32IMC-NORTTI-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-RV32IMC-NORTTI-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imc-zicsr-zifencei_ilp32_no-rtti{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-RV32IMC-NORTTI-SAME: "-internal-isystem" 
"[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-RV32IMC-NORTTI-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-RV32IMC-NORTTI-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imc-zicsr-zifencei_ilp32_no-rtti{{[/\\]+}}include" +// CHECK-ESP-RV32IMC-NORTTI-NEXT: ld.lld{{(.exe)?}}" +// CHECK-ESP-RV32IMC-NORTTI-SAME: "-L[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imc-zicsr-zifencei_ilp32_no-rtti{{[/\\]+}}lib" + +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf -fno-integrated-as \ +// RUN: -march=rv32im -mabi=ilp32 \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IM-FORCEAS %s +// CHECK-ESP-RV32IM-FORCEAS: riscv32-esp-elf-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" "-march=rv32im" "-mabi=ilp32" + +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf -fno-integrated-as \ +// RUN: -march=rv32imc -mabi=ilp32 \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMC-FORCEAS %s +// CHECK-ESP-RV32IMC-FORCEAS: riscv32-esp-elf-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" "-march=rv32imc" "-mabi=ilp32" + +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf -fno-integrated-as \ +// RUN: -march=rv32imac -mabi=ilp32 \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAC-FORCEAS2 %s +// CHECK-ESP-RV32IMAC-FORCEAS2: riscv32-esp-elf-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" "-march=rv32imac" "-mabi=ilp32" + +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf -march=rv32imafc -mabi=ilp32f \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAFC %s +// RUN: 
%t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf -march=rv32imafdc -mabi=ilp32f \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAFC %s +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf -march=rv32gc -mabi=ilp32f \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAFC %s +// CHECK-ESP-RV32IMAFC: "-cc1" "-triple" "riscv32-esp-unknown-elf" +// CHECK-ESP-RV32IMAFC-SAME: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-RV32IMAFC-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-RV32IMAFC-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imafc-zicsr-zifencei_ilp32f{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-RV32IMAFC-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-RV32IMAFC-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-RV32IMAFC-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imafc-zicsr-zifencei_ilp32f{{[/\\]+}}include" +// CHECK-ESP-RV32IMAFC-NEXT: ld.lld{{(.exe)?}}" +// CHECK-ESP-RV32IMAFC-SAME: "-L[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imafc-zicsr-zifencei_ilp32f{{[/\\]+}}lib" + +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf -march=rv32imafc -mabi=ilp32f \ +// RUN: -fno-rtti \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAFC-NORTTI %s +// CHECK-ESP-RV32IMAFC-NORTTI: "-cc1" "-triple" "riscv32-esp-unknown-elf" +// CHECK-ESP-RV32IMAFC-NORTTI-SAME: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-RV32IMAFC-NORTTI-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-RV32IMAFC-NORTTI-SAME: "-internal-isystem" 
"[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imafc-zicsr-zifencei_ilp32f_no-rtti{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-RV32IMAFC-NORTTI-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-RV32IMAFC-NORTTI-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-RV32IMAFC-NORTTI-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imafc-zicsr-zifencei_ilp32f_no-rtti{{[/\\]+}}include" +// CHECK-ESP-RV32IMAFC-NORTTI-NEXT: ld.lld{{(.exe)?}}" +// CHECK-ESP-RV32IMAFC-NORTTI-SAME: "-L[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imafc-zicsr-zifencei_ilp32f_no-rtti{{[/\\]+}}lib" + +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf -fno-integrated-as \ +// RUN: -march=rv32imafc -mabi=ilp32f \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAFC-FORCEAS %s +// CHECK-ESP-RV32IMAFC-FORCEAS: riscv32-esp-elf-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" "-march=rv32imafc" "-mabi=ilp32f" + +// Check that compiler-rt library without the arch filename suffix will +// be used if present. +// RUN: rm -rf %T/baremetal_clang_rt_noarch +// RUN: mkdir -p %T/baremetal_clang_rt_noarch/lib +// RUN: touch %T/baremetal_clang_rt_noarch/lib/libclang_rt.builtins.a +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 \ +// RUN: --target=riscv32-esp-elf \ +// RUN: --sysroot=%T/baremetal_clang_rt_noarch \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32-CLANGRT-NOARCH %s +// CHECK-ESP-RV32-CLANGRT-NOARCH: "-lclang_rt.builtins" +// CHECK-ESP-RV32-CLANGRT-NOARCH-NOT: "-lclang_rt.builtins-riscv32" + +// Check that compiler-rt library with the arch filename suffix will be +// used if present.
+// RUN: rm -rf %T/baremetal_clang_rt_arch +// RUN: mkdir -p %T/baremetal_clang_rt_arch/lib +// RUN: touch %T/baremetal_clang_rt_arch/lib/libclang_rt.builtins-riscv32.a +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 \ +// RUN: --target=riscv32-esp-elf \ +// RUN: --sysroot=%T/baremetal_clang_rt_arch \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32-CLANGRT-ARCH %s +// CHECK-ESP-RV32-CLANGRT-ARCH: "-lclang_rt.builtins-riscv32" +// CHECK-ESP-RV32-CLANGRT-ARCH-NOT: "-lclang_rt.builtins" + + +//////////////////// XTENSA ///////////////////////// + +// RUN: rm -rf %t +// RUN: mkdir -p %t/basic_xtensa_esp_tree/bin +// RUN: ln -s %clang %t/basic_xtensa_esp_tree/bin/clang +// RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/bin/ld.lld %t/basic_xtensa_esp_tree/bin/ld.lld +// RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-as %t/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-as +// RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-ld %t/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-ld +// RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-as %t/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-as +// RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-ld %t/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-ld +// RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-as %t/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-as +// RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-ld %t/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-ld +// RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/lib %t/basic_xtensa_esp_tree/lib + +// ESP32 is default + +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf \ +// RUN: -L some/directory/user/asked/for \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32 %s +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf -mcpu=esp32 \ +// RUN: -L 
some/directory/user/asked/for \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32 %s +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf \ +// RUN: -rtlib=compiler-rt \ +// RUN: -L some/directory/user/asked/for \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32 %s +// CHECK-ESP-ESP32: "-cc1" "-triple" "xtensa-esp-unknown-elf" +// CHECK-ESP-ESP32-SAME: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-ESP32-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-ESP32-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-ESP32-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}include" +// CHECK-ESP-ESP32-SAME: "-x" "c++" "{{.*}}baremetal-esp.cpp" +// CHECK-ESP-ESP32-NEXT: ld.lld{{(.exe)?}}" +// CHECK-ESP-ESP32-SAME: "--sysroot=[[SYSROOT]]" +// CHECK-ESP-ESP32-SAME: "-o" "a.out" +// CHECK-ESP-ESP32-SAME: "-X" "{{.*}}.o" +// CHECK-ESP-ESP32-SAME: "-Lsome{{[/\\]+}}directory{{[/\\]+}}user{{[/\\]+}}asked{{[/\\]+}}for" +// CHECK-ESP-ESP32-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}lib" +// CHECK-ESP-ESP32-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" +// CHECK-ESP-ESP32-SAME: "-lclang_rt.builtins" + +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf -fno-integrated-as \ +// RUN: -L some/directory/user/asked/for \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32-FORCEAS %s +// CHECK-ESP-ESP32-FORCEAS: "-cc1" 
"-triple" "xtensa-esp-unknown-elf" +// CHECK-ESP-ESP32-FORCEAS-SAME: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-ESP32-FORCEAS-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-ESP32-FORCEAS-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32-FORCEAS-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32-FORCEAS-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-ESP32-FORCEAS-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}include" +// CHECK-ESP-ESP32-FORCEAS-SAME: "-x" "c++" "{{.*}}baremetal-esp.cpp" +// CHECK-ESP-ESP32-FORCEAS-NEXT: xtensa-esp32-elf-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" +// CHECK-ESP-ESP32-FORCEAS-NEXT: ld.lld{{(.exe)?}}" +// CHECK-ESP-ESP32-FORCEAS-SAME: "--sysroot=[[SYSROOT]]" +// CHECK-ESP-ESP32-FORCEAS-SAME: "-o" "a.out" +// CHECK-ESP-ESP32-FORCEAS-SAME: "-X" "{{.*}}.o" +// CHECK-ESP-ESP32-FORCEAS-SAME: "-Lsome{{[/\\]+}}directory{{[/\\]+}}user{{[/\\]+}}asked{{[/\\]+}}for" +// CHECK-ESP-ESP32-FORCEAS-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}lib" +// CHECK-ESP-ESP32-FORCEAS-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" +// CHECK-ESP-ESP32-FORCEAS-SAME: "-lclang_rt.builtins" + +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf --ld-path=xtensa-esp32-elf-ld \ +// RUN: -L some/directory/user/asked/for \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32-FORCELD %s +// CHECK-ESP-ESP32-FORCELD: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-ESP32-FORCELD-NEXT: xtensa-esp32-elf-ld{{(.exe)?}}" +// CHECK-ESP-ESP32-FORCELD-SAME: "--sysroot=[[SYSROOT]]" +// CHECK-ESP-ESP32-FORCELD-SAME: "-o" "a.out" +// 
CHECK-ESP-ESP32-FORCELD-SAME: "-X" "{{.*}}.o" +// CHECK-ESP-ESP32-FORCELD-SAME: "-Lsome{{[/\\]+}}directory{{[/\\]+}}user{{[/\\]+}}asked{{[/\\]+}}for" +// CHECK-ESP-ESP32-FORCELD-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}lib" +// CHECK-ESP-ESP32-FORCELD-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" +// CHECK-ESP-ESP32-FORCELD-SAME: "-lclang_rt.builtins" + +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf \ +// RUN: -nostdlibinc -nobuiltininc \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32-LIBINC %s +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf \ +// RUN: -nostdinc \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32-LIBINC %s +// CHECK-ESP-ESP32-LIBINC-NOT: "-internal-isystem" + +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf \ +// RUN: -rtlib=libgcc \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32-LIBGCC %s +// CHECK-ESP-ESP32-LIBGCC-NOT: "-lclang_rt.builtins" +// CHECK-ESP-ESP32-LIBGCC: "-lgcc" + +// RUN: %t/basic_xtensa_esp_tree/bin/clang --driver-mode=g++ %s -### 2>&1 --target=xtensa-esp-elf \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32-DEFAULTSTDCXX %s +// RUN: %t/basic_xtensa_esp_tree/bin/clang --driver-mode=g++ %s -### 2>&1 --target=xtensa-esp-elf \ +// RUN: -stdlib=libstdc++ \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32-DEFAULTSTDCXX %s +// CHECK-ESP-ESP32-DEFAULTSTDCXX: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-ESP32-DEFAULTSTDCXX-NOT: "-internal-isystem" "{{[^"]+}}{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}v1" +// 
CHECK-ESP-ESP32-DEFAULTSTDCXX-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-ESP32-DEFAULTSTDCXX-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32-DEFAULTSTDCXX-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32-DEFAULTSTDCXX-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-ESP32-DEFAULTSTDCXX-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}include" +// CHECK-ESP-ESP32-DEFAULTSTDCXX: ld.lld{{(.exe)?}}" +// CHECK-ESP-ESP32-DEFAULTSTDCXX-SAME: "--sysroot=[[SYSROOT]]" +// CHECK-ESP-ESP32-DEFAULTSTDCXX-SAME: "-o" "a.out" +// CHECK-ESP-ESP32-DEFAULTSTDCXX-SAME: "-X" "{{.*}}.o" +// CHECK-ESP-ESP32-DEFAULTSTDCXX-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}lib" +// CHECK-ESP-ESP32-DEFAULTSTDCXX-SAME: "-lstdc++" +// CHECK-ESP-ESP32-DEFAULTSTDCXX-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" +// CHECK-ESP-ESP32-DEFAULTSTDCXX-SAME: "-lclang_rt.builtins" + +// RUN: %t/basic_xtensa_esp_tree/bin/clang --driver-mode=g++ %s -### 2>&1 --target=xtensa-esp-elf \ +// RUN: -stdlib=libc++ \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32-LIBCXX %s +// CHECK-ESP-ESP32-LIBCXX: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-ESP32-LIBCXX-NOT: "-internal-isystem" "{{[^"]+}}{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}{{[^v].*}}" +// CHECK-ESP-ESP32-LIBCXX-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-ESP32-LIBCXX-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}v1" +// CHECK-ESP-ESP32-LIBCXX-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-ESP32-LIBCXX-SAME: "-internal-isystem" 
"[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}include" +// CHECK-ESP-ESP32-LIBCXX: ld.lld{{(.exe)?}}" +// CHECK-ESP-ESP32-LIBCXX-SAME: "--sysroot=[[SYSROOT]]" +// CHECK-ESP-ESP32-LIBCXX-SAME: "-o" "a.out" +// CHECK-ESP-ESP32-LIBCXX-SAME: "-X" "{{.*}}.o" +// CHECK-ESP-ESP32-LIBCXX-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}lib" +// CHECK-ESP-ESP32-LIBCXX-SAME: "-lc++" "-lc++abi" "-lunwind" +// CHECK-ESP-ESP32-LIBCXX-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" +// CHECK-ESP-ESP32-LIBCXX-SAME: "-lclang_rt.builtins" + +// RUN: %t/basic_xtensa_esp_tree/bin/clang --driver-mode=g++ %s -### 2>&1 --target=xtensa-esp-elf \ +// RUN: -nodefaultlibs \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32-NDL %s +// CHECK-ESP-ESP32-NDL: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-ESP32-NDL-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-ESP32-NDL: ld.lld{{(.exe)?}}" +// CHECK-ESP-ESP32-NDL-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}lib" + +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf \ +// RUN: -mcpu=esp32 -fno-rtti \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32_NORTTI %s +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf \ +// RUN: -fno-rtti \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32_NORTTI %s +// CHECK-ESP-ESP32_NORTTI: "-cc1" "-triple" "xtensa-esp-unknown-elf" +// CHECK-ESP-ESP32_NORTTI-SAME: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-ESP32_NORTTI-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-ESP32_NORTTI-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32_no-rtti{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// 
CHECK-ESP-ESP32_NORTTI-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32_NORTTI-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-ESP32_NORTTI-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32_no-rtti{{[/\\]+}}include" +// CHECK-ESP-ESP32_NORTTI-NEXT: ld.lld{{(.exe)?}}" +// CHECK-ESP-ESP32_NORTTI-SAME: "--sysroot=[[SYSROOT]]" +// CHECK-ESP-ESP32_NORTTI-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32_no-rtti{{[/\\]+}}lib" + +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf \ +// RUN: -mcpu=esp32 -mfix-esp32-psram-cache-issue \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32_PSRAM %s +// CHECK-ESP-ESP32_PSRAM: "-cc1" "-triple" "xtensa-esp-unknown-elf" +// CHECK-ESP-ESP32_PSRAM-SAME: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-ESP32_PSRAM-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-ESP32_PSRAM-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32_psram{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32_PSRAM-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32_PSRAM-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-ESP32_PSRAM-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32_psram{{[/\\]+}}include" +// CHECK-ESP-ESP32_PSRAM-NEXT: ld.lld{{(.exe)?}}" +// CHECK-ESP-ESP32_PSRAM-SAME: "--sysroot=[[SYSROOT]]" +// CHECK-ESP-ESP32_PSRAM-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32_psram{{[/\\]+}}lib" + +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf \ +// RUN: -mcpu=esp32 -fno-rtti -mfix-esp32-psram-cache-issue \ +// RUN: 
--sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32_PSRAM_NORTTI %s +// CHECK-ESP-ESP32_PSRAM_NORTTI: "-cc1" "-triple" "xtensa-esp-unknown-elf" +// CHECK-ESP-ESP32_PSRAM_NORTTI-SAME: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-ESP32_PSRAM_NORTTI-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-ESP32_PSRAM_NORTTI-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32_psram_no-rtti{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32_PSRAM_NORTTI-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32_PSRAM_NORTTI-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-ESP32_PSRAM_NORTTI-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32_psram_no-rtti{{[/\\]+}}include" +// CHECK-ESP-ESP32_PSRAM_NORTTI-NEXT: ld.lld{{(.exe)?}}" +// CHECK-ESP-ESP32_PSRAM_NORTTI-SAME: "--sysroot=[[SYSROOT]]" +// CHECK-ESP-ESP32_PSRAM_NORTTI-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32_psram_no-rtti{{[/\\]+}}lib" + +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf \ +// RUN: -mcpu=esp32s2 \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32S2 %s +// CHECK-ESP-ESP32S2: "-cc1" "-triple" "xtensa-esp-unknown-elf" +// CHECK-ESP-ESP32S2-SAME: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-ESP32S2-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-ESP32S2-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s2{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32S2-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32S2-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// 
CHECK-ESP-ESP32S2-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s2{{[/\\]+}}include" +// CHECK-ESP-ESP32S2-NEXT: ld.lld{{(.exe)?}}" +// CHECK-ESP-ESP32S2-SAME: "--sysroot=[[SYSROOT]]" +// CHECK-ESP-ESP32S2-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s2{{[/\\]+}}lib" + +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf -fno-integrated-as \ +// RUN: -mcpu=esp32s2 \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32S2-FORCEAS %s +// CHECK-ESP-ESP32S2-FORCEAS: "-cc1" "-triple" "xtensa-esp-unknown-elf" +// CHECK-ESP-ESP32S2-FORCEAS-SAME: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-ESP32S2-FORCEAS-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-ESP32S2-FORCEAS-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s2{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32S2-FORCEAS-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32S2-FORCEAS-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-ESP32S2-FORCEAS-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s2{{[/\\]+}}include" +// CHECK-ESP-ESP32S2-FORCEAS: xtensa-esp32s2-elf-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" +// CHECK-ESP-ESP32S2-FORCEAS-NEXT: ld.lld{{(.exe)?}}" +// CHECK-ESP-ESP32S2-FORCEAS-SAME: "--sysroot=[[SYSROOT]]" +// CHECK-ESP-ESP32S2-FORCEAS-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s2{{[/\\]+}}lib" + +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf \ +// RUN: -mcpu=esp32s2 -fno-rtti \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32S2_NORTTI %s +// CHECK-ESP-ESP32S2_NORTTI: "-cc1" "-triple" "xtensa-esp-unknown-elf" +// 
CHECK-ESP-ESP32S2_NORTTI-SAME: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-ESP32S2_NORTTI-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-ESP32S2_NORTTI-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s2_no-rtti{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32S2_NORTTI-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32S2_NORTTI-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-ESP32S2_NORTTI-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s2_no-rtti{{[/\\]+}}include" +// CHECK-ESP-ESP32S2_NORTTI-NEXT: ld.lld{{(.exe)?}}" +// CHECK-ESP-ESP32S2_NORTTI-SAME: "--sysroot=[[SYSROOT]]" +// CHECK-ESP-ESP32S2_NORTTI-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s2_no-rtti{{[/\\]+}}lib" + +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf \ +// RUN: -mcpu=esp32s3 \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32S3 %s +// CHECK-ESP-ESP32S3: "-cc1" "-triple" "xtensa-esp-unknown-elf" +// CHECK-ESP-ESP32S3-SAME: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-ESP32S3-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-ESP32S3-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s3{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32S3-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32S3-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-ESP32S3-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s3{{[/\\]+}}include" +// CHECK-ESP-ESP32S3-NEXT: ld.lld{{(.exe)?}}" +// CHECK-ESP-ESP32S3-SAME: "--sysroot=[[SYSROOT]]" +// CHECK-ESP-ESP32S3-SAME: 
"-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s3{{[/\\]+}}lib" + +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf -fno-integrated-as \ +// RUN: -mcpu=esp32s3 \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32S3-FORCEAS %s +// CHECK-ESP-ESP32S3-FORCEAS: "-cc1" "-triple" "xtensa-esp-unknown-elf" +// CHECK-ESP-ESP32S3-FORCEAS-SAME: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-ESP32S3-FORCEAS-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-ESP32S3-FORCEAS-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s3{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32S3-FORCEAS-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32S3-FORCEAS-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-ESP32S3-FORCEAS-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s3{{[/\\]+}}include" +// CHECK-ESP-ESP32S3-FORCEAS: xtensa-esp32s3-elf-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" +// CHECK-ESP-ESP32S3-FORCEAS-NEXT: ld.lld{{(.exe)?}}" +// CHECK-ESP-ESP32S3-FORCEAS-SAME: "--sysroot=[[SYSROOT]]" +// CHECK-ESP-ESP32S3-FORCEAS-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s3{{[/\\]+}}lib" + +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf \ +// RUN: -mcpu=esp32s3 -fno-rtti \ +// RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32S3_NORTTI %s +// CHECK-ESP-ESP32S3_NORTTI: "-cc1" "-triple" "xtensa-esp-unknown-elf" +// CHECK-ESP-ESP32S3_NORTTI-SAME: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" +// CHECK-ESP-ESP32S3_NORTTI-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" +// CHECK-ESP-ESP32S3_NORTTI-SAME: "-internal-isystem" 
"[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s3_no-rtti{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32S3_NORTTI-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-ESP32S3_NORTTI-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" +// CHECK-ESP-ESP32S3_NORTTI-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s3_no-rtti{{[/\\]+}}include" +// CHECK-ESP-ESP32S3_NORTTI-NEXT: ld.lld{{(.exe)?}}" +// CHECK-ESP-ESP32S3_NORTTI-SAME: "--sysroot=[[SYSROOT]]" +// CHECK-ESP-ESP32S3_NORTTI-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s3_no-rtti{{[/\\]+}}lib" + +// Check that compiler-rt library without the arch filename suffix will +// be used if present. +// RUN: rm -rf %T/baremetal_clang_rt_noarch +// RUN: mkdir -p %T/baremetal_clang_rt_noarch/lib +// RUN: touch %T/baremetal_clang_rt_noarch/lib/libclang_rt.builtins.a +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 \ +// RUN: --target=xtensa-esp-elf \ +// RUN: --sysroot=%T/baremetal_clang_rt_noarch \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32_CLANGRT-NOARCH %s +// CHECK-ESP-ESP32_CLANGRT-NOARCH: "-lclang_rt.builtins" +// CHECK-ESP-ESP32_CLANGRT-NOARCH-NOT: "-lclang_rt.builtins-xtensa" + +// Check that compiler-rt library with the arch filename suffix will be +// used if present. 
+// RUN: rm -rf %T/baremetal_clang_rt_arch +// RUN: mkdir -p %T/baremetal_clang_rt_arch/lib +// RUN: touch %T/baremetal_clang_rt_arch/lib/libclang_rt.builtins-xtensa.a +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 \ +// RUN: --target=xtensa-esp-elf \ +// RUN: --sysroot=%T/baremetal_clang_rt_arch \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32-CLANGRT-ARCH %s +// CHECK-ESP-ESP32-CLANGRT-ARCH: "-lclang_rt.builtins-xtensa" +// CHECK-ESP-ESP32-CLANGRT-ARCH-NOT: "-lclang_rt.builtins" diff --git a/clang/test/Driver/baremetal-sysroot.cpp b/clang/test/Driver/baremetal-sysroot.cpp index 18654be33b87c..e79f353b1d84f 100644 --- a/clang/test/Driver/baremetal-sysroot.cpp +++ b/clang/test/Driver/baremetal-sysroot.cpp @@ -20,3 +20,58 @@ // CHECK-V6M-C-SAME: "-L{{.*}}/baremetal_default_sysroot{{[/\\]+}}bin{{[/\\]+}}..{{[/\\]+}}lib{{[/\\]+}}clang-runtimes{{[/\\]+}}armv6m-none-eabi{{[/\\]+}}lib" // CHECK-V6M-C-SAME: "-lc" "-lm" "{{[^"]*}}libclang_rt.builtins.a" // CHECK-V6M-C-SAME: "-o" "{{.*}}.o" + +// RUN: rm -rf %T/baremetal_default_sysroot +// RUN: mkdir -p %T/baremetal_default_sysroot/bin +// RUN: mkdir -p %T/baremetal_default_sysroot/lib/clang-runtimes/riscv32-esp-unknown-elf/include/c++/11.2.0 +// RUN: mkdir -p %T/baremetal_default_sysroot/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32/include/c++/11.2.0 +// RUN: echo "MultilibVersion: '1.0'" > %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml +// RUN: echo "Variants:" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml +// RUN: echo "- Dir: riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml +// RUN: echo " Flags:" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml +// RUN: echo " - --target=riscv32-esp-unknown-elf" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml +// RUN: echo " - -march=rv32imac_zicsr_zifencei" >> 
%T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml +// RUN: echo " - -mabi=ilp32" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml +// RUN: echo "Mappings:" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml +// RUN: echo "- Match: -march=rv32imac" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml +// RUN: echo " Flags:" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml +// RUN: echo " - -march=rv32imac_zicsr_zifencei" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml +// RUN: ln -s %clang %T/baremetal_default_sysroot/bin/clang + +// RUN: %T/baremetal_default_sysroot/bin/clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target riscv32-esp-elf --sysroot= \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAC-C %s +// CHECK-ESP-RV32IMAC-C: "{{.*}}clang{{.*}}" "-cc1" "-triple" "riscv32-esp-unknown-elf" +// CHECK-ESP-RV32IMAC-C-SAME: "-internal-isystem" "{{.*}}/baremetal_default_sysroot{{[/\\]+}}bin{{[/\\]+}}..{{[/\\]+}}lib{{[/\\]+}}clang-runtimes{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECK-ESP-RV32IMAC-C-SAME: "-internal-isystem" "{{.*}}/baremetal_default_sysroot{{[/\\]+}}bin{{[/\\]+}}..{{[/\\]+}}lib{{[/\\]+}}clang-runtimes{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}include" +// CHECK-ESP-RV32IMAC-C-SAME: "-internal-isystem" "{{.*}}/baremetal_default_sysroot{{[/\\]+}}bin{{[/\\]+}}..{{[/\\]+}}lib{{[/\\]+}}clang-runtimes{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}include" +// CHECK-ESP-RV32IMAC-C-SAME: "-x" "c++" "{{.*}}baremetal-sysroot.cpp" +// CHECK-ESP-RV32IMAC-C-NEXT: "{{[^"]*}}ld{{(\.(lld|bfd|gold))?}}{{(\.exe)?}}" "-m" "elf32lriscv" +// CHECK-ESP-RV32IMAC-C-SAME: "-o" "{{.*}}.o" +// CHECK-ESP-RV32IMAC-C-SAME:
"-L{{.*}}/baremetal_default_sysroot{{[/\\]+}}bin{{[/\\]+}}..{{[/\\]+}}lib{{[/\\]+}}clang-runtimes{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}lib" +// CHECK-ESP-RV32IMAC-C-SAME: "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "-lclang_rt.builtins-riscv32" + +// RUN: rm -rf %T/baremetal_default_sysroot +// RUN: mkdir -p %T/baremetal_default_sysroot/bin +// RUN: mkdir -p %T/baremetal_default_sysroot/lib/clang-runtimes/xtensa-esp-unknown-elf/include/c++/11.2.0 +// RUN: mkdir -p %T/baremetal_default_sysroot/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32/include/c++/11.2.0 +// RUN: echo "MultilibVersion: '1.0'" > %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml +// RUN: echo "Variants:" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml +// RUN: echo "- Dir: xtensa-esp-unknown-elf/esp32" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml +// RUN: echo " Flags:" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml +// RUN: echo " - --target=xtensa-esp-unknown-elf" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml +// RUN: echo " - -mcpu=esp32" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml +// RUN: ln -s %clang %T/baremetal_default_sysroot/bin/clang + +// RUN: %T/baremetal_default_sysroot/bin/clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target xtensa-esp-unknown-elf --sysroot= \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32-C %s +// CHECK-ESP-ESP32-C: "{{.*}}clang{{.*}}" "-cc1" "-triple" "xtensa-esp-unknown-elf" +// CHECK-ESP-ESP32-C-SAME: "-internal-isystem" "{{.*}}/baremetal_default_sysroot{{[/\\]+}}bin{{[/\\]+}}..{{[/\\]+}}lib{{[/\\]+}}clang-runtimes{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" +// CHECk-ESP-ESP32-C-SAME: "-internal-isystem" 
"{{.*}}/baremetal_default_sysroot{{[/\\]+}}bin{{[/\\]+}}..{{[/\\]+}}lib{{[/\\]+}}clang-runtimes{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}include" +// CHECk-ESP-ESP32-C-SAME: "-internal-isystem" "{{.*}}/baremetal_default_sysroot{{[/\\]+}}bin{{[/\\]+}}..{{[/\\]+}}lib{{[/\\]+}}clang-runtimes{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}include" +// CHECK-ESP-ESP32-C-SAME: "-x" "c++" "{{.*}}baremetal-sysroot.cpp" +// CHECK-ESP-ESP32-C-NEXT: "{{[^"]*}}ld{{(\.(lld|bfd|gold))?}}{{(\.exe)?}}" +// CHECK-ESP-ESP32-C-SAME: "-o" "{{.*}}.o" +// CHECK-ESP-ESP32-C-SAME: "-L{{.*}}/baremetal_default_sysroot{{[/\\]+}}bin{{[/\\]+}}..{{[/\\]+}}lib{{[/\\]+}}clang-runtimes{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}lib" +// CHECK-ESP-ESP32-C-SAME: "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "-lclang_rt.builtins-xtensa" diff --git a/clang/test/Driver/riscv32-esp-toolchain-extra.c b/clang/test/Driver/riscv32-esp-toolchain-extra.c deleted file mode 100644 index 13e94e981553c..0000000000000 --- a/clang/test/Driver/riscv32-esp-toolchain-extra.c +++ /dev/null @@ -1,115 +0,0 @@ -// A basic clang -cc1 command-line, and simple environment check. - -// The tests here are similar to those in xtensa-toolchain.c, however -// these tests need to create symlinks to test directory trees in order to -// set up the environment and therefore shell support is required. -// REQUIRES: shell, xtensa-registered-target -// UNSUPPORTED: system-windows - -// Compiler-rt multilibs are located at '$INSTALLDIR/lib/clang/15.0.0//mcpu/'. -// At this moment multilib feature for compiler-rt is supported only when GCC installation with the same multilib structure is found. -// It is safe because ESP toolchain still depends on libstdc++ which is part of GCC installation. -// When libc++ wil be supported by toolchain the dependency on GCC multilibs will be removed. 
- -// RUN: rm -rf %t -// RUN: mkdir -p %t/multilib_riscv_esp_elf_sdk/bin -// RUN: ln -s %clang %t/multilib_riscv_esp_elf_sdk/bin/clang -// RUN: ln -s %S/Inputs/multilib_riscv_esp_elf_sdk/bin/riscv32-esp-elf-ld %t/multilib_riscv_esp_elf_sdk/bin/riscv32-esp-elf-ld -// RUN: ln -s %S/Inputs/multilib_riscv_esp_elf_sdk/lib %t/multilib_riscv_esp_elf_sdk/lib -// RUN: ln -s %S/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf %t/multilib_riscv_esp_elf_sdk/riscv32-esp-elf - -// RUN: %t/multilib_riscv_esp_elf_sdk/bin/clang %s -### -no-canonical-prefixes \ -// RUN: --gcc-toolchain=%t/multilib_riscv_esp_elf_sdk \ -// RUN: -resource-dir=%t/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0 \ -// RUN: --target=riscv32-esp-elf -march=rv32i -mabi=ilp32 --rtlib=compiler-rt -fuse-ld= -fno-rtti 2>&1 \ -// RUN: | FileCheck -check-prefix=C-RV32I-RTLIB-COMPILERRT-NORTTI %s - -// C-RV32I-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" -// C-RV32I-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib/rv32i/ilp32/no-rtti{{/|\\\\}}crt0.o" -// C-RV32I-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/clang_rt.crtbegin.o" -// C-RV32I-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/libclang_rt.builtins.a" -// C-RV32I-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/no-rtti/lib/clang_rt.crtend.o" - -// RUN: %t/multilib_riscv_esp_elf_sdk/bin/clang %s -### -no-canonical-prefixes \ -// RUN: --gcc-toolchain=%t/multilib_riscv_esp_elf_sdk \ -// RUN: -resource-dir=%t/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0 \ -// RUN: --target=riscv32-esp-elf -march=rv32i -mabi=ilp32 --rtlib=compiler-rt -fuse-ld= 2>&1 \ -// RUN: | FileCheck 
-check-prefix=C-RV32I-RTLIB-COMPILERRT-RTTI %s - -// C-RV32I-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" -// C-RV32I-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib/rv32i/ilp32{{/|\\\\}}crt0.o" -// C-RV32I-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/clang_rt.crtbegin.o" -// C-RV32I-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/libclang_rt.builtins.a" -// C-RV32I-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32i/ilp32/lib/clang_rt.crtend.o" - -// RUN: %t/multilib_riscv_esp_elf_sdk/bin/clang %s -### -no-canonical-prefixes \ -// RUN: --gcc-toolchain=%t/multilib_riscv_esp_elf_sdk \ -// RUN: -resource-dir=%t/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0 \ -// RUN: --target=riscv32-esp-elf -march=rv32imc -mabi=ilp32 --rtlib=compiler-rt -fuse-ld= -fno-rtti 2>&1 \ -// RUN: | FileCheck -check-prefix=C-RV32IMC-RTLIB-COMPILERRT-NORTTI %s - -// C-RV32IMC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" -// C-RV32IMC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib/rv32imc/ilp32/no-rtti{{/|\\\\}}crt0.o" -// C-RV32IMC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/clang_rt.crtbegin.o" -// C-RV32IMC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/libclang_rt.builtins.a" -// C-RV32IMC-RTLIB-COMPILERRT-NORTTI: 
"{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/no-rtti/lib/clang_rt.crtend.o" - -// RUN: %t/multilib_riscv_esp_elf_sdk/bin/clang %s -### -no-canonical-prefixes \ -// RUN: --gcc-toolchain=%t/multilib_riscv_esp_elf_sdk \ -// RUN: -resource-dir=%t/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0 \ -// RUN: --target=riscv32-esp-elf -march=rv32imc -mabi=ilp32 --rtlib=compiler-rt -fuse-ld= 2>&1 \ -// RUN: | FileCheck -check-prefix=C-RV32IMC-RTLIB-COMPILERRT-RTTI %s - -// C-RV32IMC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" -// C-RV32IMC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib/rv32imc/ilp32{{/|\\\\}}crt0.o" -// C-RV32IMC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/clang_rt.crtbegin.o" -// C-RV32IMC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/libclang_rt.builtins.a" -// C-RV32IMC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imc/ilp32/lib/clang_rt.crtend.o" - -// RUN: %t/multilib_riscv_esp_elf_sdk/bin/clang %s -### -no-canonical-prefixes \ -// RUN: --gcc-toolchain=%t/multilib_riscv_esp_elf_sdk \ -// RUN: -resource-dir=%t/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0 \ -// RUN: --target=riscv32-esp-elf -march=rv32imac -mabi=ilp32 --rtlib=compiler-rt -fuse-ld= -fno-rtti 2>&1 \ -// RUN: | FileCheck -check-prefix=C-RV32IMAC-RTLIB-COMPILERRT-NORTTI %s - -// C-RV32IMAC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" -// C-RV32IMAC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib/rv32imac/ilp32/no-rtti{{/|\\\\}}crt0.o" -// 
C-RV32IMAC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/clang_rt.crtbegin.o" -// C-RV32IMAC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/libclang_rt.builtins.a" -// C-RV32IMAC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/no-rtti/lib/clang_rt.crtend.o" - -// RUN: %t/multilib_riscv_esp_elf_sdk/bin/clang %s -### -no-canonical-prefixes \ -// RUN: --gcc-toolchain=%t/multilib_riscv_esp_elf_sdk \ -// RUN: -resource-dir=%t/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0 \ -// RUN: --target=riscv32-esp-elf -march=rv32imac -mabi=ilp32 --rtlib=compiler-rt -fuse-ld= 2>&1 \ -// RUN: | FileCheck -check-prefix=C-RV32IMAC-RTLIB-COMPILERRT-RTTI %s - -// C-RV32IMAC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" -// C-RV32IMAC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib/rv32imac/ilp32{{/|\\\\}}crt0.o" -// C-RV32IMAC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/clang_rt.crtbegin.o" -// C-RV32IMAC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/libclang_rt.builtins.a" -// C-RV32IMAC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imac/ilp32/lib/clang_rt.crtend.o" - -// RUN: %t/multilib_riscv_esp_elf_sdk/bin/clang %s -### -no-canonical-prefixes \ -// RUN: --gcc-toolchain=%t/multilib_riscv_esp_elf_sdk \ -// RUN: -resource-dir=%t/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0 \ -// RUN: --target=riscv32-esp-elf -march=rv32imafc -mabi=ilp32f --rtlib=compiler-rt -fuse-ld= -fno-rtti 2>&1 \ -// RUN: 
| FileCheck -check-prefix=C-RV32IMAFC-RTLIB-COMPILERRT-NORTTI %s - -// C-RV32IMAFC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" -// C-RV32IMAFC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib/rv32imafc/ilp32f/no-rtti{{/|\\\\}}crt0.o" -// C-RV32IMAFC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/clang_rt.crtbegin.o" -// C-RV32IMAFC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/libclang_rt.builtins.a" -// C-RV32IMAFC-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/no-rtti/lib/clang_rt.crtend.o" - -// RUN: %t/multilib_riscv_esp_elf_sdk/bin/clang %s -### -no-canonical-prefixes \ -// RUN: --gcc-toolchain=%t/multilib_riscv_esp_elf_sdk \ -// RUN: -resource-dir=%t/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0 \ -// RUN: --target=riscv32-esp-elf -march=rv32imafc -mabi=ilp32f --rtlib=compiler-rt -fuse-ld= 2>&1 \ -// RUN: | FileCheck -check-prefix=C-RV32IMAFC-RTLIB-COMPILERRT-RTTI %s - -// C-RV32IMAFC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" -// C-RV32IMAFC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib/rv32imafc/ilp32f{{/|\\\\}}crt0.o" -// C-RV32IMAFC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/clang_rt.crtbegin.o" -// C-RV32IMAFC-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/libclang_rt.builtins.a" -// C-RV32IMAFC-RTLIB-COMPILERRT-RTTI: 
"{{.*}}/multilib_riscv_esp_elf_sdk/lib/clang/15.0.0/riscv32-esp-elf/generic-rv32/rv32imafc/ilp32f/lib/clang_rt.crtend.o" diff --git a/clang/test/Driver/riscv32-esp-toolchain.c b/clang/test/Driver/riscv32-esp-toolchain.c deleted file mode 100644 index 34ef6871f30f0..0000000000000 --- a/clang/test/Driver/riscv32-esp-toolchain.c +++ /dev/null @@ -1,325 +0,0 @@ -// A basic clang -cc1 command-line, and simple environment check. - -// RUN: %clang %s -### -no-canonical-prefixes -target riscv32-esp-elf \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_esp_elf_sdk 2>&1 \ -// RUN: | FileCheck -check-prefix=CC1 %s -// CC1: clang{{.*}} "-cc1" "-triple" "riscv32-esp-unknown-elf" - -// Test interaction with -fuse-ld=lld, if lld is available. -// RUN: %clang %s -### -no-canonical-prefixes -target riscv32-esp-elf \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_esp_elf_sdk -fuse-ld=lld 2>&1 \ -// RUN: | FileCheck -check-prefix=LLD %s -// LLD: {{(error: invalid linker name in argument '-fuse-ld=lld')|(ld.lld)}} - -// rv32imac is the default - -// RUN: %clang %s -### -no-canonical-prefixes -target riscv32-esp-elf \ -// RUN: -ffreestanding --rtlib=libgcc --ld-path=riscv32-esp-elf-ld \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_esp_elf_sdk \ -// RUN: --sysroot=%S/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf 2>&1 \ -// RUN: | FileCheck -check-prefix=C-RV32IMAC-BAREMETAL-MULTI-ILP32 %s - -// C-RV32IMAC-BAREMETAL-MULTI-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-as" -// C-RV32IMAC-BAREMETAL-MULTI-ILP32: "-mabi" "ilp32" "-march" "rv32imac" -// C-RV32IMAC-BAREMETAL-MULTI-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" -// C-RV32IMAC-BAREMETAL-MULTI-ILP32: "--sysroot={{.*}}/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf" -// C-RV32IMAC-BAREMETAL-MULTI-ILP32: "-m" "elf32lriscv" -// 
C-RV32IMAC-BAREMETAL-MULTI-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32{{/|\\\\}}crtbegin.o" -// C-RV32IMAC-BAREMETAL-MULTI-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0" -// C-RV32IMAC-BAREMETAL-MULTI-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib" -// C-RV32IMAC-BAREMETAL-MULTI-ILP32: "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "-lgcc" -// C-RV32IMAC-BAREMETAL-MULTI-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32{{/|\\\\}}crtend.o" - -// RUN: %clang %s -### -no-canonical-prefixes -target riscv32-esp-elf \ -// RUN: -ffreestanding --rtlib=libgcc --ld-path=riscv32-esp-elf-ld \ -// RUN: --sysroot= \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_esp_elf_sdk 2>&1 \ -// RUN: | FileCheck -check-prefix=C-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32 %s - -// C-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-as" -// C-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-mabi" "ilp32" "-march" "rv32imac" -// C-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" -// C-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-m" "elf32lriscv" -// C-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32{{/|\\\\}}crtbegin.o" -// C-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0" -// C-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib" -// C-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" 
"-lgcc" -// C-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32{{/|\\\\}}crtend.o" - -// RUN: %clangxx %s -### -no-canonical-prefixes -target riscv32-esp-elf \ -// RUN: -ffreestanding -stdlib=libstdc++ --rtlib=libgcc --ld-path=riscv32-esp-elf-ld \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_esp_elf_sdk \ -// RUN: --sysroot=%S/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf 2>&1 \ -// RUN: | FileCheck -check-prefix=CXX-RV32IMAC-BAREMETAL-MULTI-ILP32 %s - -// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "-internal-isystem" "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/include/c++{{/|\\\\}}8.4.0" -// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-as" -// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "-mabi" "ilp32" "-march" "rv32imac" -// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" -// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "--sysroot={{.*}}/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf" -// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "-m" "elf32lriscv" -// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32{{/|\\\\}}crtbegin.o" -// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0" -// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib" -// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "-lstdc++" "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "-lgcc" -// CXX-RV32IMAC-BAREMETAL-MULTI-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32{{/|\\\\}}crtend.o" - -// RUN: %clangxx %s -### -no-canonical-prefixes -target riscv32-esp-elf \ -// 
RUN: -ffreestanding -stdlib=libstdc++ --rtlib=libgcc --ld-path=riscv32-esp-elf-ld \ -// RUN: --sysroot= \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_esp_elf_sdk 2>&1 \ -// RUN: | FileCheck -check-prefix=CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32 %s - -// CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-internal-isystem" "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/include/c++{{/|\\\\}}8.4.0" -// CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-as" -// CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-mabi" "ilp32" "-march" "rv32imac" -// CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" -// CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-m" "elf32lriscv" -// CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32{{/|\\\\}}crtbegin.o" -// CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0" -// CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib" -// CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "-lstdc++" "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "-lgcc" -// CXX-RV32IMAC-BAREMETAL-MULTI-NOSYSROOT-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32{{/|\\\\}}crtend.o" - -// RUN: %clangxx %s -### -no-canonical-prefixes -target riscv32-esp-elf \ -// RUN: -ffreestanding -stdlib=libstdc++ --rtlib=libgcc --ld-path=riscv32-esp-elf-ld \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_esp_elf_sdk \ -// RUN: 
--sysroot=%S/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf \ -// RUN: -fno-rtti 2>&1 \ -// RUN: | FileCheck -check-prefix=CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32 %s - -// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "-internal-isystem" "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/include/c++{{/|\\\\}}8.4.0" -// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-as" -// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "-mabi" "ilp32" "-march" "rv32imac" -// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" -// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "--sysroot={{.*}}/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf" -// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "-m" "elf32lriscv" -// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/no-rtti{{/|\\\\}}crtbegin.o" -// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0" -// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/riscv32-esp-elf/lib" -// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "-lstdc++" "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "-lgcc" -// CXX-RV32IMAC-BAREMETAL-MULTI-NORTTI-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imac/ilp32/no-rtti{{/|\\\\}}crtend.o" - -// RUN: %clang %s -### -no-canonical-prefixes -target riscv32-esp-elf \ -// RUN: -march=rv32i -mabi=ilp32 \ -// RUN: -ffreestanding --rtlib=libgcc --ld-path=riscv32-esp-elf-ld --sysroot= \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_esp_elf_sdk 2>&1 \ -// RUN: | FileCheck -check-prefix=C-RV32I-BAREMETAL-MULTI-ILP32 %s - -// 
C-RV32I-BAREMETAL-MULTI-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-as" -// C-RV32I-BAREMETAL-MULTI-ILP32: "-mabi" "ilp32" "-march" "rv32i" -// C-RV32I-BAREMETAL-MULTI-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" -// C-RV32I-BAREMETAL-MULTI-ILP32: "-m" "elf32lriscv" -// C-RV32I-BAREMETAL-MULTI-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32{{/|\\\\}}crtbegin.o" -// C-RV32I-BAREMETAL-MULTI-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0" -// C-RV32I-BAREMETAL-MULTI-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib" -// C-RV32I-BAREMETAL-MULTI-ILP32: "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "-lgcc" -// C-RV32I-BAREMETAL-MULTI-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32i/ilp32{{/|\\\\}}crtend.o" - -// RUN: %clang %s -### -no-canonical-prefixes -target riscv32-esp-elf \ -// RUN: -march=rv32imc -mabi=ilp32 \ -// RUN: -ffreestanding --rtlib=libgcc --ld-path=riscv32-esp-elf-ld --sysroot= \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_esp_elf_sdk 2>&1 \ -// RUN: | FileCheck -check-prefix=C-RV32IMC-BAREMETAL-MULTI-ILP32 %s - -// C-RV32IMC-BAREMETAL-MULTI-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-as" -// C-RV32IMC-BAREMETAL-MULTI-ILP32: "-mabi" "ilp32" "-march" "rv32imc" -// C-RV32IMC-BAREMETAL-MULTI-ILP32: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" -// C-RV32IMC-BAREMETAL-MULTI-ILP32: "-m" "elf32lriscv" -// C-RV32IMC-BAREMETAL-MULTI-ILP32: 
"{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32{{/|\\\\}}crtbegin.o" -// C-RV32IMC-BAREMETAL-MULTI-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0" -// C-RV32IMC-BAREMETAL-MULTI-ILP32: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib" -// C-RV32IMC-BAREMETAL-MULTI-ILP32: "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "-lgcc" -// C-RV32IMC-BAREMETAL-MULTI-ILP32: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imc/ilp32{{/|\\\\}}crtend.o" - -// RUN: %clang %s -### -no-canonical-prefixes -target riscv32-esp-elf \ -// RUN: -march=rv32imafc -mabi=ilp32f \ -// RUN: -ffreestanding --rtlib=libgcc --ld-path=riscv32-esp-elf-ld --sysroot= \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_riscv_esp_elf_sdk 2>&1 \ -// RUN: | FileCheck -check-prefix=C-RV32IMAFC-BAREMETAL-MULTI-ILP32F %s - -// C-RV32IMAFC-BAREMETAL-MULTI-ILP32F: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-as" -// C-RV32IMAFC-BAREMETAL-MULTI-ILP32F: "-mabi" "ilp32f" "-march" "rv32imafc" -// C-RV32IMAFC-BAREMETAL-MULTI-ILP32F: "{{.*}}Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}riscv32-esp-elf-ld" -// C-RV32IMAFC-BAREMETAL-MULTI-ILP32F: "-m" "elf32lriscv" -// C-RV32IMAFC-BAREMETAL-MULTI-ILP32F: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f{{/|\\\\}}crtbegin.o" -// C-RV32IMAFC-BAREMETAL-MULTI-ILP32F: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0" -// C-RV32IMAFC-BAREMETAL-MULTI-ILP32F: "-L{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}riscv32-esp-elf/lib" -// C-RV32IMAFC-BAREMETAL-MULTI-ILP32F: "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "-lgcc" -// 
C-RV32IMAFC-BAREMETAL-MULTI-ILP32F: "{{.*}}/Inputs/multilib_riscv_esp_elf_sdk/lib/gcc/riscv32-esp-elf/8.4.0/rv32imafc/ilp32f{{/|\\\\}}crtend.o" - -// RUN: %clang -target riscv32-esp-elf %s -emit-llvm -S -o - | FileCheck %s - -typedef __builtin_va_list va_list; -typedef __SIZE_TYPE__ size_t; -typedef __PTRDIFF_TYPE__ ptrdiff_t; -typedef __WCHAR_TYPE__ wchar_t; -typedef __WINT_TYPE__ wint_t; - - -// Check Alignments - -// CHECK: @align_c = dso_local global i32 1 -int align_c = __alignof(char); - -// CHECK: @align_s = dso_local global i32 2 -int align_s = __alignof(short); - -// CHECK: @align_i = dso_local global i32 4 -int align_i = __alignof(int); - -// CHECK: @align_wc = dso_local global i32 4 -int align_wc = __alignof(wchar_t); - -// CHECK: @align_wi = dso_local global i32 4 -int align_wi = __alignof(wint_t); - -// CHECK: @align_l = dso_local global i32 4 -int align_l = __alignof(long); - -// CHECK: @align_ll = dso_local global i32 8 -int align_ll = __alignof(long long); - -// CHECK: @align_p = dso_local global i32 4 -int align_p = __alignof(void*); - -// CHECK: @align_f = dso_local global i32 4 -int align_f = __alignof(float); - -// CHECK: @align_d = dso_local global i32 8 -int align_d = __alignof(double); - -// CHECK: @align_ld = dso_local global i32 16 -int align_ld = __alignof(long double); - -// CHECK: @align_vl = dso_local global i32 4 -int align_vl = __alignof(va_list); - -// CHECK: @align_a_c = dso_local global i32 1 -int align_a_c = __alignof(_Atomic(char)); - -// CHECK: @align_a_s = dso_local global i32 2 -int align_a_s = __alignof(_Atomic(short)); - -// CHECK: @align_a_i = dso_local global i32 4 -int align_a_i = __alignof(_Atomic(int)); - -// CHECK: @align_a_wc = dso_local global i32 4 -int align_a_wc = __alignof(_Atomic(wchar_t)); - -// CHECK: @align_a_wi = dso_local global i32 4 -int align_a_wi = __alignof(_Atomic(wint_t)); - -// CHECK: @align_a_l = dso_local global i32 4 -int align_a_l = __alignof(_Atomic(long)); - -// CHECK: @align_a_ll = dso_local 
global i32 8 -int align_a_ll = __alignof(_Atomic(long long)); - -// CHECK: @align_a_p = dso_local global i32 4 -int align_a_p = __alignof(_Atomic(void*)); - -// CHECK: @align_a_f = dso_local global i32 4 -int align_a_f = __alignof(_Atomic(float)); - -// CHECK: @align_a_d = dso_local global i32 8 -int align_a_d = __alignof(_Atomic(double)); - -// CHECK: @align_a_ld = dso_local global i32 16 -int align_a_ld = __alignof(_Atomic(long double)); - -// CHECK: @align_a_s4 = dso_local global i32 4 -int align_a_s4 = __alignof(_Atomic(struct { char s[4]; })); - -// CHECK: @align_a_s8 = dso_local global i32 8 -int align_a_s8 = __alignof(_Atomic(struct { char s[8]; })); - -// CHECK: @align_a_s16 = dso_local global i32 16 -int align_a_s16 = __alignof(_Atomic(struct { char s[16]; })); - -// CHECK: @align_a_s32 = dso_local global i32 1 -int align_a_s32 = __alignof(_Atomic(struct { char s[32]; })); - - -// Check Sizes - -// CHECK: @size_a_c = dso_local global i32 1 -int size_a_c = sizeof(_Atomic(char)); - -// CHECK: @size_a_s = dso_local global i32 2 -int size_a_s = sizeof(_Atomic(short)); - -// CHECK: @size_a_i = dso_local global i32 4 -int size_a_i = sizeof(_Atomic(int)); - -// CHECK: @size_a_wc = dso_local global i32 4 -int size_a_wc = sizeof(_Atomic(wchar_t)); - -// CHECK: @size_a_wi = dso_local global i32 4 -int size_a_wi = sizeof(_Atomic(wint_t)); - -// CHECK: @size_a_l = dso_local global i32 4 -int size_a_l = sizeof(_Atomic(long)); - -// CHECK: @size_a_ll = dso_local global i32 8 -int size_a_ll = sizeof(_Atomic(long long)); - -// CHECK: @size_a_p = dso_local global i32 4 -int size_a_p = sizeof(_Atomic(void*)); - -// CHECK: @size_a_f = dso_local global i32 4 -int size_a_f = sizeof(_Atomic(float)); - -// CHECK: @size_a_d = dso_local global i32 8 -int size_a_d = sizeof(_Atomic(double)); - -// CHECK: @size_a_ld = dso_local global i32 16 -int size_a_ld = sizeof(_Atomic(long double)); - - -// Check types - -// CHECK: zeroext i8 @check_char() -char check_char() { return 0; } - -// 
CHECK: define dso_local signext i16 @check_short() -short check_short() { return 0; } - -// CHECK: define dso_local i32 @check_int() -int check_int() { return 0; } - -// CHECK: define dso_local i32 @check_wchar_t() -int check_wchar_t() { return 0; } - -// CHECK: define dso_local i32 @check_long() -long check_long() { return 0; } - -// CHECK: define dso_local i64 @check_longlong() -long long check_longlong() { return 0; } - -// CHECK: define dso_local zeroext i8 @check_uchar() -unsigned char check_uchar() { return 0; } - -// CHECK: define dso_local zeroext i16 @check_ushort() -unsigned short check_ushort() { return 0; } - -// CHECK: define dso_local i32 @check_uint() -unsigned int check_uint() { return 0; } - -// CHECK: define dso_local i32 @check_ulong() -unsigned long check_ulong() { return 0; } - -// CHECK: define dso_local i64 @check_ulonglong() -unsigned long long check_ulonglong() { return 0; } - -// CHECK: define dso_local i32 @check_size_t() -size_t check_size_t() { return 0; } - -// CHECK: define dso_local float @check_float() -float check_float() { return 0; } - -// CHECK: define dso_local double @check_double() -double check_double() { return 0; } - -// CHECK: define dso_local fp128 @check_longdouble() -long double check_longdouble() { return 0; } diff --git a/clang/test/Driver/xtensa-esp-toolchain-extra.c b/clang/test/Driver/xtensa-esp-toolchain-extra.c deleted file mode 100644 index 5e195019820d4..0000000000000 --- a/clang/test/Driver/xtensa-esp-toolchain-extra.c +++ /dev/null @@ -1,111 +0,0 @@ -// A basic clang -cc1 command-line, and simple environment check. - -// The tests here are similar to those in xtensa-toolchain.c, however -// these tests need to create symlinks to test directory trees in order to -// set up the environment and therefore shell support is required. -// REQUIRES: shell, xtensa-registered-target -// UNSUPPORTED: system-windows - -// Compiler-rt multilibs are located at '$INSTALLDIR/lib/clang/15.0.0//mcpu/'. 
-// At this moment multilib feature for compiler-rt is supported only when GCC installation with the same multilib structure is found. -// It is safe because ESP toolchain still depends on libstdc++ which is part of GCC installation. -// When libc++ wil be supported by toolchain the dependency on GCC multilibs will be removed. - -// RUN: rm -rf %t -// RUN: mkdir -p %t/multilib_xtensa_tree/bin -// RUN: ln -s %clang %t/multilib_xtensa_tree/bin/clang -// RUN: ln -s %S/Inputs/multilib_xtensa_tree/bin/xtensa-esp32-elf-ld %t/multilib_xtensa_tree/bin/xtensa-esp32-elf-ld -// RUN: ln -s %S/Inputs/multilib_xtensa_tree/bin/xtensa-esp32s2-elf-ld %t/multilib_xtensa_tree/bin/xtensa-esp32s2-elf-ld -// RUN: ln -s %S/Inputs/multilib_xtensa_tree/bin/xtensa-esp32s3-elf-ld %t/multilib_xtensa_tree/bin/xtensa-esp32s3-elf-ld -// RUN: ln -s %S/Inputs/multilib_xtensa_tree/lib %t/multilib_xtensa_tree/lib -// RUN: ln -s %S/Inputs/multilib_xtensa_tree/xtensa-esp32-elf %t/multilib_xtensa_tree/xtensa-esp32-elf -// RUN: ln -s %S/Inputs/multilib_xtensa_tree/xtensa-esp32s2-elf %t/multilib_xtensa_tree/xtensa-esp32s2-elf -// RUN: ln -s %S/Inputs/multilib_xtensa_tree/xtensa-esp32s3-elf %t/multilib_xtensa_tree/xtensa-esp32s3-elf - -// RUN: %t/multilib_xtensa_tree/bin/clang %s -### -no-canonical-prefixes \ -// RUN: --gcc-toolchain=%t/multilib_xtensa_tree \ -// RUN: -resource-dir=%t/multilib_xtensa_tree/lib/clang/15.0.0 \ -// RUN: --target=xtensa-esp-elf -mcpu=esp32 --rtlib=compiler-rt -fuse-ld= -fno-rtti 2>&1 \ -// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-RTLIB-COMPILERRT-NORTTI %s - -// C-XTENSA-ESP32-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" -// C-XTENSA-ESP32-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/clang_rt.crtbegin.o" -// C-XTENSA-ESP32-RTLIB-COMPILERRT-NORTTI: 
"{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/libclang_rt.builtins.a" -// C-XTENSA-ESP32-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/no-rtti/lib/clang_rt.crtend.o" - -// RUN: %t/multilib_xtensa_tree/bin/clang %s -### -no-canonical-prefixes \ -// RUN: --gcc-toolchain=%t/multilib_xtensa_tree \ -// RUN: -resource-dir=%t/multilib_xtensa_tree/lib/clang/15.0.0 \ -// RUN: --target=xtensa-esp-elf -mcpu=esp32 --rtlib=compiler-rt -fuse-ld= 2>&1 \ -// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-RTLIB-COMPILERRT-RTTI %s - -// C-XTENSA-ESP32-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" -// C-XTENSA-ESP32-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/clang_rt.crtbegin.o" -// C-XTENSA-ESP32-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/libclang_rt.builtins.a" -// C-XTENSA-ESP32-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/lib/clang_rt.crtend.o" - -// RUN: %t/multilib_xtensa_tree/bin/clang %s -### -no-canonical-prefixes \ -// RUN: --gcc-toolchain=%t/multilib_xtensa_tree \ -// RUN: -resource-dir=%t/multilib_xtensa_tree/lib/clang/15.0.0 \ -// RUN: --target=xtensa-esp-elf -mcpu=esp32 --rtlib=compiler-rt -fuse-ld= -mfix-esp32-psram-cache-issue -fno-rtti 2>&1 \ -// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-RTLIB-COMPILERRT-PSRAM-NORTTI %s - -// C-XTENSA-ESP32-RTLIB-COMPILERRT-PSRAM-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" -// C-XTENSA-ESP32-RTLIB-COMPILERRT-PSRAM-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/clang_rt.crtbegin.o" -// C-XTENSA-ESP32-RTLIB-COMPILERRT-PSRAM-NORTTI: 
"{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/libclang_rt.builtins.a" -// C-XTENSA-ESP32-RTLIB-COMPILERRT-PSRAM-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/no-rtti/lib/clang_rt.crtend.o" - -// RUN: %t/multilib_xtensa_tree/bin/clang %s -### -no-canonical-prefixes \ -// RUN: --gcc-toolchain=%t/multilib_xtensa_tree \ -// RUN: -resource-dir=%t/multilib_xtensa_tree/lib/clang/15.0.0 \ -// RUN: --target=xtensa-esp-elf -mcpu=esp32 --rtlib=compiler-rt -fuse-ld= -mfix-esp32-psram-cache-issue 2>&1 \ -// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-RTLIB-COMPILERRT-PSRAM-RTTI %s - -// C-XTENSA-ESP32-RTLIB-COMPILERRT-PSRAM-RTTI: "{{.*}}/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" -// C-XTENSA-ESP32-RTLIB-COMPILERRT-PSRAM-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/clang_rt.crtbegin.o" -// C-XTENSA-ESP32-RTLIB-COMPILERRT-PSRAM-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/libclang_rt.builtins.a" -// C-XTENSA-ESP32-RTLIB-COMPILERRT-PSRAM-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32/esp32-psram/lib/clang_rt.crtend.o" - -// RUN: %t/multilib_xtensa_tree/bin/clang %s -### -no-canonical-prefixes \ -// RUN: --gcc-toolchain=%t/multilib_xtensa_tree \ -// RUN: -resource-dir=%t/multilib_xtensa_tree/lib/clang/15.0.0 \ -// RUN: --target=xtensa-esp-elf -mcpu=esp32s2 --rtlib=compiler-rt -fuse-ld= -fno-rtti 2>&1 \ -// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32S2-RTLIB-COMPILERRT-NORTTI %s - -// C-XTENSA-ESP32S2-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s2-elf-ld" -// C-XTENSA-ESP32S2-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/clang_rt.crtbegin.o" -// 
C-XTENSA-ESP32S2-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/libclang_rt.builtins.a" -// C-XTENSA-ESP32S2-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/no-rtti/lib/clang_rt.crtend.o" - -// RUN: %t/multilib_xtensa_tree/bin/clang %s -### -no-canonical-prefixes \ -// RUN: --gcc-toolchain=%t/multilib_xtensa_tree \ -// RUN: -resource-dir=%t/multilib_xtensa_tree/lib/clang/15.0.0 \ -// RUN: --target=xtensa-esp-elf -mcpu=esp32s2 --rtlib=compiler-rt -fuse-ld= 2>&1 \ -// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32S2-RTLIB-COMPILERRT-RTTI %s - -// C-XTENSA-ESP32S2-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s2-elf-ld" -// C-XTENSA-ESP32S2-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/clang_rt.crtbegin.o" -// C-XTENSA-ESP32S2-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/libclang_rt.builtins.a" -// C-XTENSA-ESP32S2-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s2/lib/clang_rt.crtend.o" - -// RUN: %t/multilib_xtensa_tree/bin/clang %s -### -no-canonical-prefixes \ -// RUN: --gcc-toolchain=%t/multilib_xtensa_tree \ -// RUN: -resource-dir=%t/multilib_xtensa_tree/lib/clang/15.0.0 \ -// RUN: --target=xtensa-esp-elf -mcpu=esp32s3 --rtlib=compiler-rt -fuse-ld= -fno-rtti 2>&1 \ -// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32S3-RTLIB-COMPILERRT-NORTTI %s - -// C-XTENSA-ESP32S3-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s3-elf-ld" -// C-XTENSA-ESP32S3-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/clang_rt.crtbegin.o" -// C-XTENSA-ESP32S3-RTLIB-COMPILERRT-NORTTI: 
"{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/libclang_rt.builtins.a" -// C-XTENSA-ESP32S3-RTLIB-COMPILERRT-NORTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/no-rtti/lib/clang_rt.crtend.o" - -// RUN: %t/multilib_xtensa_tree/bin/clang %s -### -no-canonical-prefixes \ -// RUN: --gcc-toolchain=%t/multilib_xtensa_tree \ -// RUN: -resource-dir=%t/multilib_xtensa_tree/lib/clang/15.0.0 \ -// RUN: --target=xtensa-esp-elf -mcpu=esp32s3 --rtlib=compiler-rt -fuse-ld= 2>&1 \ -// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32S3-RTLIB-COMPILERRT-RTTI %s - -// C-XTENSA-ESP32S3-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s3-elf-ld" -// C-XTENSA-ESP32S3-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/clang_rt.crtbegin.o" -// C-XTENSA-ESP32S3-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/libclang_rt.builtins.a" -// C-XTENSA-ESP32S3-RTLIB-COMPILERRT-RTTI: "{{.*}}/multilib_xtensa_tree/lib/clang/15.0.0/xtensa-esp-elf/esp32s3/lib/clang_rt.crtend.o" diff --git a/clang/test/Driver/xtensa-toolchain.c b/clang/test/Driver/xtensa-toolchain.c deleted file mode 100644 index 234495b0cda0a..0000000000000 --- a/clang/test/Driver/xtensa-toolchain.c +++ /dev/null @@ -1,125 +0,0 @@ -// A basic clang -cc1 command-line, and simple environment check. 
- -// RUN: %clang %s -### -no-canonical-prefixes -target xtensa-esp-elf \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ -// RUN: | FileCheck -check-prefix=CC1-ESP-DEFAULT %s -// CC1-ESP-DEFAULT: clang{{.*}} "-cc1" "-triple" "xtensa-esp-unknown-elf" - -// RUN: %clang %s -### -no-canonical-prefixes -target xtensa-esp-elf -mcpu=esp32\ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ -// RUN: | FileCheck -check-prefix=CC1-ESP32 %s -// CC1-ESP32: clang{{.*}} "-cc1" "-triple" "xtensa-esp-unknown-elf" {{.*}}"-target-cpu" "esp32" - -// RUN: %clang %s -### -no-canonical-prefixes -target xtensa-esp-elf -mcpu=esp32s2\ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ -// RUN: | FileCheck -check-prefix=CC1-ESP32S2 %s -// CC1-ESP32S2: clang{{.*}} "-cc1" "-triple" "xtensa-esp-unknown-elf" {{.*}}"-target-cpu" "esp32s2" - -// RUN: %clang %s -### -no-canonical-prefixes -target xtensa-esp-elf -mcpu=esp32s3\ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ -// RUN: | FileCheck -check-prefix=CC1-ESP32S3 %s -// CC1-ESP32S3: clang{{.*}} "-cc1" "-triple" "xtensa-esp-unknown-elf" {{.*}}"-target-cpu" "esp32s3" - -// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ -// RUN: -target xtensa-esp-elf --rtlib=platform \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -fno-rtti 2>&1 \ -// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-BAREMETAL %s - -// C-XTENSA-ESP32-BAREMETAL: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" -// C-XTENSA-ESP32-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0/no-rtti" -// C-XTENSA-ESP32-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib/no-rtti" - -// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ -// 
RUN: -target xtensa-esp-elf --rtlib=platform \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ -// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-BAREMETAL-RTTI %s - -// C-XTENSA-ESP32-BAREMETAL-RTTI: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" -// C-XTENSA-ESP32-BAREMETAL-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0" -// C-XTENSA-ESP32-BAREMETAL-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib" - -// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ -// RUN: -target xtensa-esp-elf --rtlib=platform \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -fno-rtti -mfix-esp32-psram-cache-issue 2>&1 \ -// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-BAREMETAL-PSRAM %s - -// C-XTENSA-ESP32-BAREMETAL-PSRAM: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" -// C-XTENSA-ESP32-BAREMETAL-PSRAM: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0/esp32-psram/no-rtti" -// C-XTENSA-ESP32-BAREMETAL-PSRAM: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib/esp32-psram/no-rtti" - -// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ -// RUN: -target xtensa-esp-elf --rtlib=platform \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -mfix-esp32-psram-cache-issue 2>&1 \ -// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32-BAREMETAL-PSRAM-RTTI %s - -// C-XTENSA-ESP32-BAREMETAL-PSRAM-RTTI: 
"{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" -// C-XTENSA-ESP32-BAREMETAL-PSRAM-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0/esp32-psram" -// C-XTENSA-ESP32-BAREMETAL-PSRAM-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib/esp32-psram" - -// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ -// RUN: -target xtensa-esp-elf -mcpu=esp32s2 --rtlib=platform \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -fno-rtti 2>&1 \ -// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32S2-BAREMETAL %s - -// C-XTENSA-ESP32S2-BAREMETAL: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s2-elf-ld" -// C-XTENSA-ESP32S2-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32s2-elf{{/|\\\\}}8.4.0/no-rtti" -// C-XTENSA-ESP32S2-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32s2-elf{{/|\\\\}}lib/no-rtti" - -// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ -// RUN: -target xtensa-esp-elf -mcpu=esp32s2 --rtlib=platform \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ -// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32S2-BAREMETAL-RTTI %s - -// C-XTENSA-ESP32S2-BAREMETAL-RTTI: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s2-elf-ld" -// C-XTENSA-ESP32S2-BAREMETAL-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32s2-elf{{/|\\\\}}8.4.0" -// C-XTENSA-ESP32S2-BAREMETAL-RTTI: 
"-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s2-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32s2-elf{{/|\\\\}}lib" - -// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ -// RUN: -target xtensa-esp-elf -mcpu=esp32s3 --rtlib=platform \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree -fno-rtti 2>&1 \ -// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32S3-BAREMETAL %s - -// C-XTENSA-ESP32S3-BAREMETAL: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s3-elf-ld" -// C-XTENSA-ESP32S3-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32s3-elf{{/|\\\\}}8.4.0/no-rtti" -// C-XTENSA-ESP32S3-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32s3-elf{{/|\\\\}}lib/no-rtti" - -// RUN: %clang %s -### -no-canonical-prefixes -fuse-ld= \ -// RUN: -target xtensa-esp-elf -mcpu=esp32s3 --rtlib=platform \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ -// RUN: | FileCheck -check-prefix=C-XTENSA-ESP32S3-BAREMETAL-RTTI %s - -// C-XTENSA-ESP32S3-BAREMETAL-RTTI: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32s3-elf-ld" -// C-XTENSA-ESP32S3-BAREMETAL-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32s3-elf{{/|\\\\}}8.4.0" -// C-XTENSA-ESP32S3-BAREMETAL-RTTI: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32s3-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32s3-elf{{/|\\\\}}lib" - -// RUN: %clang %s -### -no-canonical-prefixes \ -// RUN: -target xtensa-esp-elf -mcpu=esp32 --rtlib=platform \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree \ -// RUN: --sysroot=%S/Inputs/multilib_xtensa_tree/xtensa-esp32-elf 2>&1 \ -// RUN: 
| FileCheck -check-prefix=C-XTENSA-ESP32-SYSROOT-BAREMETAL %s - -// C-XTENSA-ESP32-SYSROOT-BAREMETAL: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" -// C-XTENSA-ESP32-SYSROOT-BAREMETAL: "--sysroot={{.*}}/Inputs/multilib_xtensa_tree/xtensa-esp32-elf" -// C-XTENSA-ESP32-SYSROOT-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0" -// C-XTENSA-ESP32-SYSROOT-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib" - -// RUN: %clangxx %s -### -no-canonical-prefixes \ -// RUN: -target xtensa-esp-elf -mcpu=esp32 -stdlib=libstdc++ --rtlib=platform \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree 2>&1 \ -// RUN: | FileCheck -check-prefix=CXX-XTENSA-ESP32-BAREMETAL %s - -// CXX-XTENSA-ESP32-BAREMETAL: "-internal-isystem" "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf/include/c++{{/|\\\\}}8.4.0" -// CXX-XTENSA-ESP32-BAREMETAL: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" -// CXX-XTENSA-ESP32-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0" -// CXX-XTENSA-ESP32-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib" - -// RUN: %clangxx %s -### -no-canonical-prefixes \ -// RUN: -target xtensa-esp-elf -mcpu=esp32 -stdlib=libstdc++ --rtlib=platform \ -// RUN: --gcc-toolchain=%S/Inputs/multilib_xtensa_tree \ -// RUN: --sysroot=%S/Inputs/multilib_xtensa_tree/xtensa-esp32-elf 2>&1 \ -// RUN: | FileCheck 
-check-prefix=CXX-XTENSA-ESP32-SYSROOT-BAREMETAL %s - -// CXX-XTENSA-ESP32-SYSROOT-BAREMETAL: "-internal-isystem" "{{.*}}Inputs/multilib_xtensa_tree/xtensa-esp32-elf/include/c++/8.4.0" -// CXX-XTENSA-ESP32-SYSROOT-BAREMETAL: "{{.*}}Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}bin{{/|\\\\}}xtensa-esp32-elf-ld" -// CXX-XTENSA-ESP32-SYSROOT-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}lib{{/|\\\\}}gcc{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}8.4.0" -// CXX-XTENSA-ESP32-SYSROOT-BAREMETAL: "-L{{.*}}/Inputs/multilib_xtensa_tree/lib/gcc/xtensa-esp32-elf/8.4.0/../../..{{/|\\\\}}..{{/|\\\\}}xtensa-esp32-elf{{/|\\\\}}lib" diff --git a/compiler-rt/test/builtins/Unit/lit.cfg.py b/compiler-rt/test/builtins/Unit/lit.cfg.py index 0b6b971bca0d6..a020850fddf65 100644 --- a/compiler-rt/test/builtins/Unit/lit.cfg.py +++ b/compiler-rt/test/builtins/Unit/lit.cfg.py @@ -105,7 +105,7 @@ def get_libgcc_file_name(): # Don't pass dosish path separator to msys bash.exe. base_lib = base_lib.replace("\\", "/") if config.target_triple in ['xtensa-esp-elf', 'riscv32-esp-elf']: - config.substitutions.append( ("%librt ", "-Wl,--start-group," + base_lib + ',-lm,-lc,--whole-archive,-lgloss,--no-whole-archive,-lc,--whole-archive,-lsys_qemu,--no-whole-archive,--end-group ') ) + config.substitutions.append( ("%librt ", "-Wl,--start-group," + base_lib + ',-lc,-lm,--end-group ') ) else: config.substitutions.append( ("%librt ", base_lib + ' -lc -lm ') ) diff --git a/llvm/include/llvm/TargetParser/Triple.h b/llvm/include/llvm/TargetParser/Triple.h index f8d16ba2be90b..c61f416926a1a 100644 --- a/llvm/include/llvm/TargetParser/Triple.h +++ b/llvm/include/llvm/TargetParser/Triple.h @@ -1047,6 +1047,9 @@ class Triple { return getArch() == Triple::bpfel || getArch() == Triple::bpfeb; } + /// Tests whether the target is Xtensa. 
+ bool isXtensa() const { return getArch() == Triple::xtensa; } + /// Tests whether the target supports comdat bool supportsCOMDAT() const { return !(isOSBinFormatMachO() || isOSBinFormatXCOFF() || diff --git a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp index 11a7c8576fc8f..0cb171324fabb 100644 --- a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp +++ b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp @@ -652,9 +652,10 @@ bool XtensaAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, default: break; case Match_Success: - processInstruction(Inst, IDLoc, Out, STI); - Inst.setLoc(IDLoc); - Out.emitInstruction(Inst, getSTI()); + if(processInstruction(Inst, IDLoc, Out, STI)) { + Inst.setLoc(IDLoc); + Out.emitInstruction(Inst, getSTI()); + } return false; case Match_MissingFeature: return Error(IDLoc, "instruction use requires an option to be enabled"); @@ -1169,6 +1170,11 @@ bool XtensaAsmParser::parseBeginDirective(SMLoc L) { StringRef LiteralPrefixName = SE->getSymbol().getName(); TS.setLiteralSectionPrefix(LiteralPrefixName); RegionInProgress.emplace_back(BeginLoc, RegionDirectiveName, LiteralPrefixName); + } else if (RegionDirectiveName == "schedule" || RegionDirectiveName == "no-schedule") { + // Behave like GNU 'as'. + // The schedule directive is recognized only for compatibility with Tensilica’s assembler. + // This directive is ignored and has no effect on 'as'. 
+ RegionInProgress.emplace_back(BeginLoc, RegionDirectiveName); } else { return Error(BeginLoc, "unsupported region directive"); } diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index dc002650e6c72..ae0c6cabacd4d 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -426,8 +426,12 @@ def BBCI : RRI8_Inst<0x07, (outs), let imm8 = target; } +def : InstAlias<"_bbci\t$s, $imm, $target", (BBCI AR:$s, uimm5:$imm, brtarget:$target)>; + def : InstAlias<"bbci.l\t$s, $imm, $target", (BBCI AR:$s, uimm5:$imm, brtarget:$target)>; +def : InstAlias<"_bbci.l\t$s, $imm, $target", (BBCI AR:$s, uimm5:$imm, brtarget:$target)>; + def BBSI : RRI8_Inst<0x07, (outs), (ins AR:$s, uimm5:$imm, brtarget:$target), "bbsi\t$s, $imm, $target", []> { @@ -442,6 +446,8 @@ def BBSI : RRI8_Inst<0x07, (outs), def : InstAlias<"bbsi.l\t$s, $imm, $target", (BBSI AR:$s, uimm5:$imm, brtarget:$target)>; +def : InstAlias<"_bbsi.l\t$s, $imm, $target", (BBSI AR:$s, uimm5:$imm, brtarget:$target)>; + //===----------------------------------------------------------------------===// // Call and jump instructions //===----------------------------------------------------------------------===// @@ -686,6 +692,8 @@ def MOVI_N : RI7_Inst<0xc, 0x0, (outs AR:$s), (ins imm32n_95:$imm7), "movi.n\t$s, $imm7", [(set AR:$s, imm32n_95:$imm7)]>, Requires<[HasDensity]>; +def : InstAlias<"_movi.n\t$s, $imm7", (MOVI_N AR:$s, imm32n_95:$imm7)>; + // Load instruction let mayLoad = 1, usesCustomInserter = 1 in { def L32I_N : RRRN_Inst<0x8, (outs AR:$t), (ins mem32n:$addr), @@ -1652,6 +1660,17 @@ let isBarrier = 1, isTerminator = 1 in { } } +//===----------------------------------------------------------------------===// +// Simulator instructions +//===----------------------------------------------------------------------===// + +def SIMCALL : RRR_Inst<0x00, 0x00, 0x00, (outs), (ins), + "simcall", []> { + let r = 0x5; + let s = 0x1; 
+ let t = 0x0; +} + //===----------------------------------------------------------------------===// // Atomic patterns //===----------------------------------------------------------------------===// diff --git a/llvm/test/MC/Xtensa/Core/processor-control.s b/llvm/test/MC/Xtensa/Core/processor-control.s index 6295786dfb61a..64888e2aeadef 100644 --- a/llvm/test/MC/Xtensa/Core/processor-control.s +++ b/llvm/test/MC/Xtensa/Core/processor-control.s @@ -84,3 +84,7 @@ xsr.sar a8 # CHECK-INST: xsr a8, sar # CHECK: encoding: [0x80,0x03,0x61] xsr a8, 3 + +# CHECK-INST: simcall +# CHECK: encoding: [0x00,0x51,0x00] +simcall From 3b92cfcb5f27cf066da10af46ece4d64426cc6a3 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 2 Oct 2024 02:32:16 +0300 Subject: [PATCH 220/289] [Toolchain][RISCV][Xtensa] Remove GCC installation support for Espressif toolchain --- clang/lib/Driver/CMakeLists.txt | 2 +- clang/lib/Driver/Driver.cpp | 1 - clang/lib/Driver/ToolChains/Arch/Xtensa.cpp | 42 ++ clang/lib/Driver/ToolChains/Arch/Xtensa.h | 30 ++ clang/lib/Driver/ToolChains/CommonArgs.cpp | 15 +- clang/lib/Driver/ToolChains/CommonArgs.h | 7 - clang/lib/Driver/ToolChains/EspBareMetal.cpp | 3 + clang/lib/Driver/ToolChains/EspBareMetal.h | 4 + clang/lib/Driver/ToolChains/Gnu.cpp | 54 --- .../lib/Driver/ToolChains/RISCVToolchain.cpp | 25 +- clang/lib/Driver/ToolChains/RISCVToolchain.h | 6 - clang/lib/Driver/ToolChains/Xtensa.cpp | 378 ------------------ clang/lib/Driver/ToolChains/Xtensa.h | 91 ----- 13 files changed, 82 insertions(+), 576 deletions(-) create mode 100644 clang/lib/Driver/ToolChains/Arch/Xtensa.cpp create mode 100644 clang/lib/Driver/ToolChains/Arch/Xtensa.h delete mode 100644 clang/lib/Driver/ToolChains/Xtensa.cpp delete mode 100644 clang/lib/Driver/ToolChains/Xtensa.h diff --git a/clang/lib/Driver/CMakeLists.txt b/clang/lib/Driver/CMakeLists.txt index 1a6d89040d886..ab8d31c414196 100644 --- a/clang/lib/Driver/CMakeLists.txt +++ b/clang/lib/Driver/CMakeLists.txt @@ -41,6 
+41,7 @@ add_clang_library(clangDriver ToolChains/Arch/SystemZ.cpp ToolChains/Arch/VE.cpp ToolChains/Arch/X86.cpp + ToolChains/Arch/Xtensa.cpp ToolChains/AIX.cpp ToolChains/AMDGPU.cpp ToolChains/AMDGPUOpenMP.cpp @@ -82,7 +83,6 @@ add_clang_library(clangDriver ToolChains/VEToolchain.cpp ToolChains/WebAssembly.cpp ToolChains/XCore.cpp - ToolChains/Xtensa.cpp ToolChains/PPCLinux.cpp ToolChains/PPCFreeBSD.cpp ToolChains/InterfaceStubs.cpp diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp index 7ea5a434b5259..57761ff681ac2 100644 --- a/clang/lib/Driver/Driver.cpp +++ b/clang/lib/Driver/Driver.cpp @@ -49,7 +49,6 @@ #include "ToolChains/VEToolchain.h" #include "ToolChains/WebAssembly.h" #include "ToolChains/XCore.h" -#include "ToolChains/Xtensa.h" #include "ToolChains/ZOS.h" #include "clang/Basic/DiagnosticDriver.h" #include "clang/Basic/TargetID.h" diff --git a/clang/lib/Driver/ToolChains/Arch/Xtensa.cpp b/clang/lib/Driver/ToolChains/Arch/Xtensa.cpp new file mode 100644 index 0000000000000..70d0895c92b24 --- /dev/null +++ b/clang/lib/Driver/ToolChains/Arch/Xtensa.cpp @@ -0,0 +1,42 @@ +//===--- Xtensa.cpp - Xtensa Helpers for Tools -------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "Xtensa.h" +#include "ToolChains/CommonArgs.h" +#include "clang/Driver/Driver.h" +#include "clang/Driver/DriverDiagnostic.h" +#include "llvm/TargetParser/XtensaTargetParser.h" + +using namespace clang::driver; +using namespace clang::driver::tools; +// using namespace clang; +using namespace llvm::opt; + +// Get features by CPU name +static void getXtensaFeaturesFromMcpu(const Driver &D, + const llvm::opt::ArgList &Args, + const llvm::opt::Arg *A, llvm::StringRef Mcpu, + std::vector &Features) { + if (llvm::Xtensa::parseCPUKind(Mcpu) == llvm::Xtensa::CK_INVALID) { + D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args); + } else { + llvm::SmallVector CPUFeatures; + llvm::Xtensa::getCPUFeatures(Mcpu, CPUFeatures); + for (auto &F : CPUFeatures) { + Features.push_back(F); + } + } +} + +// Xtensa target features. +void xtensa::getXtensaTargetFeatures(const Driver &D, const llvm::Triple &Triple, + const llvm::opt::ArgList &Args, + std::vector &Features) { + if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) + getXtensaFeaturesFromMcpu(D, Args, A, A->getValue(), Features); +} diff --git a/clang/lib/Driver/ToolChains/Arch/Xtensa.h b/clang/lib/Driver/ToolChains/Arch/Xtensa.h new file mode 100644 index 0000000000000..544f452042b83 --- /dev/null +++ b/clang/lib/Driver/ToolChains/Arch/Xtensa.h @@ -0,0 +1,30 @@ +//===--- Xtensa.h - Xtensa-specific Tool Helpers -----------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_XTENSA_H +#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_XTENSA_H + +#include "clang/Driver/Driver.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/Option/Option.h" +// #include +#include + +namespace clang { +namespace driver { +namespace tools { +namespace xtensa { +void getXtensaTargetFeatures(const Driver &D, const llvm::Triple &Triple, + const llvm::opt::ArgList &Args, + std::vector &Features); +} // end namespace xtensa +} // namespace tools +} // end namespace driver +} // end namespace clang + +#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_XTENSA_H diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp index 069560e2b6e47..4c1d3a018fa03 100644 --- a/clang/lib/Driver/ToolChains/CommonArgs.cpp +++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp @@ -19,11 +19,11 @@ #include "Arch/SystemZ.h" #include "Arch/VE.h" #include "Arch/X86.h" +#include "Arch/Xtensa.h" #include "HIPAMD.h" #include "Hexagon.h" #include "MSP430.h" #include "Solaris.h" -#include "Xtensa.h" #include "clang/Basic/CharInfo.h" #include "clang/Basic/CodeGenOptions.h" #include "clang/Basic/LangOptions.h" @@ -2930,16 +2930,3 @@ void tools::addMCModel(const Driver &D, const llvm::opt::ArgList &Args, } } } - -void tools::addEspMultilibsPaths(const Driver &D, const MultilibSet &Multilibs, - const Multilib &Multilib, - StringRef CPU, - StringRef InstallPath, - ToolChain::path_list &Paths) { - if (const auto &PathsCallback = Multilibs.filePathsCallback()) - for (const auto &Path : PathsCallback(Multilib)) { - SmallString<256> LibPath(D.ResourceDir); - llvm::sys::path::append(LibPath, D.getTargetTriple(), CPU, Path, "lib"); - addPathIfExists(D, LibPath, Paths); - } -} diff --git a/clang/lib/Driver/ToolChains/CommonArgs.h b/clang/lib/Driver/ToolChains/CommonArgs.h 
index 3bde296b43097..157ecbab069c1 100644 --- a/clang/lib/Driver/ToolChains/CommonArgs.h +++ b/clang/lib/Driver/ToolChains/CommonArgs.h @@ -227,13 +227,6 @@ void addMCModel(const Driver &D, const llvm::opt::ArgList &Args, const llvm::Triple &Triple, const llvm::Reloc::Model &RelocationModel, llvm::opt::ArgStringList &CmdArgs); - - -void addEspMultilibsPaths(const Driver &D, const MultilibSet &Multilibs, - const Multilib &Multilib, - StringRef CPU, - StringRef InstallPath, - ToolChain::path_list &Paths); } // end namespace tools } // end namespace driver } // end namespace clang diff --git a/clang/lib/Driver/ToolChains/EspBareMetal.cpp b/clang/lib/Driver/ToolChains/EspBareMetal.cpp index a3badac5dd3a7..cbfbcc4a0afbe 100644 --- a/clang/lib/Driver/ToolChains/EspBareMetal.cpp +++ b/clang/lib/Driver/ToolChains/EspBareMetal.cpp @@ -266,6 +266,9 @@ void baremetal::esp::Assembler::ConstructJob(Compilation &C, const JobAction &JA if (!A->getOption().matches(options::OPT_g0)) CmdArgs.push_back("-g"); + if (Args.getLastArg(options::OPT_mtext_section_literals)) + CmdArgs.push_back("--text-section-literals"); + if (Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm, false)) CmdArgs.push_back("-fverbose-asm"); diff --git a/clang/lib/Driver/ToolChains/EspBareMetal.h b/clang/lib/Driver/ToolChains/EspBareMetal.h index ea074937dbb82..08c544e34ea02 100644 --- a/clang/lib/Driver/ToolChains/EspBareMetal.h +++ b/clang/lib/Driver/ToolChains/EspBareMetal.h @@ -84,6 +84,10 @@ class LLVM_LIBRARY_VISIBILITY Assembler : public Tool { const char *LinkingOutput) const override; }; +void getXtensaTargetFeatures(const Driver &D, const llvm::Triple &Triple, + const llvm::opt::ArgList &Args, + std::vector &Features); + } // namespace esp } // namespace baremetal } // namespace tools diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp index f24020614a625..fece1966b7837 100644 --- a/clang/lib/Driver/ToolChains/Gnu.cpp +++ 
b/clang/lib/Driver/ToolChains/Gnu.cpp @@ -1969,47 +1969,6 @@ static void findRISCVMultilibs(const Driver &D, Result.Multilibs = RISCVMultilibs; } -static void findXtensaMultilibs(const Driver &D, - const llvm::Triple &TargetTriple, - StringRef Path, const ArgList &Args, - DetectedMultilibs &Result) { - FilterNonExistent NonExistent(Path, "/crtbegin.o", D.getVFS()); - - StringRef cpu = Args.getLastArgValue(options::OPT_mcpu_EQ, "esp32"); - bool IsESP32 = cpu == "esp32"; - - Multilib::flags_list Flags; - - addMultilibFlag( - Args.hasFlag(options::OPT_fno_rtti, options::OPT_frtti, false), - "-fno-rtti", Flags); - - addMultilibFlag( - IsESP32 && Args.hasFlag(options::OPT_mfix_esp32_psram_cache_issue, - options::OPT_mfix_esp32_psram_cache_issue, false), - "-mfix-esp32-psram-cache-issue", Flags); - - MultilibBuilder NoRTTI = MultilibBuilder("/no-rtti").flag("-fno-rtti"); - MultilibBuilder FixPSRAM = - MultilibBuilder("/esp32-psram").flag("-mfix-esp32-psram-cache-issue"); - - MultilibSet XtensaMultilibs = MultilibSetBuilder() - .Maybe(FixPSRAM) - .Maybe(NoRTTI) - .makeMultilibSet() - .FilterOut(NonExistent); - - std::string cpu_name = cpu.str(); - XtensaMultilibs.setFilePathsCallback([cpu_name](const Multilib &M) { - return std::vector( - {M.gccSuffix(), - "/../../../../xtensa-" + cpu_name + "-elf/lib" + M.gccSuffix()}); - }); - - if (XtensaMultilibs.select(Flags, Result.SelectedMultilibs)) - Result.Multilibs = XtensaMultilibs; -} - static bool findBiarchMultilibs(const Driver &D, const llvm::Triple &TargetTriple, StringRef Path, const ArgList &Args, @@ -2647,13 +2606,6 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes( "s390x-unknown-linux-gnu", "s390x-ibm-linux-gnu", "s390x-suse-linux", "s390x-redhat-linux"}; - static const char *const XtensaLibDirs[] = {"/lib"}; - static const char *const XtensaTriples[] = {"xtensa-esp-elf", - "xtensa-esp-unknown-elf", - "xtensa-esp32-elf", - "xtensa-esp32s2-elf", - "xtensa-esp32s3-elf"}; - using std::begin; using 
std::end; @@ -2925,10 +2877,6 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes( LibDirs.append(begin(SystemZLibDirs), end(SystemZLibDirs)); TripleAliases.append(begin(SystemZTriples), end(SystemZTriples)); break; - case llvm::Triple::xtensa: - LibDirs.append(begin(XtensaLibDirs), end(XtensaLibDirs)); - TripleAliases.append(begin(XtensaTriples), end(XtensaTriples)); - break; default: // By default, just rely on the standard lib directories and the original // triple. @@ -2963,8 +2911,6 @@ bool Generic_GCC::GCCInstallationDetector::ScanGCCForMultilibs( findMSP430Multilibs(D, TargetTriple, Path, Args, Detected); } else if (TargetArch == llvm::Triple::avr) { // AVR has no multilibs. - } else if (TargetArch == llvm::Triple::xtensa) { - findXtensaMultilibs(D, TargetTriple, Path, Args, Detected); } else if (!findBiarchMultilibs(D, TargetTriple, Path, Args, NeedsBiarchSuffix, Detected)) { return false; diff --git a/clang/lib/Driver/ToolChains/RISCVToolchain.cpp b/clang/lib/Driver/ToolChains/RISCVToolchain.cpp index 258d142cd17ee..9d4c11ccbed2a 100644 --- a/clang/lib/Driver/ToolChains/RISCVToolchain.cpp +++ b/clang/lib/Driver/ToolChains/RISCVToolchain.cpp @@ -71,16 +71,6 @@ RISCVToolChain::RISCVToolChain(const Driver &D, const llvm::Triple &Triple, getProgramPaths().push_back(D.Dir); } - if (getTriple().getVendor() == llvm::Triple::Espressif) { - // TODO: need to detect multilibs when GCC installation is not available - addEspMultilibsPaths(D, Multilibs, SelectedMultilibs.back(), - Args.getLastArgValue(options::OPT_mcpu_EQ, "generic-rv32"), - D.Dir, getLibraryPaths()); - addEspMultilibsPaths(D, Multilibs, SelectedMultilibs.back(), - Args.getLastArgValue(options::OPT_mcpu_EQ, "generic-rv32"), - D.Dir, getFilePaths()); - } - getFilePaths().push_back(computeSysRoot() + "/lib"); } @@ -204,17 +194,7 @@ void RISCV::Linker::ConstructJob(Compilation &C, const JobAction &JA, } if (WantCRTs) { - /* Espressif toolcahin uses newlib. 
crt0.o from it refers to 'main' symbol. - In 'freestanding' mode 'main' is not marked as special symbol by clang, - so when compiling C++ program with 'clang++' 'main' gets mmangled - (if not decalred as 'extern "C"' ) and linker can not resolve it. - The problem can happen, for example, when cmake checks C++ compiler by buiding simple C++ code, - unfortunately 'main' function in that code is not decalred as 'extern "C"'. */ - bool Freestanding = - Args.hasFlag(options::OPT_ffreestanding, options::OPT_fhosted, false); - if (!Freestanding || ToolChain.getTriple().getVendor() != llvm::Triple::Espressif) { - CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o"))); - } + CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o"))); CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin))); } @@ -238,9 +218,6 @@ void RISCV::Linker::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back("--start-group"); CmdArgs.push_back("-lc"); CmdArgs.push_back("-lgloss"); - if (ToolChain.getTriple().getVendor() == llvm::Triple::Espressif) { - CmdArgs.push_back("-lnosys"); - } CmdArgs.push_back("--end-group"); AddRunTimeLibs(ToolChain, ToolChain.getDriver(), CmdArgs, Args); } diff --git a/clang/lib/Driver/ToolChains/RISCVToolchain.h b/clang/lib/Driver/ToolChains/RISCVToolchain.h index 42a35ed287925..fa0aa265d842b 100644 --- a/clang/lib/Driver/ToolChains/RISCVToolchain.h +++ b/clang/lib/Driver/ToolChains/RISCVToolchain.h @@ -37,12 +37,6 @@ class LLVM_LIBRARY_VISIBILITY RISCVToolChain : public Generic_ELF { addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args) const override; - bool IsIntegratedAssemblerDefault() const override { - if (GCCInstallation.getTriple().getVendor() == llvm::Triple::Espressif) - return false; - return Generic_ELF::IsIntegratedAssemblerDefault(); - } - protected: Tool *buildLinker() const override; diff --git a/clang/lib/Driver/ToolChains/Xtensa.cpp 
b/clang/lib/Driver/ToolChains/Xtensa.cpp deleted file mode 100644 index 1f26e6e02ceb2..0000000000000 --- a/clang/lib/Driver/ToolChains/Xtensa.cpp +++ /dev/null @@ -1,378 +0,0 @@ -//===--- Xtensa.cpp - Xtensa ToolChain Implementations ----------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "Xtensa.h" -#include "CommonArgs.h" -#include "clang/Basic/Cuda.h" -#include "clang/Config/config.h" -#include "clang/Driver/Compilation.h" -#include "clang/Driver/Distro.h" -#include "clang/Driver/Driver.h" -#include "clang/Driver/DriverDiagnostic.h" -#include "clang/Driver/MultilibBuilder.h" -#include "clang/Driver/Options.h" -#include "llvm/Option/ArgList.h" -#include "llvm/Support/Path.h" -#include "llvm/Support/VirtualFileSystem.h" -#include "llvm/TargetParser/XtensaTargetParser.h" -#include - -using namespace clang::driver; -using namespace clang::driver::tools; -using namespace clang::driver::toolchains; -using namespace clang; -using namespace llvm::opt; - -/// Xtensa Toolchain -XtensaToolChain::XtensaToolChain(const Driver &D, const llvm::Triple &Triple, - const ArgList &Args) - : Generic_ELF(D, Triple, Args) { - - GCCInstallation.init(Triple, Args); - if (GCCInstallation.isValid()) { - for (auto *A : Args) { - std::string Str = A->getAsString(Args); - if (!Str.compare("-mlongcalls")) - A->claim(); - if (!Str.compare("-fno-tree-switch-conversion")) - A->claim(); - - // Currently don't use integrated assembler for assembler input files - if ((IsIntegratedAsm) && (Str.length() > 2)) { - std::string ExtSubStr = Str.substr(Str.length() - 2); - if (!ExtSubStr.compare(".s")) - IsIntegratedAsm = false; - if (!ExtSubStr.compare(".S")) - IsIntegratedAsm = false; - } - } 
- - // Currently don't use integrated assembler for assembler input files - if (IsIntegratedAsm) { - if (Args.getLastArgValue(options::OPT_x) == "assembler") - IsIntegratedAsm = false; - - if (Args.getLastArgValue(options::OPT_x) == "assembler-with-cpp") - IsIntegratedAsm = false; - } - - Multilibs = GCCInstallation.getMultilibs(); - SelectedMultilibs.assign({GCCInstallation.getMultilib()}); - - GCCLibAndIncVersion = GCCInstallation.getVersion().Text; - GCCToolchainName = GCCInstallation.getTriple().str(); - SmallString<128> Path(GCCInstallation.getParentLibPath()); - llvm::sys::path::append(Path, ".."); - GCCToolchainDir = Path.c_str(); - - SmallString<128> Libs1(GCCToolchainDir); - llvm::sys::path::append(Libs1, "lib", "gcc", GCCToolchainName, - GCCLibAndIncVersion); - if (!SelectedMultilibs.back().gccSuffix().empty()) - llvm::sys::path::append(Libs1, SelectedMultilibs.back().gccSuffix()); - getFilePaths().push_back(Libs1.c_str()); - - SmallString<128> Libs2(GCCToolchainDir); - llvm::sys::path::append(Libs2, GCCToolchainName, "lib"); - if (!SelectedMultilibs.back().gccSuffix().empty()) - llvm::sys::path::append(Libs2, SelectedMultilibs.back().gccSuffix()); - getFilePaths().push_back(Libs2.c_str()); - - ToolChain::path_list &PPaths = getProgramPaths(); - // Multilib cross-compiler GCC installations put ld in a triple-prefixed - // directory of the GCC installation parent dir. 
- StringRef ParentDir = llvm::sys::path::parent_path(GCCInstallation.getParentLibPath()); - - SmallString<128> PathTripleBin(ParentDir); - llvm::sys::path::append(PathTripleBin, GCCInstallation.getTriple().str()); - llvm::sys::path::append(PathTripleBin, "bin"); - PPaths.push_back(PathTripleBin.c_str()); - - SmallString<128> PathBin(ParentDir); - llvm::sys::path::append(PathBin, "bin"); - PPaths.push_back(PathBin.c_str()); - - if (!getDriver().SysRoot.empty()) { - SmallString<128> SysRoot(computeSysRoot()); - llvm::sys::path::append(SysRoot, "lib"); - getFilePaths().push_back(SysRoot.c_str()); - } - } else { - getProgramPaths().push_back(D.Dir); - SmallString<128> SysRoot(computeSysRoot()); - llvm::sys::path::append(SysRoot, "lib"); - getFilePaths().push_back(SysRoot.c_str()); - } - - if (getTriple().getVendor() == llvm::Triple::Espressif) { - StringRef CpuName = GetTargetCPUVersion(Args, Triple); - - // TODO: need to detect multilibs when GCC installation is not available - addEspMultilibsPaths(D, Multilibs, SelectedMultilibs.back(), CpuName, - D.Dir, getLibraryPaths()); - addEspMultilibsPaths(D, Multilibs, SelectedMultilibs.back(), CpuName, - D.Dir, getFilePaths()); - } -} - -Tool *XtensaToolChain::buildLinker() const { - return new tools::xtensa::Linker(*this); -} - -Tool *XtensaToolChain::buildAssembler() const { - return new tools::xtensa::Assembler(*this); -} - -void XtensaToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs, - ArgStringList &CC1Args) const { - if (DriverArgs.hasArg(clang::driver::options::OPT_nostdinc)) - return; - - if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) { - SmallString<128> Dir(getDriver().ResourceDir); - llvm::sys::path::append(Dir, "include"); - addSystemInclude(DriverArgs, CC1Args, Dir.str()); - } - - if (!DriverArgs.hasArg(options::OPT_nostdlibinc)) { - if (!getDriver().SysRoot.empty()) { - SmallString<128> Dir(getDriver().SysRoot); - llvm::sys::path::append(Dir, "include"); - addSystemInclude(DriverArgs, CC1Args, 
Dir.str()); - } else if (GCCInstallation.isValid()) { - SmallString<128> Path1(GCCToolchainDir); - llvm::sys::path::append(Path1, GCCToolchainName, "sys-include"); - SmallString<128> Path2(GCCToolchainDir); - llvm::sys::path::append(Path2, GCCToolchainName, "include"); - - const StringRef Paths[] = {Path1, Path2}; - addSystemIncludes(DriverArgs, CC1Args, Paths); - } else { - SmallString<128> Dir(computeSysRoot()); - llvm::sys::path::append(Dir, "include"); - addSystemInclude(DriverArgs, CC1Args, Dir.str()); - } - } -} - -void XtensaToolChain::addLibStdCxxIncludePaths( - const llvm::opt::ArgList &DriverArgs, - llvm::opt::ArgStringList &CC1Args) const { - if (!GCCInstallation.isValid()) - return; - - const GCCVersion &Version = GCCInstallation.getVersion(); - StringRef TripleStr = GCCInstallation.getTriple().str(); - addLibStdCXXIncludePaths(computeSysRoot() + "/include/c++/" + Version.Text, - TripleStr, "", DriverArgs, CC1Args); -} - -std::string XtensaToolChain::computeSysRoot() const { - if (!getDriver().SysRoot.empty()) - return getDriver().SysRoot; - - SmallString<128> SysRootDir; - if (GCCInstallation.isValid()) { - StringRef LibDir = GCCInstallation.getParentLibPath(); - StringRef TripleStr = GCCInstallation.getTriple().str(); - llvm::sys::path::append(SysRootDir, LibDir, "..", TripleStr); - } else { - // Use the triple as provided to the driver. Unlike the parsed triple - // this has not been normalized to always contain every field. 
- llvm::sys::path::append(SysRootDir, getDriver().Dir, "..", - getDriver().getTargetTriple()); - } - - if (!llvm::sys::fs::exists(SysRootDir)) - return std::string(); - - return std::string(SysRootDir.str()); -} - -ToolChain::CXXStdlibType -XtensaToolChain::GetCXXStdlibType(const ArgList &Args) const { - Arg *A = Args.getLastArg(options::OPT_stdlib_EQ); - if (!A) - return ToolChain::CST_Libstdcxx; - - StringRef Value = A->getValue(); - if (Value != "libstdc++") - getDriver().Diag(diag::err_drv_invalid_stdlib_name) << A->getAsString(Args); - - return ToolChain::CST_Libstdcxx; -} - -ToolChain::UnwindLibType -XtensaToolChain::GetUnwindLibType(const llvm::opt::ArgList &Args) const { - return ToolChain::UNW_None; -} - -const StringRef XtensaToolChain::GetTargetCPUVersion(const ArgList &Args, const llvm::Triple &Triple) { - StringRef CPUName; - if (Arg *A = Args.getLastArg(clang::driver::options::OPT_mcpu_EQ)) { - CPUName = A->getValue(); - } else if (Triple.getVendor() == llvm::Triple::Espressif) { - // 'esp32' is default for 'xtensa-esp-xxx' targets, - // for generic 'xtensa' target CPU should be always specified explicitly with '-mcpu' - CPUName = "esp32"; - - } - return CPUName; -} - -void tools::xtensa::Assembler::ConstructJob(Compilation &C, const JobAction &JA, - const InputInfo &Output, - const InputInfoList &Inputs, - const ArgList &Args, - const char *LinkingOutput) const { - const auto &TC = - static_cast(getToolChain()); - - claimNoWarnArgs(Args); - ArgStringList CmdArgs; - - CmdArgs.push_back("-o"); - CmdArgs.push_back(Output.getFilename()); - - CmdArgs.push_back("-c"); - - if (Args.hasArg(options::OPT_v)) - CmdArgs.push_back("-v"); - - if (Arg *A = Args.getLastArg(options::OPT_g_Group)) - if (!A->getOption().matches(options::OPT_g0)) - CmdArgs.push_back("-g"); - - if (Args.getLastArg(options::OPT_mtext_section_literals)) - CmdArgs.push_back("--text-section-literals"); - - if (Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm, - false)) - 
CmdArgs.push_back("-fverbose-asm"); - - Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler); - - for (const auto &II : Inputs) - CmdArgs.push_back(II.getFilename()); - - SmallString<128> Asm(TC.GCCToolchainDir); - llvm::sys::path::append(Asm, "bin", - TC.GCCToolchainName + "-" + getShortName()); - - C.addCommand( - std::make_unique(JA, *this, ResponseFileSupport::AtFileCurCP(), - Args.MakeArgString(Asm), CmdArgs, Inputs)); -} - -void xtensa::Linker::ConstructJob(Compilation &C, const JobAction &JA, - const InputInfo &Output, - const InputInfoList &Inputs, - const ArgList &Args, - const char *LinkingOutput) const { - ArgStringList CmdArgs; - SmallString<128> Linker; - bool WantCRTs = - !Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles); - const auto &ToolChain = - static_cast(getToolChain()); - const Driver &D = ToolChain.getDriver(); - - if (!D.SysRoot.empty()) - CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot)); - - bool LinkerIsLLD; - std::string LinkerPath = ToolChain.GetLinkerPath(&LinkerIsLLD); - if (!LinkerIsLLD) { - if (ToolChain.GCCToolchainName != "") { - Linker.assign(ToolChain.GCCToolchainDir); - llvm::sys::path::append( - Linker, "bin", ToolChain.GCCToolchainName + "-" + getShortName()); - } else if (ToolChain.getTriple().getVendor() == llvm::Triple::Espressif) { - // ESP workaround, if there is no GCC installation we need to use xtensa-espXX-elf prefix for ld. 
- // so guess it basing on selected mcpu - Linker.assign(ToolChain.getDriver().Dir); - llvm::sys::path::append( - Linker, "xtensa-" + ToolChain.GetTargetCPUVersion(Args, ToolChain.getTriple()) + "-elf-" + getShortName()); - } else { - Linker.assign(LinkerPath); - } - } else { - Linker.assign(LinkerPath); - } - - const char *crtbegin, *crtend; - auto RuntimeLib = ToolChain.GetRuntimeLibType(Args); - if (RuntimeLib == ToolChain::RLT_Libgcc) { - crtbegin = "crtbegin.o"; - crtend = "crtend.o"; - } else { - assert(RuntimeLib == ToolChain::RLT_CompilerRT); - crtbegin = ToolChain.getCompilerRTArgString(Args, "crtbegin", - ToolChain::FT_Object); - crtend = - ToolChain.getCompilerRTArgString(Args, "crtend", ToolChain::FT_Object); - } - - if (WantCRTs) { - // TODO: The crt0.o is not used for esp targets, but maybe used in - // future for other vendors - if (ToolChain.getTriple().getVendor() != llvm::Triple::Espressif) - CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o"))); - CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin))); - } - - AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA); - - Args.AddAllArgs(CmdArgs, options::OPT_L); - ToolChain.AddFilePathLibArgs(Args, CmdArgs); - Args.addAllArgs(CmdArgs, - {options::OPT_T_Group, options::OPT_e, options::OPT_s, - options::OPT_t, options::OPT_u_Group}); - - if (!Args.hasArg(options::OPT_nostdlib) && - !Args.hasArg(options::OPT_nodefaultlibs)) { - if (ToolChain.ShouldLinkCXXStdlib(Args)) - ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs); - AddRunTimeLibs(ToolChain, ToolChain.getDriver(), CmdArgs, Args); - } - - if (WantCRTs) - CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend))); - - CmdArgs.push_back("-o"); - CmdArgs.push_back(Output.getFilename()); - C.addCommand( - std::make_unique(JA, *this, ResponseFileSupport::AtFileCurCP(), - Args.MakeArgString(Linker), CmdArgs, Inputs)); -} - -// Get features by CPU name -static void getXtensaFeaturesFromMcpu(const Driver &D, - 
const llvm::opt::ArgList &Args, - const llvm::opt::Arg *A, StringRef Mcpu, - std::vector &Features) { - if (llvm::Xtensa::parseCPUKind(Mcpu) == llvm::Xtensa::CK_INVALID) { - D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args); - } else { - SmallVector CPUFeatures; - llvm::Xtensa::getCPUFeatures(Mcpu, CPUFeatures); - for (auto &F : CPUFeatures) { - Features.push_back(F); - } - } -} - -// Xtensa target features. -void xtensa::getXtensaTargetFeatures(const Driver &D, const llvm::Triple &Triple, - const llvm::opt::ArgList &Args, - std::vector &Features) { - if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) - getXtensaFeaturesFromMcpu(D, Args, A, A->getValue(), Features); -} diff --git a/clang/lib/Driver/ToolChains/Xtensa.h b/clang/lib/Driver/ToolChains/Xtensa.h deleted file mode 100644 index d7b68e4c10782..0000000000000 --- a/clang/lib/Driver/ToolChains/Xtensa.h +++ /dev/null @@ -1,91 +0,0 @@ -//===--- Xtensa.h - Xtensa Tool and ToolChain Implementations ---*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_Xtensa_H -#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_Xtensa_H - -#include "Gnu.h" -#include "clang/Driver/InputInfo.h" -#include "clang/Driver/Tool.h" -#include "clang/Driver/ToolChain.h" - -namespace clang { -namespace driver { -namespace toolchains { - -class LLVM_LIBRARY_VISIBILITY XtensaToolChain : public Generic_ELF { -protected: - Tool *buildLinker() const override; - Tool *buildAssembler() const override; - -public: - XtensaToolChain(const Driver &D, const llvm::Triple &Triple, - const llvm::opt::ArgList &Args); - void - AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs, - llvm::opt::ArgStringList &CC1Args) const override; - void - addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs, - llvm::opt::ArgStringList &CC1Args) const override; - CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override; - UnwindLibType GetUnwindLibType(const llvm::opt::ArgList &Args) const override; - - bool IsIntegratedAssemblerDefault() const override { - return (IsIntegratedAsm || (GCCToolchainName == "")); - } - - static const StringRef GetTargetCPUVersion(const llvm::opt::ArgList &Args, const llvm::Triple &Triple); - - bool IsIntegratedAsm = true; - std::string GCCLibAndIncVersion = ""; - std::string GCCToolchainName = ""; - std::string GCCToolchainDir = ""; - -private: - std::string computeSysRoot() const override; -}; - -} // end namespace toolchains - -namespace tools { -namespace xtensa { -class LLVM_LIBRARY_VISIBILITY Linker : public Tool { -public: - Linker(const ToolChain &TC) - : Tool("Xtensa::Linker", "ld", TC) {} - bool hasIntegratedCPP() const override { return false; } - bool isLinkJob() const override { return true; } - void ConstructJob(Compilation &C, const JobAction &JA, - const InputInfo &Output, const InputInfoList &Inputs, - const 
llvm::opt::ArgList &TCArgs, - const char *LinkingOutput) const override; -}; - -class LLVM_LIBRARY_VISIBILITY Assembler : public Tool { -public: - Assembler(const ToolChain &TC) - : Tool("Xtensa::Assembler", "as", TC) {} - - bool hasIntegratedCPP() const override { return false; } - void ConstructJob(Compilation &C, const JobAction &JA, - const InputInfo &Output, const InputInfoList &Inputs, - const llvm::opt::ArgList &TCArgs, - const char *LinkingOutput) const override; -}; - -void getXtensaTargetFeatures(const Driver &D, const llvm::Triple &Triple, - const llvm::opt::ArgList &Args, - std::vector &Features); -} // end namespace xtensa -} // end namespace tools -} // end namespace driver -} // end namespace clang - -#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_Xtensa_H From 926e3f15aff3bd6b3dd58e0e1b44f1a82f58433b Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Thu, 22 Feb 2024 12:54:13 +0300 Subject: [PATCH 221/289] [Clang] Fix undefined std::errc for MinGW build --- clang/lib/Interpreter/IncrementalParser.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/lib/Interpreter/IncrementalParser.cpp b/clang/lib/Interpreter/IncrementalParser.cpp index b7c809c45098c..ee22b60df1106 100644 --- a/clang/lib/Interpreter/IncrementalParser.cpp +++ b/clang/lib/Interpreter/IncrementalParser.cpp @@ -126,7 +126,7 @@ class IncrementalAction : public WrapperFrontendAction { switch (CI.getFrontendOpts().ProgramAction) { default: Err = llvm::createStringError( - std::errc::state_not_recoverable, + std::errc::not_supported, "Driver initialization failed. 
" "Incremental mode for action %d is not supported", CI.getFrontendOpts().ProgramAction); From 6b10f0ad27c6ab4e2bf76456f45ca63b0ac4f512 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 2 Apr 2024 02:46:46 +0300 Subject: [PATCH 222/289] [LLD] Disable x86_64 specific test if target is no enabled --- lld/test/ELF/lto/resolution-err.ll | 1 + 1 file changed, 1 insertion(+) diff --git a/lld/test/ELF/lto/resolution-err.ll b/lld/test/ELF/lto/resolution-err.ll index f9855abaff327..452d942c72d81 100644 --- a/lld/test/ELF/lto/resolution-err.ll +++ b/lld/test/ELF/lto/resolution-err.ll @@ -1,4 +1,5 @@ ; UNSUPPORTED: system-windows +; REQUIRES: target=x86_64-unknown-linux-gnu ; REQUIRES: shell, non-root-user ; RUN: llvm-as %s -o %t.bc ; RUN: touch %t.resolution.txt From 175f796a6ec6e18328673821c097e82f040473d5 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Thu, 19 Oct 2023 18:36:22 +0300 Subject: [PATCH 223/289] esp/ci: Use CMake-based build scripts --- .gitlab-ci.yml | 547 +++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 462 insertions(+), 85 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4e11284b1b001..09891b869dfe0 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,114 +1,491 @@ stages: + - test_build - build - pack - - macos_codesign + - sign - private_deploy - test - public_deploy + - update_idf_tools image: ${CI_DOCKER_REGISTRY}/llvm-build:4 variables: - - # move all these to CI/CD settings - REL_SFX: "llvm" - CLANG_VER: "17" - GCC_REL_VER: "12.2.0_20230208" - NEWLIB_REF: "esp-4.1.0_20230425" - BINUTILS_REF: "esp-2.39.0_20230208" - XTENSA_OVERLAYS_REF: "master" - LLVM_GCC_TESTSUITE_REF: "esp-16.0.0-20230425" - XTENSA_CLANG_TOOLCHAIN_REF: "esp-16.0.0-20230516" - + ESP_LLVM_EMBEDDED_TOOLCHAIN_REF: "master" CROSS_ARM_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-arm:1 - PLATFORM_NAME_LINUX: "linux-amd64" - PLATFORM_NAME_LINUX_ARMHF: "linux-armhf" - PLATFORM_NAME_LINUX_ARM64: "linux-arm64" - PLATFORM_NAME_WIN: "win64" - 
PLATFORM_NAME_MACOS: "macos" - PLATFORM_NAME_MACOS_ARM64: "macos-arm64" - - GCC_PLATFORM_NAME_LINUX: "i686-linux-gnu" - GCC_PLATFORM_NAME_LINUX_ARMHF: "arm-linux-gnueabihf" - GCC_PLATFORM_NAME_LINUX_ARM64: "aarch64-linux-gnu" - GCC_PLATFORM_NAME_WIN: "i686-w64-mingw32" - GCC_PLATFORM_NAME_MACOS: "x86_64-apple-darwin" - GCC_PLATFORM_NAME_MACOS_ARM64: "aarch64-apple-darwin" - - ARCHIVE_TOOL_LINUX: "tar -cJf" - UNARCHIVE_TOOL_LINUX: "tar -xf" - ARCHIVE_EXT_LINUX: "tar.xz" - - ARCHIVE_TOOL_WIN: "zip -9 -r" - UNARCHIVE_TOOL_WIN: "unzip" - ARCHIVE_EXT_WIN: "zip" - - PACK_ARCHIVE_TOOL_WIN: "tar -h -cJf" - PACK_UNARCHIVE_TOOL_WIN: "${UNARCHIVE_TOOL_LINUX}" - PACK_ARCHIVE_EXT_WIN: "${ARCHIVE_EXT_LINUX}" - - ARCHIVE_TOOL_MACOS: "tar -cJf" - UNARCHIVE_TOOL_MACOS: "tar -xf" - ARCHIVE_EXT_MACOS: "tar.xz" - - ARCHIVE_TOOL_NEWLIB: ${ARCHIVE_TOOL_LINUX} - UNARCHIVE_TOOL_NEWLIB: ${UNARCHIVE_TOOL_LINUX} - ARCHIVE_EXT_NEWLIB: ${ARCHIVE_EXT_LINUX} - - ARCHIVE_TOOL_COMPILER_RT: ${ARCHIVE_TOOL_LINUX} - UNARCHIVE_TOOL_COMPILER_RT: ${UNARCHIVE_TOOL_LINUX} - ARCHIVE_EXT_COMPILER_RT: ${ARCHIVE_EXT_LINUX} - - LIBS_ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - LIBS_UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - LIBS_ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" - + CROSS_WIN_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-win:1 DIST_DIR: "dist" BUILD_DIR: "build" - DOWNLOADS_DIR: "downloads" -########################################################################### -#################### START OF TEMPORARY LEGACY CODE ####################### -# TODO: the code below is to be removed after migration to new build script - CONF_TARGET: "xtensa-esp32-elf" - XTENSA_CLANG_TOOLCHAIN: "${CONF_TARGET}-clang" -##################### END OF TEMPORARY LEGACY CODE ######################## -########################################################################### - -.use_ci_tools_snippet: &use_ci_tools_snippet | +.use_ci_tools: &use_ci_tools | curl -sSL ${CIT_LOADER_URL} -o cit_loader.sh && sh cit_loader.sh source 
citools/import_functions -.use_ci_tools: +.add_gitlab_key: &add_gitlab_key | + cit_add_ssh_key "${GITLAB_KEY}" + +.get_toolchain_build_scripts: &get_toolchain_build_scripts | + git clone -b ${ESP_LLVM_EMBEDDED_TOOLCHAIN_REF} ${GITLAB_SSH_SERVER}/${ESP_LLVM_EMBEDDED_TOOLCHAIN_REPO}.git + + +before_script: + - *use_ci_tools + - *add_gitlab_key + +.build_template: + tags: [ "amd64", "build" ] + artifacts: + paths: + - ${DIST_DIR}/ + - ${BUILD_DIR}/*.log + when: always + expire_in: 1 day + variables: + USE_LINKER: "ld" + CROSS_BUILD_MINGW: "OFF" + SKIP_TESTS: "ON" + PACK_DISTRO: "ON" + after_script: + # help to identify that build failed due to OOM + - > + if [ $CI_JOB_STATUS == 'failed' ]; then + [ ! -f "${BUILD_DIR}/build.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/build.log || true + [ ! -f "${BUILD_DIR}/tests.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/tests.log || true + [ ! -f "${BUILD_DIR}/lld-tests.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/lld-tests.log || true + fi script: - - *use_ci_tools_snippet + - *get_toolchain_build_scripts + - LLVM_PROJECT_PATH=$PWD + - BUILD_PATH=$PWD/${BUILD_DIR} + - INST_PATH=$PWD/_install_dir + - mkdir -p ${BUILD_PATH} + - BUILD_HOST=$(gcc -dumpmachine) + # Build target libraries once when doing native build + - > + if [ "${CONF_HOST}" == "${BUILD_HOST}" ]; then + echo "Enable target libraries build" + export USE_LIBC="newlib"; + export USE_LIBCXX="libstdcxx"; + export USE_RTLIB="compiler-rt;libgcc"; + else + echo "Disable target libraries for cross-build" + export USE_LIBC=none; + export USE_LIBCXX=""; + export USE_RTLIB=""; + fi + # build toolchain core w/o any libs and GNU components + - cmake $PWD/esp-llvm-embedded-toolchain -GNinja + -DFETCHCONTENT_SOURCE_DIR_LLVMPROJECT=${LLVM_PROJECT_PATH} + -DNEWLIB_REPO_URL="${GITLAB_SSH_SERVER}/${NEWLIB_REPO_PATH}.git" + -DBINUTILS_REPO_URL="${GITLAB_SSH_SERVER}/${BINUTILS_REPO_PATH}.git" + 
-DXTENSA_OVERLAYS_REPO_URL="${GITLAB_SSH_SERVER}/${XTENSA_OVERLAYS_REPO_PATH}.git" + -DFETCHCONTENT_QUIET=OFF + -DESP_GNU_TOOLCHAIN_VER="13.2.0_20230928" + -DLLVM_TOOLCHAIN_CROSS_BUILD_MINGW=${CROSS_BUILD_MINGW} + -DUSE_LIBC=${USE_LIBC} + -DUSE_LIBCXX=${USE_LIBCXX} + -DUSE_RTLIB=${USE_RTLIB} + -DUSE_BINUTILS=ON + -DESP_TOOLCHAIN=ON + -DHOST_TRIPLE=${CONF_HOST} + -DLLVM_TOOLCHAIN_ENABLED_TARGETS="${TARGET}" + -DLLVM_USE_LINKER=${USE_LINKER} + -DLLVM_PARALLEL_LINK_JOBS=2 + -DLLVM_PARALLEL_COMPILE_JOBS=2 + -DCLANG_REPOSITORY_STRING="${GH_REPO_HTTPS}" + -DCPACK_ARCHIVE_THREADS=0 + -B ${BUILD_PATH} 2>&1 + --install-prefix=$INST_PATH > ${BUILD_PATH}/build.log + # Do not run unit tests for cross-builds. + # Run as non-root user because permission tests fail when run by root. + - > + if [[ "${CONF_HOST}" == "${BUILD_HOST}" && "${SKIP_TESTS}" != "ON" ]]; then + echo "Run LLVM/Clang unit tests"; + export CUR_USER=$(whoami); + useradd -m test_runner; + chown -R test_runner ${BUILD_PATH}; + touch ${BUILD_PATH}/tests.log; + chmod o+w ${BUILD_PATH}/tests.log; + runuser -u test_runner -- ninja -C ${BUILD_PATH} check-all 2>&1 > ${BUILD_PATH}/tests.log; + echo "Run Compiler-RT unit tests"; + touch ${BUILD_PATH}/compiler-rt-tests.log; + chmod o+w ${BUILD_PATH}/compiler-rt-tests.log; + runuser -u test_runner -- ninja -C ${BUILD_PATH} check-compiler-rt 2>&1 > ${BUILD_PATH}/compiler-rt-tests.log; + echo "Run LLD unit tests"; + touch ${BUILD_PATH}/lld-tests.log; + chmod o+w ${BUILD_PATH}/lld-tests.log; + runuser -u test_runner -- ninja -C ${BUILD_PATH} check-lld 2>&1 > ${BUILD_PATH}/lld-tests.log; + chown -R ${CUR_USER} ${BUILD_PATH}; + fi + - if [ "${PACK_DISTRO}" == "OFF" ]; then exit 0; fi + # pack distro + - mkdir -p ${PWD}/${DIST_DIR} + - ninja -C ${BUILD_PATH} package-llvm-toolchain 2>&1 >> ${BUILD_PATH}/build.log + - DISTRO_PACK_PATH=$(ninja -C ${BUILD_PATH} print-llvm-toolchain-package-path | tail -n 1) + - echo "DISTRO_PACK_PATH=${DISTRO_PACK_PATH}" + - mv ${DISTRO_PACK_PATH} 
${PWD}/${DIST_DIR}/ + - ARCHIVE_NAME=$(basename ${DISTRO_PACK_PATH}) + - echo "${ARCHIVE_NAME}" > ${PWD}/${DIST_DIR}/dist_name_${CONF_HOST}_${TARGET} + # pack distro with standalone libs + - ninja -C ${BUILD_PATH} package-llvm-standalone-libs 2>&1 >> ${BUILD_PATH}/build.log + - DISTRO_PACK_PATH=$(ninja -C ${BUILD_PATH} print-llvm-standalone-libs-package-path | tail -n 1) + - echo "DISTRO_PACK_PATH=${DISTRO_PACK_PATH}" + - mv ${DISTRO_PACK_PATH} ${PWD}/${DIST_DIR}/ + - ARCHIVE_NAME=$(basename ${DISTRO_PACK_PATH}) + - echo "${ARCHIVE_NAME}" > ${PWD}/${DIST_DIR}/dist_name_libs_${CONF_HOST}_${TARGET} + # pack target libraries to be re-used in distros for other platforms + - > + if [ "${CONF_HOST}" == "${BUILD_HOST}" ]; then + ninja -C ${BUILD_PATH} package-llvm-toolchain-target-libs 2>&1 >> ${BUILD_PATH}/build.log + DISTRO_PACK_PATH=$(ninja -C ${BUILD_PATH} print-llvm-toolchain-target-libs-package-path | tail -n 1) + echo "DISTRO_PACK_PATH=${DISTRO_PACK_PATH}" + mv ${DISTRO_PACK_PATH} ${PWD}/${DIST_DIR}/ + ARCHIVE_NAME=$(basename ${DISTRO_PACK_PATH}) + echo "${ARCHIVE_NAME}" > ${PWD}/${DIST_DIR}/target_libs_arch_name + fi -.add_gitlab_key_snippet: &add_gitlab_key_snippet | - cit_add_ssh_key "${GITLAB_KEY}" +build_and_test: + extends: .build_template + stage: test_build + variables: + TARGET: "Xtensa;RISCV" + SKIP_TESTS: "OFF" + PACK_DISTRO: "OFF" + CONF_HOST: "x86_64-linux-gnu" + USE_LINKER: "gold" + +.build_toolchain_template: + extends: .build_template + stage: build + parallel: + matrix: + - TARGET: + - Xtensa + - RISCV + +.build_linux-gnu_template: + extends: .build_toolchain_template + variables: + USE_LINKER: "gold" + +build_x86_64-linux-gnu: + extends: .build_linux-gnu_template + variables: + CONF_HOST: "x86_64-linux-gnu" + +build_x86_64-w64-mingw32: + extends: .build_toolchain_template + image: ${CROSS_WIN_IMAGE} + variables: + USE_LINKER: "" + CONF_HOST: "x86_64-w64-mingw32" + CROSS_BUILD_MINGW: "ON" + +build_arm-linux-gnueabihf: + extends: 
.build_linux-gnu_template + image: ${CROSS_ARM_IMAGE} + variables: + CONF_HOST: "arm-linux-gnueabihf" + +build_aarch64-linux-gnu: + extends: .build_linux-gnu_template + image: ${CROSS_ARM_IMAGE} + variables: + CONF_HOST: "aarch64-linux-gnu" + +build_x86_64-apple-darwin: + extends: .build_toolchain_template + variables: + CONF_HOST: "x86_64-apple-darwin21.1" + +build_aarch64-apple-darwin: + extends: .build_toolchain_template + variables: + CONF_HOST: "aarch64-apple-darwin21.1" -.add_gitlab_key: +.pack_x86_64-linux-gnu_template: + stage: pack + tags: [ "amd64", "build" ] + artifacts: + paths: + - ${DIST_DIR}/ + when: always + expire_in: 1 day script: - - *add_gitlab_key_snippet + - pushd ${DIST_DIR} + - ls -l + - TARGET_LIBS_PACK_FILE=$(cat target_libs_arch_name) + - rm -f target_libs_arch_name ${TARGET_LIBS_PACK_FILE} + - ls -l -# LLVM Build System used the remote address to show detailed version info, we'll change it to the public repository -.fix_origin_remote_for_public_snippet: &fix_origin_remote_for_public_snippet | - git remote set-url origin "${GH_REPO_HTTPS}" +pack_x86_64-linux-gnu_riscv: + extends: .pack_x86_64-linux-gnu_template + needs: + - job: "build_x86_64-linux-gnu: [RISCV]" -.fix_origin_remote_for_public: +pack_x86_64-linux-gnu_xtensa: + extends: .pack_x86_64-linux-gnu_template + needs: + - job: "build_x86_64-linux-gnu: [Xtensa]" + +.pack_template: + stage: pack + tags: [ "amd64", "build" ] + artifacts: + paths: + - ${DIST_DIR}/ + when: always + expire_in: 1 day + variables: + PACK_TOOL: "tar cJf" + UNPACK_TOOL: "tar xJf" + script: + - *get_toolchain_build_scripts + # update distro + - pushd ${DIST_DIR} + - ls -l + - DISTRO_PACK_FILE=$(cat dist_name_${CONF_HOST}_${TARGET}) + - echo "DISTRO_PACK_FILE=${DISTRO_PACK_FILE}" + - ${UNPACK_TOOL} ${DISTRO_PACK_FILE} + - DISTRO_PACK_DIR=$(tar tJf ${DISTRO_PACK_FILE} | sed -e 's@/.*@@' | uniq) + - ls -l $PWD/${DISTRO_PACK_DIR}/lib/clang-runtimes/ + - echo "DISTRO_PACK_DIR=${DISTRO_PACK_DIR}" + - rm -f 
${DISTRO_PACK_FILE} + - TARGET_LIBS_PACK_FILE=$(cat target_libs_arch_name) + - rm -f target_libs_arch_name + - echo "TARGET_LIBS_PACK_FILE=${TARGET_LIBS_PACK_FILE}" + - tar xJfv ${TARGET_LIBS_PACK_FILE} + - rm -f ${TARGET_LIBS_PACK_FILE} + - ls -l $PWD + - ls -l $PWD/${DISTRO_PACK_DIR} + - ls -l $PWD/${DISTRO_PACK_DIR}/lib/clang-runtimes/ + # both distro and target libs archives have the same root dir name, + # so that dir contains everything we need to re-pack after unpacking steps above + - ${PACK_TOOL} ${DISTRO_PACK_FILE} ${DISTRO_PACK_DIR} + - rm -rf ${DISTRO_PACK_DIR} + # remove x86_64-linux-gnu artifacts + - > + if [ "${CONF_HOST}" != "x86_64-linux-gnu" ]; then + DISTRO_PACK_FILE=$(cat dist_name_x86_64-linux-gnu_${TARGET}) + rm -f ${DISTRO_PACK_FILE} dist_name_x86_64-linux-gnu_${TARGET} + DISTRO_PACK_FILE=$(cat dist_name_libs_x86_64-linux-gnu_${TARGET}) + rm -f ${DISTRO_PACK_FILE} dist_name_libs_x86_64-linux-gnu_${TARGET} + fi + - ls -l + +pack_x86_64-w64-mingw32_riscv: + extends: .pack_template + needs: + # needs target libs archive from native build job + - job: "build_x86_64-linux-gnu: [RISCV]" + - job: "build_x86_64-w64-mingw32: [RISCV]" + variables: + CONF_HOST: "x86_64-w64-mingw32" + TARGET: "RISCV" + +pack_x86_64-w64-mingw32_xtensa: + extends: .pack_template + needs: + # needs target libs archive from native build job + - job: "build_x86_64-linux-gnu: [Xtensa]" + - job: "build_x86_64-w64-mingw32: [Xtensa]" + variables: + CONF_HOST: "x86_64-w64-mingw32" + TARGET: "Xtensa" + +pack_arm-linux-gnueabihf_riscv: + extends: .pack_template + needs: + # needs target libs archive from native build job + - job: "build_x86_64-linux-gnu: [RISCV]" + - job: "build_arm-linux-gnueabihf: [RISCV]" + variables: + CONF_HOST: "arm-linux-gnueabihf" + TARGET: "RISCV" + +pack_arm-linux-gnueabihf_xtensa: + extends: .pack_template + needs: + # needs target libs archive from native build job + - job: "build_x86_64-linux-gnu: [Xtensa]" + - job: "build_arm-linux-gnueabihf: [Xtensa]" 
+ variables: + CONF_HOST: "arm-linux-gnueabihf" + TARGET: "Xtensa" + +pack_aarch64-linux-gnu_riscv: + extends: .pack_template + needs: + # needs target libs archive from native build job + - job: "build_x86_64-linux-gnu: [RISCV]" + - job: "build_aarch64-linux-gnu: [RISCV]" + variables: + CONF_HOST: "aarch64-linux-gnu" + TARGET: "RISCV" + +pack_aarch64-linux-gnu_xtensa: + extends: .pack_template + needs: + # needs target libs archive from native build job + - job: "build_x86_64-linux-gnu: [Xtensa]" + - job: "build_aarch64-linux-gnu: [Xtensa]" + variables: + CONF_HOST: "aarch64-linux-gnu" + TARGET: "Xtensa" + +pack_x86_64-apple-darwin_riscv: + extends: .pack_template + needs: + # needs target libs archive from native build job + - job: "build_x86_64-linux-gnu: [RISCV]" + - job: "build_x86_64-apple-darwin: [RISCV]" + variables: + CONF_HOST: "x86_64-apple-darwin21.1" + TARGET: "RISCV" + +pack_x86_64-apple-darwin_xtensa: + extends: .pack_template + needs: + # needs target libs archive from native build job + - job: "build_x86_64-linux-gnu: [Xtensa]" + - job: "build_x86_64-apple-darwin: [Xtensa]" + variables: + CONF_HOST: "x86_64-apple-darwin21.1" + TARGET: "Xtensa" + +pack_aarch64-apple-darwin_riscv: + extends: .pack_template + needs: + # needs target libs archive from native build job + - job: "build_x86_64-linux-gnu: [RISCV]" + - job: "build_aarch64-apple-darwin: [RISCV]" + variables: + CONF_HOST: "aarch64-apple-darwin21.1" + TARGET: "RISCV" + +pack_aarch64-apple-darwin_xtensa: + extends: .pack_template + needs: + # needs target libs archive from native build job + - job: "build_x86_64-linux-gnu: [Xtensa]" + - job: "build_aarch64-apple-darwin: [Xtensa]" + variables: + CONF_HOST: "aarch64-apple-darwin21.1" + TARGET: "Xtensa" + +.macos_codesign_template: + stage: sign + when: on_success + resource_group: macos_codesign + tags: [ "darwin", "codesign" ] + artifacts: + paths: + - ${DIST_ART_DIR} + variables: + # directory with distro archives + DIST_ART_DIR: ${DIST_DIR} + 
# command to archive distro + ARCHIVE_TOOL: "tar cJf" + # command to unarchive distro + UNARCHIVE_TOOL: "tar xJf" + # URL to macos codesign repo + NOTARIZATION_SCRIPTS_GIT: "${CI_SERVER_PROTOCOL}://gitlab-ci-token:${CI_JOB_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/espressif/macos_codesign_notarization.git" script: - - *fix_origin_remote_for_public_snippet + - git clone -q --depth=1 ${NOTARIZATION_SCRIPTS_GIT} -b ${CI_COMMIT_REF_NAME} || + git clone -q --depth=1 ${NOTARIZATION_SCRIPTS_GIT} + - ./macos_codesign_notarization/run.sh + +sign_x86_64-apple-darwin_riscv: + extends: .macos_codesign_template + needs: + - pack_x86_64-apple-darwin_riscv -.get_clang_toolchain_build_scripts_snippet: &get_clang_toolchain_build_scripts_snippet | - git clone -b ${XTENSA_CLANG_TOOLCHAIN_REF} ${GITLAB_SSH_SERVER}/${XTENSA_CLANG_TOOLCHAIN_REPO} - cp -r xtensa-clang-toolchain/* . +sign_x86_64-apple-darwin_xtensa: + extends: .macos_codesign_template + needs: + - pack_x86_64-apple-darwin_xtensa -.get_clang_toolchain_build_scripts: +sign_aarch64-apple-darwin_riscv: + extends: .macos_codesign_template + needs: + - pack_aarch64-apple-darwin_riscv + +sign_aarch64-apple-darwin_xtensa: + extends: .macos_codesign_template + needs: + - pack_aarch64-apple-darwin_xtensa + +upload_to_http: + stage: private_deploy + when: manual + allow_failure: true + tags: [ "deploy", "shiny" ] + variables: + # force the fetch strategy to clean old archives up in dist/ dir + GIT_STRATEGY: fetch + needs: + - job: pack_x86_64-linux-gnu_riscv + - job: pack_x86_64-linux-gnu_xtensa script: - - *get_clang_toolchain_build_scripts_snippet + - cit_add_ssh_key "${HTTP_UPLOAD_KEY}" + # List of archives + - FILES=$(find ${DIST_DIR} -name dist_name_\* -exec cat {} \+) + - cd ${DIST_DIR} + - ls -l $FILES + - scp ${FILES} ${HTTP_UPLOAD_DIR}/ct-ng/llvm-builds + # Show info + - echo -e "\nArchives were published there:\n\n$(for n in ${FILES}; do echo "${HTTP_PUBLIC_DIR}/ct-ng/llvm-builds/${n}"; done)\n" -before_script: - - 
!reference [.use_ci_tools, script] - - !reference [.add_gitlab_key, script] +upload_to_github: + stage: public_deploy + when: manual + allow_failure: true + only: + - tags + tags: [ "amd64", "internet" ] + image: espressif/github-hub:2 + variables: + GIT_STRATEGY: fetch + GITHUB_TOKEN: "${GH_TOKEN}" + GITHUB_REPO: "${GH_REPO_HTTPS}" + TAG: "${CI_COMMIT_TAG}" + needs: + - job: pack_x86_64-linux-gnu_riscv + - job: pack_x86_64-linux-gnu_xtensa + - job: pack_arm-linux-gnueabihf_riscv + - job: pack_arm-linux-gnueabihf_xtensa + - job: pack_aarch64-linux-gnu_riscv + - job: pack_aarch64-linux-gnu_xtensa + - job: pack_x86_64-w64-mingw32_riscv + - job: pack_x86_64-w64-mingw32_xtensa + - job: sign_x86_64-apple-darwin_riscv + - job: sign_x86_64-apple-darwin_xtensa + - job: sign_aarch64-apple-darwin_riscv + - job: sign_aarch64-apple-darwin_xtensa + before_script: [] + script: + - ls -l dist*/ + - git remote add github ${GH_REPO_HTTPS} + - hub release show ${TAG} || { echo "Please create a release on GitHub with ${TAG} tag at first"; exit 1; } + # List of archives + - FILES=$(find ${DIST_DIR} -name dist_name_\* -exec cat {} \+) + - cd ${DIST_DIR} + - ls -l $FILES + # Upload archives + - for n in ${FILES}; do hub release edit -m "" -a "${n}" "${TAG}"; done -include: - - local: .universal-toolchain-release.yml +update_idf_tools: + stage: update_idf_tools + when: manual + allow_failure: true + only: + - tags + variables: + TOOL_NAME: openocd + TOOL_MEMBERS: openocd-esp32 + TOOL_VERSION: ${CI_COMMIT_TAG} + TOOL_SHA256_URL: https://github.com/espressif/openocd-esp32/releases/download/${CI_COMMIT_TAG}/openocd-esp32-${CI_COMMIT_TAG}-checksum.sha256 + RN_SECTION: Toolchain + trigger: + project: idf/idf-tools-updater + strategy: depend From 70ad0c89fc0cfeb52064cde161de0d05d51234e6 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Thu, 7 Mar 2024 11:00:06 +0300 Subject: [PATCH 224/289] [Xtensa] Fix disassembler. 
Fix disassembling of the Imm8_sh8, Imm64n_4n, Offset8m32, Entry_Imm12 immedaite operands. --- .../Xtensa/Disassembler/XtensaDisassembler.cpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index 64ce0e4ab9b4b..81c02b5de4652 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -562,8 +562,8 @@ static DecodeStatus decodeImm8Operand(MCInst &Inst, uint64_t Imm, static DecodeStatus decodeImm8_sh8Operand(MCInst &Inst, uint64_t Imm, int64_t Address, const void *Decoder) { - assert(isUInt<8>(Imm) && "Invalid immediate"); - Inst.addOperand(MCOperand::createImm(SignExtend64<16>(Imm << 8))); + assert(isUInt<16>(Imm) && ((Imm & 0xff) == 0) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(SignExtend64<16>(Imm))); return MCDisassembler::Success; } @@ -630,24 +630,24 @@ static DecodeStatus decodeImm8n_7Operand(MCInst &Inst, uint64_t Imm, static DecodeStatus decodeImm64n_4nOperand(MCInst &Inst, uint64_t Imm, int64_t Address, const void *Decoder) { - assert(isUInt<4>(Imm) && "Invalid immediate"); - Inst.addOperand(MCOperand::createImm((~0x3f) | (Imm << 2))); + assert(isUInt<6>(Imm) && ((Imm & 0x3) == 0) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm((~0x3f) | (Imm))); return MCDisassembler::Success; } static DecodeStatus decodeOffset8m32Operand(MCInst &Inst, uint64_t Imm, int64_t Address, const void *Decoder) { - assert(isUInt<8>(Imm) && "Invalid immediate"); - Inst.addOperand(MCOperand::createImm(Imm << 2)); + assert(isUInt<10>(Imm) && ((Imm & 0x3) == 0) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm)); return MCDisassembler::Success; } static DecodeStatus decodeEntry_Imm12OpValue(MCInst &Inst, uint64_t Imm, int64_t Address, const void *Decoder) { - assert(isUInt<12>(Imm) && "Invalid 
immediate"); - Inst.addOperand(MCOperand::createImm(Imm << 3)); + assert(isUInt<15>(Imm) && ((Imm & 0x7) == 0) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm)); return MCDisassembler::Success; } From 5299c925d3a989bd5cb1f5532f37a28c2d80317f Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Fri, 15 Mar 2024 12:32:03 +0300 Subject: [PATCH 225/289] [Xtensa] Add predefined macros for core configuration --- clang/lib/Basic/Targets.cpp | 2 + clang/lib/Basic/Targets/Xtensa.cpp | 103 ++++++++++++++++++++++++++++- clang/lib/Basic/Targets/Xtensa.h | 83 ++++++++++++++++++++++- 3 files changed, 186 insertions(+), 2 deletions(-) diff --git a/clang/lib/Basic/Targets.cpp b/clang/lib/Basic/Targets.cpp index 7702bc27a6ba1..5b7a1e17b085c 100644 --- a/clang/lib/Basic/Targets.cpp +++ b/clang/lib/Basic/Targets.cpp @@ -755,6 +755,8 @@ std::unique_ptr AllocateTarget(const llvm::Triple &Triple, } case llvm::Triple::xtensa : + if (Triple.getVendor() == llvm::Triple::Espressif) + return std::make_unique(Triple, Opts); return std::make_unique(Triple, Opts); } } diff --git a/clang/lib/Basic/Targets/Xtensa.cpp b/clang/lib/Basic/Targets/Xtensa.cpp index 7b5346062bccb..c99c25433df42 100644 --- a/clang/lib/Basic/Targets/Xtensa.cpp +++ b/clang/lib/Basic/Targets/Xtensa.cpp @@ -38,13 +38,36 @@ void XtensaTargetInfo::getTargetDefines(const LangOptions &Opts, Builder.defineMacro("__ELF__"); Builder.defineMacro("__xtensa__"); Builder.defineMacro("__XTENSA__"); - Builder.defineMacro("__XTENSA_EL__"); + if (BigEndian) + Builder.defineMacro("__XTENSA_EB__"); + else + Builder.defineMacro("__XTENSA_EL__"); if (HasWindowed) Builder.defineMacro("__XTENSA_WINDOWED_ABI__"); else Builder.defineMacro("__XTENSA_CALL0_ABI__"); if (!HasFP) Builder.defineMacro("__XTENSA_SOFT_FLOAT__"); + Builder.defineMacro("__XCHAL_HAVE_BE", BigEndian ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_DENSITY", HasDensity ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_MAC16", HasMAC16 ? 
"1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_MUL32", HasMul32 ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_MUL32_HIGH", HasMul32High ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_DIV32", HasDiv32 ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_NSA", HasNSA ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_MINMAX", HasMINMAX ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_SEXT", HasSEXT ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_LOOPS", HasLoop ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_THREADPTR", HasTHREADPTR ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_S32C1I", HasS32C1I ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_BOOLEANS", HasBoolean ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_FP", HasFP ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_DFP_ACCEL", HasDFP ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_WINDOWED", HasWindowed ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_DEBUG", HasDebug ? "1" : "0"); + // XSHAL_ABI + // XTHAL_ABI_WINDOWED + // XTHAL_ABI_CALL0 } void XtensaTargetInfo::fillValidCPUList( @@ -79,6 +102,32 @@ bool XtensaTargetInfo::hasFeature(StringRef Feature) const { .Case("windowed", HasWindowed) .Case("bool", HasBoolean) .Case("hifi3", HasHIFI3) + .Case("+density", HasDensity) + .Case("+loop", HasLoop) + .Case("+sext", HasSEXT) + .Case("+nsa", HasNSA) + .Case("+clamps", HasCLAPMS) + .Case("+minmax", HasMINMAX) + .Case("+mul32", HasMul32) + .Case("+mul32high", HasMul32High) + .Case("+div32", HasDiv32) + .Case("+mac16", HasMAC16) + .Case("+dfpaccel", HasDFP) + .Case("+s32c1i", HasS32C1I) + .Case("+threadptr", HasTHREADPTR) + .Case("+extendedl32r", HasExtendedL32R) + .Case("+atomctl", HasATOMCTL) + .Case("+memctl", HasMEMCTL) + .Case("+debug", HasDebug) + .Case("+exception", HasException) + .Case("+highpriinterrupts", HasHighPriInterrupts) + .Case("+coprocessor", HasCoprocessor) + .Case("+interrupt", HasInterrupt) + .Case("+rvector", HasRelocatableVector) + .Case("+timerint", HasTimerInt) + 
.Case("+prid", HasPRID) + .Case("+regprotect", HasRegionProtection) + .Case("+miscsr", HasMiscSR) .Default(false); } @@ -94,6 +143,58 @@ bool XtensaTargetInfo::handleTargetFeatures(std::vector &Features, HasWindowed = true; else if (Feature == "+hifi3") HasHIFI3 = true; + else if (Feature == "+density") + HasDensity = true; + else if (Feature == "+loop") + HasLoop = true; + else if (Feature == "+sext") + HasSEXT = true; + else if (Feature == "+nsa") + HasNSA = true; + else if (Feature == "+clamps") + HasCLAPMS = true; + else if (Feature == "+minmax") + HasMINMAX = true; + else if (Feature == "+mul32") + HasMul32 = true; + else if (Feature == "+mul32high") + HasMul32High = true; + else if (Feature == "+div32") + HasDiv32 = true; + else if (Feature == "+mac16") + HasMAC16 = true; + else if (Feature == "+dfpaccel") + HasDFP = true; + else if (Feature == "+s32c1i") + HasS32C1I = true; + else if (Feature == "+threadptr") + HasTHREADPTR = true; + else if (Feature == "+extendedl32r") + HasExtendedL32R = true; + else if (Feature == "+atomctl") + HasATOMCTL = true; + else if (Feature == "+memctl") + HasMEMCTL = true; + else if (Feature == "+debug") + HasDebug = true; + else if (Feature == "+exception") + HasException = true; + else if (Feature == "+highpriinterrupts") + HasHighPriInterrupts = true; + else if (Feature == "+coprocessor") + HasCoprocessor = true; + else if (Feature == "+interrupt") + HasInterrupt = true; + else if (Feature == "+rvector") + HasRelocatableVector = true; + else if (Feature == "+timerint") + HasTimerInt = true; + else if (Feature == "+prid") + HasPRID = true; + else if (Feature == "+regprotect") + HasRegionProtection = true; + else if (Feature == "+miscsr") + HasMiscSR = true; } return true; diff --git a/clang/lib/Basic/Targets/Xtensa.h b/clang/lib/Basic/Targets/Xtensa.h index b2c923b2cd24a..ca62b8159b60a 100644 --- a/clang/lib/Basic/Targets/Xtensa.h +++ b/clang/lib/Basic/Targets/Xtensa.h @@ -30,11 +30,38 @@ namespace clang { namespace targets { 
class LLVM_LIBRARY_VISIBILITY XtensaTargetInfo : public TargetInfo { +protected: std::string CPU; bool HasFP = false; bool HasWindowed = false; bool HasBoolean = false; bool HasHIFI3 = false; + bool HasDensity = false; + bool HasLoop = false; + bool HasSEXT = false; + bool HasNSA = false; + bool HasCLAPMS = false; + bool HasMINMAX = false; + bool HasMul32 = false; + bool HasMul32High = false; + bool HasDiv32 = false; + bool HasMAC16 = false; + bool HasDFP = false; + bool HasS32C1I = false; + bool HasTHREADPTR = false; + bool HasExtendedL32R = false; + bool HasATOMCTL = false; + bool HasMEMCTL = false; + bool HasDebug = false; + bool HasException = false; + bool HasHighPriInterrupts = false; + bool HasCoprocessor = false; + bool HasInterrupt = false; + bool HasRelocatableVector = false; + bool HasTimerInt = false; + bool HasPRID = false; + bool HasRegionProtection = false; + bool HasMiscSR = false; public: XtensaTargetInfo(const llvm::Triple &Triple, const TargetOptions &) @@ -54,7 +81,7 @@ class LLVM_LIBRARY_VISIBILITY XtensaTargetInfo : public TargetInfo { resetDataLayout("e-m:e-p:32:32-v1:8:8-i64:64-i128:128-n32"); } - void getTargetDefines(const LangOptions &Opts, + virtual void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override; ArrayRef getTargetBuiltins() const override; @@ -118,6 +145,60 @@ class LLVM_LIBRARY_VISIBILITY XtensaTargetInfo : public TargetInfo { bool handleTargetFeatures(std::vector &Features, DiagnosticsEngine &Diags) override; }; + +class LLVM_LIBRARY_VISIBILITY EspXtensaTargetInfo : public XtensaTargetInfo { + +public: + EspXtensaTargetInfo(const llvm::Triple &Triple, const TargetOptions &opts) + : XtensaTargetInfo(Triple, opts) { + } + + void getTargetDefines(const LangOptions &Opts, + MacroBuilder &Builder) const override { + XtensaTargetInfo::getTargetDefines(Opts, Builder); + Builder.defineMacro("__XCHAL_HAVE_CONST16", "0"); + Builder.defineMacro("__XCHAL_HAVE_ABS"); + Builder.defineMacro("__XCHAL_HAVE_ADDX"); 
+ Builder.defineMacro("__XCHAL_HAVE_L32R"); + Builder.defineMacro("__XCHAL_HAVE_MUL16"); + // FIXME: should we enable FP options below for all Xtensa CPUs if __XCHAL_HAVE_FP is 1 + Builder.defineMacro("__XCHAL_HAVE_FP_DIV", HasFP ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_FP_RECIP", HasFP ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_FP_SQRT", HasFP ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_FP_RSQRT", HasFP ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_FP_POSTINC", HasFP ? "1" : "0"); + // FIXME: should we enable DFP options below for all Xtensa CPUs if __XCHAL_HAVE_DFP_ACCEL is 1 + Builder.defineMacro("__XCHAL_HAVE_DFP_DIV", HasDFP ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_DFP_RECIP", HasDFP ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_DFP_SQRT", HasDFP ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_DFP_RSQRT", HasDFP ? "1" : "0"); + Builder.defineMacro("__XCHAL_HAVE_RELEASE_SYNC", "1"); + // XSHAL_USE_ABSOLUTE_LITERALS + // XSHAL_HAVE_TEXT_SECTION_LITERALS + Builder.defineMacro("__XCHAL_NUM_AREGS", "64"); + Builder.defineMacro("__XCHAL_HAVE_WIDE_BRANCHES", "0"); + Builder.defineMacro("__XCHAL_HAVE_PREDICTED_BRANCHES", "0"); + Builder.defineMacro("__XCHAL_ICACHE_SIZE", "0"); + Builder.defineMacro("__XCHAL_DCACHE_SIZE", "0"); + Builder.defineMacro("__XCHAL_ICACHE_LINESIZE", "16"); + Builder.defineMacro("__XCHAL_DCACHE_LINESIZE", "16"); + Builder.defineMacro("__XCHAL_ICACHE_LINEWIDTH", "4"); + Builder.defineMacro("__XCHAL_DCACHE_LINEWIDTH", "4"); + Builder.defineMacro("__XCHAL_DCACHE_IS_WRITEBACK", "0"); + Builder.defineMacro("__XCHAL_HAVE_MMU", "0"); + Builder.defineMacro("__XCHAL_NUM_IBREAK", "2"); + Builder.defineMacro("__XCHAL_NUM_DBREAK", "2"); + Builder.defineMacro("__XCHAL_DEBUGLEVEL", "6"); + if (CPU == "esp32") + Builder.defineMacro("__XCHAL_MAX_INSTRUCTION_SIZE", "3"); + else if (CPU == "esp32s2") + Builder.defineMacro("__XCHAL_MAX_INSTRUCTION_SIZE", "3"); + else if (CPU == "esp32s3") + 
Builder.defineMacro("__XCHAL_MAX_INSTRUCTION_SIZE", "4"); + Builder.defineMacro("__XCHAL_INST_FETCH_WIDTH", "4"); + } +}; + } // namespace targets } // namespace clang #endif // LLVM_CLANG_LIB_BASIC_TARGETS_XTENSA_H From d7d815c5a21a527bc1506289552a077c5383ae2a Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Fri, 15 Mar 2024 21:27:34 +0300 Subject: [PATCH 226/289] [Clang] Add stdint preprocessor tests for Xtensa --- clang/test/Preprocessor/init.c | 2 + clang/test/Preprocessor/stdint.c | 108 +++++++++++++++++++++++++++++++ 2 files changed, 110 insertions(+) diff --git a/clang/test/Preprocessor/init.c b/clang/test/Preprocessor/init.c index 63a3afd3c90e4..c739aa66e7594 100644 --- a/clang/test/Preprocessor/init.c +++ b/clang/test/Preprocessor/init.c @@ -2743,6 +2743,8 @@ // RISCV64-LINUX: #define linux 1 // RISCV64-LINUX: #define unix 1 +// RUN: %clang_cc1 -E -dM -ffreestanding -triple=xtensa < /dev/null \ +// RUN: | FileCheck -match-full-lines -check-prefix=XTENSA %s // RUN: %clang_cc1 -E -dM -ffreestanding -triple=xtensa-esp-unknown-elf < /dev/null \ // RUN: | FileCheck -match-full-lines -check-prefix=XTENSA %s // XTENSA: #define _ILP32 1 diff --git a/clang/test/Preprocessor/stdint.c b/clang/test/Preprocessor/stdint.c index 7cb33ed54739a..e90d6b8c9ea3a 100644 --- a/clang/test/Preprocessor/stdint.c +++ b/clang/test/Preprocessor/stdint.c @@ -1498,6 +1498,114 @@ // XCORE:INTMAX_C_(0) 0LL // XCORE:UINTMAX_C_(0) 0ULL // +// RUN: %clang_cc1 -E -ffreestanding -triple=xtensa %s | FileCheck -check-prefix XTENSA %s +// RUN: %clang_cc1 -E -ffreestanding -triple=xtensa-esp-unknown-elf %s | FileCheck -check-prefix XTENSA %s +// +// XTENSA:typedef long long int int64_t; +// XTENSA:typedef long long unsigned int uint64_t; +// XTENSA:typedef int64_t int_least64_t; +// XTENSA:typedef uint64_t uint_least64_t; +// XTENSA:typedef int64_t int_fast64_t; +// XTENSA:typedef uint64_t uint_fast64_t; +// +// XTENSA:typedef int int32_t; +// XTENSA:typedef unsigned int uint32_t; +// 
XTENSA:typedef int32_t int_least32_t; +// XTENSA:typedef uint32_t uint_least32_t; +// XTENSA:typedef int32_t int_fast32_t; +// XTENSA:typedef uint32_t uint_fast32_t; +// +// XTENSA:typedef short int16_t; +// XTENSA:typedef unsigned short uint16_t; +// XTENSA:typedef int16_t int_least16_t; +// XTENSA:typedef uint16_t uint_least16_t; +// XTENSA:typedef int16_t int_fast16_t; +// XTENSA:typedef uint16_t uint_fast16_t; +// +// XTENSA:typedef signed char int8_t; +// XTENSA:typedef unsigned char uint8_t; +// XTENSA:typedef int8_t int_least8_t; +// XTENSA:typedef uint8_t uint_least8_t; +// XTENSA:typedef int8_t int_fast8_t; +// XTENSA:typedef uint8_t uint_fast8_t; +// +// XTENSA:typedef int intptr_t; +// XTENSA:typedef unsigned int uintptr_t; +// +// XTENSA:typedef long long int intmax_t; +// XTENSA:typedef long long unsigned int uintmax_t; +// +// XTENSA:INT8_MAX_ 127 +// XTENSA:INT8_MIN_ (-127 -1) +// XTENSA:UINT8_MAX_ 255 +// XTENSA:INT_LEAST8_MIN_ (-127 -1) +// XTENSA:INT_LEAST8_MAX_ 127 +// XTENSA:UINT_LEAST8_MAX_ 255 +// XTENSA:INT_FAST8_MIN_ (-127 -1) +// XTENSA:INT_FAST8_MAX_ 127 +// XTENSA:UINT_FAST8_MAX_ 255 +// +// XTENSA:INT16_MAX_ 32767 +// XTENSA:INT16_MIN_ (-32767 -1) +// XTENSA:UINT16_MAX_ 65535 +// XTENSA:INT_LEAST16_MIN_ (-32767 -1) +// XTENSA:INT_LEAST16_MAX_ 32767 +// XTENSA:UINT_LEAST16_MAX_ 65535 +// XTENSA:INT_FAST16_MIN_ (-32767 -1) +// XTENSA:INT_FAST16_MAX_ 32767 +// XTENSA:UINT_FAST16_MAX_ 65535 +// +// XTENSA:INT32_MAX_ 2147483647 +// XTENSA:INT32_MIN_ (-2147483647 -1) +// XTENSA:UINT32_MAX_ 4294967295U +// XTENSA:INT_LEAST32_MIN_ (-2147483647 -1) +// XTENSA:INT_LEAST32_MAX_ 2147483647 +// XTENSA:UINT_LEAST32_MAX_ 4294967295U +// XTENSA:INT_FAST32_MIN_ (-2147483647 -1) +// XTENSA:INT_FAST32_MAX_ 2147483647 +// XTENSA:UINT_FAST32_MAX_ 4294967295U +// +// XTENSA:INT64_MAX_ 9223372036854775807LL +// XTENSA:INT64_MIN_ (-9223372036854775807LL -1) +// XTENSA:UINT64_MAX_ 18446744073709551615ULL +// XTENSA:INT_LEAST64_MIN_ (-9223372036854775807LL -1) 
+// XTENSA:INT_LEAST64_MAX_ 9223372036854775807LL +// XTENSA:UINT_LEAST64_MAX_ 18446744073709551615ULL +// XTENSA:INT_FAST64_MIN_ (-9223372036854775807LL -1) +// XTENSA:INT_FAST64_MAX_ 9223372036854775807LL +// XTENSA:UINT_FAST64_MAX_ 18446744073709551615ULL +// +// XTENSA:INTPTR_MIN_ (-2147483647 -1) +// XTENSA:INTPTR_MAX_ 2147483647 +// XTENSA:UINTPTR_MAX_ 4294967295U +// XTENSA:PTRDIFF_MIN_ (-2147483647 -1) +// XTENSA:PTRDIFF_MAX_ 2147483647 +// XTENSA:SIZE_MAX_ 4294967295U +// +// XTENSA:INTMAX_MIN_ (-9223372036854775807LL -1) +// XTENSA:INTMAX_MAX_ 9223372036854775807LL +// XTENSA:UINTMAX_MAX_ 18446744073709551615ULL +// +// XTENSA:SIG_ATOMIC_MIN_ (-2147483647 -1) +// XTENSA:SIG_ATOMIC_MAX_ 2147483647 +// XTENSA:WINT_MIN_ 0U +// XTENSA:WINT_MAX_ 4294967295U +// +// XTENSA:WCHAR_MAX_ 2147483647 +// XTENSA:WCHAR_MIN_ (-2147483647 -1) +// +// XTENSA:INT8_C_(0) 0 +// XTENSA:UINT8_C_(0) 0U +// XTENSA:INT16_C_(0) 0 +// XTENSA:UINT16_C_(0) 0U +// XTENSA:INT32_C_(0) 0 +// XTENSA:UINT32_C_(0) 0U +// XTENSA:INT64_C_(0) 0LL +// XTENSA:UINT64_C_(0) 0ULL +// +// XTENSA:INTMAX_C_(0) 0LL +// XTENSA:UINTMAX_C_(0) 0ULL +// // // stdint.h forms several macro definitions by pasting together identifiers // to form names (eg. int32_t is formed from int ## 32 ## _t). 
The following From 7d821ecb40a354a9b386891148fd8e1d2e2f041f Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Fri, 15 Mar 2024 23:17:38 +0300 Subject: [PATCH 227/289] [LLVM][Xtensa] Remove DFP accelrator feature from ESP32-S3 --- clang/test/Driver/xtensa-cpus.c | 2 +- llvm/include/llvm/TargetParser/XtensaTargetParser.def | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/clang/test/Driver/xtensa-cpus.c b/clang/test/Driver/xtensa-cpus.c index 44e380adbf7ca..dd798e314048f 100644 --- a/clang/test/Driver/xtensa-cpus.c +++ b/clang/test/Driver/xtensa-cpus.c @@ -32,7 +32,7 @@ // MCPU-ESP32S3: "-target-feature" "+density" "-target-feature" "+fp" "-target-feature" "+windowed" "-target-feature" "+bool" // MCPU-ESP32S3: "-target-feature" "+loop" "-target-feature" "+sext" "-target-feature" "+nsa" "-target-feature" "+clamps" // MCPU-ESP32S3: "-target-feature" "+minmax" "-target-feature" "+mul32" "-target-feature" "+mul32high" "-target-feature" "+div32" -// MCPU-ESP32S3: "-target-feature" "+mac16" "-target-feature" "+dfpaccel" "-target-feature" "+s32c1i" +// MCPU-ESP32S3: "-target-feature" "+mac16" "-target-feature" "+s32c1i" // MCPU-ESP32S3: "-target-feature" "+threadptr" "-target-feature" "+atomctl" "-target-feature" "+memctl" // MCPU-ESP32S3: "-target-feature" "+debug" "-target-feature" "+exception" "-target-feature" "+highpriinterrupts" // MCPU-ESP32S3: "-target-feature" "+coprocessor" "-target-feature" "+interrupt" "-target-feature" "+rvector" "-target-feature" "+timerint" diff --git a/llvm/include/llvm/TargetParser/XtensaTargetParser.def b/llvm/include/llvm/TargetParser/XtensaTargetParser.def index edc178ac559f5..82d5c45b8e879 100644 --- a/llvm/include/llvm/TargetParser/XtensaTargetParser.def +++ b/llvm/include/llvm/TargetParser/XtensaTargetParser.def @@ -69,7 +69,7 @@ XTENSA_CPU(ESP32S2, {"esp32s2"}, FK_INTERRUPT | FK_RVECTOR | FK_TIMERINT | FK_PRID | FK_REGPROTECT | FK_MISCSR | FK_ESP32S2OPS)) XTENSA_CPU(ESP32S3, {"esp32s3"}, (FK_DENSITY | FK_FP | 
FK_LOOP | FK_MAC16 | FK_WINDOWED | FK_BOOLEAN | - FK_SEXT | FK_NSA | FK_CLAMPS | FK_MINMAX | FK_MUL32 | FK_MUL32HIGH | FK_DFPACCEL | FK_S32C1I | + FK_SEXT | FK_NSA | FK_CLAMPS | FK_MINMAX | FK_MUL32 | FK_MUL32HIGH | FK_S32C1I | FK_THREADPTR | FK_DIV32 | FK_ATOMCTL | FK_MEMCTL | FK_DEBUG | FK_EXCEPTION | FK_HIGHPRIINTERRUPTS | FK_COPROCESSOR | FK_INTERRUPT | FK_RVECTOR | FK_TIMERINT | FK_PRID | FK_REGPROTECT | FK_MISCSR | FK_ESP32S3OPS)) From 1d54b93b763e8f57e5b867e740b96d6fa9614464 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Fri, 15 Mar 2024 23:20:58 +0300 Subject: [PATCH 228/289] [Clang][Xtensa] Add tests for core config pre-defined macros --- clang/test/Preprocessor/init.c | 10 +-- .../Preprocessor/xtensa-target-features.c | 89 +++++++++++++++++++ 2 files changed, 91 insertions(+), 8 deletions(-) create mode 100644 clang/test/Preprocessor/xtensa-target-features.c diff --git a/clang/test/Preprocessor/init.c b/clang/test/Preprocessor/init.c index c739aa66e7594..0a1164ce009c9 100644 --- a/clang/test/Preprocessor/init.c +++ b/clang/test/Preprocessor/init.c @@ -3042,11 +3042,9 @@ // XTENSA: #define __WINT_TYPE__ unsigned int // XTENSA: #define __WINT_UNSIGNED__ 1 // XTENSA: #define __WINT_WIDTH__ 32 -// XTENSA: #define __XTENSA_EL__ 1 -// XTENSA: #define __XTENSA_WINDOWED_ABI__ 1 -// XTENSA: #define __XTENSA__ 1 -// XTENSA: #define __xtensa__ 1 +// RUN: %clang_cc1 -E -dM -ffreestanding -triple=xtensa -mfast-int-min32 < /dev/null \ +// RUN: | FileCheck -match-full-lines -check-prefix=XTENSA_FAST32 %s // RUN: %clang_cc1 -E -dM -ffreestanding -triple=xtensa-esp-unknown-elf -mfast-int-min32 < /dev/null \ // RUN: | FileCheck -match-full-lines -check-prefix=XTENSA_FAST32 %s // XTENSA_FAST32: #define _ILP32 1 @@ -3344,7 +3342,3 @@ // XTENSA_FAST32: #define __WINT_TYPE__ unsigned int // XTENSA_FAST32: #define __WINT_UNSIGNED__ 1 // XTENSA_FAST32: #define __WINT_WIDTH__ 32 -// XTENSA_FAST32: #define __XTENSA_EL__ 1 -// XTENSA_FAST32: #define __XTENSA_WINDOWED_ABI__ 1 
-// XTENSA_FAST32: #define __XTENSA__ 1 -// XTENSA_FAST32: #define __xtensa__ 1 diff --git a/clang/test/Preprocessor/xtensa-target-features.c b/clang/test/Preprocessor/xtensa-target-features.c new file mode 100644 index 0000000000000..96a4743b3ae55 --- /dev/null +++ b/clang/test/Preprocessor/xtensa-target-features.c @@ -0,0 +1,89 @@ +// RUN: %clang -target xtensa -Wno-missing-multilib -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-XTENSA %s +// RUN: %clang -target xtensa-esp-unknown-elf -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-XTENSA %s +// CHECK-XTENSA: #define __XTENSA__ 1 +// CHECK-XTENSA: #define __xtensa__ 1 + +// RUN: %clang -target xtensa-esp-unknown-elf -mcpu=esp32 -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-ESP %s +// RUN: %clang -target xtensa-esp-unknown-elf -mcpu=esp32s2 -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-ESP %s +// RUN: %clang -target xtensa-esp-unknown-elf -mcpu=esp32s3 -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-ESP %s +// CHECK-ESP: #define __XCHAL_DCACHE_IS_WRITEBACK 0 +// CHECK-ESP: #define __XCHAL_DCACHE_LINESIZE 16 +// CHECK-ESP: #define __XCHAL_DCACHE_LINEWIDTH 4 +// CHECK-ESP: #define __XCHAL_DCACHE_SIZE 0 +// CHECK-ESP: #define __XCHAL_DEBUGLEVEL 6 +// CHECK-ESP: #define __XCHAL_HAVE_ABS 1 +// CHECK-ESP: #define __XCHAL_HAVE_ADDX 1 +// CHECK-ESP: #define __XCHAL_HAVE_BE 0 +// CHECK-ESP: #define __XCHAL_HAVE_CONST16 0 +// CHECK-ESP: #define __XCHAL_HAVE_DEBUG 1 +// CHECK-ESP: #define __XCHAL_HAVE_DENSITY 1 +// CHECK-ESP: #define __XCHAL_HAVE_DIV32 1 +// CHECK-ESP: #define __XCHAL_HAVE_L32R 1 +// CHECK-ESP: #define __XCHAL_HAVE_MINMAX 1 +// CHECK-ESP: #define __XCHAL_HAVE_MMU 0 +// CHECK-ESP: #define __XCHAL_HAVE_MUL16 1 +// CHECK-ESP: #define __XCHAL_HAVE_MUL32 1 +// CHECK-ESP: #define __XCHAL_HAVE_MUL32_HIGH 1 +// CHECK-ESP: #define __XCHAL_HAVE_NSA 1 +// CHECK-ESP: #define 
__XCHAL_HAVE_PREDICTED_BRANCHES 0 +// CHECK-ESP: #define __XCHAL_HAVE_RELEASE_SYNC 1 +// CHECK-ESP: #define __XCHAL_HAVE_SEXT 1 +// CHECK-ESP: #define __XCHAL_HAVE_THREADPTR 1 +// CHECK-ESP: #define __XCHAL_HAVE_WIDE_BRANCHES 0 +// CHECK-ESP: #define __XCHAL_HAVE_WINDOWED 1 +// CHECK-ESP: #define __XCHAL_ICACHE_LINESIZE 16 +// CHECK-ESP: #define __XCHAL_ICACHE_LINEWIDTH 4 +// CHECK-ESP: #define __XCHAL_ICACHE_SIZE 0 +// CHECK-ESP: #define __XCHAL_INST_FETCH_WIDTH 4 +// CHECK-ESP: #define __XCHAL_NUM_AREGS 64 +// CHECK-ESP: #define __XCHAL_NUM_DBREAK 2 +// CHECK-ESP: #define __XCHAL_NUM_IBREAK 2 +// CHECK-ESP: #define __XTENSA_EL__ 1 +// CHECK-ESP: #define __XTENSA_WINDOWED_ABI__ 1 + +// RUN: %clang -target xtensa-esp-unknown-elf -mcpu=esp32 -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-ESP32_S3 %s +// RUN: %clang -target xtensa-esp-unknown-elf -mcpu=esp32s3 -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-ESP32_S3 %s +// CHECK-ESP32_S3: #define __XCHAL_HAVE_BOOLEANS 1 +// CHECK-ESP32_S3: #define __XCHAL_HAVE_FP 1 +// CHECK-ESP32_S3: #define __XCHAL_HAVE_FP_DIV 1 +// CHECK-ESP32_S3: #define __XCHAL_HAVE_FP_POSTINC 1 +// CHECK-ESP32_S3: #define __XCHAL_HAVE_FP_RECIP 1 +// CHECK-ESP32_S3: #define __XCHAL_HAVE_FP_RSQRT 1 +// CHECK-ESP32_S3: #define __XCHAL_HAVE_FP_SQRT 1 +// CHECK-ESP32_S3: #define __XCHAL_HAVE_LOOPS 1 +// CHECK-ESP32_S3: #define __XCHAL_HAVE_MAC16 1 +// CHECK-ESP32_S3: #define __XCHAL_HAVE_S32C1I 1 + +// RUN: %clang -target xtensa-esp-unknown-elf -mcpu=esp32 -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-ESP32 %s +// CHECK-ESP32: #define __XCHAL_HAVE_DFP_ACCEL 1 +// CHECK-ESP32: #define __XCHAL_HAVE_DFP_DIV 1 +// CHECK-ESP32: #define __XCHAL_HAVE_DFP_RECIP 1 +// CHECK-ESP32: #define __XCHAL_HAVE_DFP_RSQRT 1 +// CHECK-ESP32: #define __XCHAL_HAVE_DFP_SQRT 1 +// CHECK-ESP32: #define __XCHAL_MAX_INSTRUCTION_SIZE 3 + +// RUN: %clang -target xtensa-esp-unknown-elf -mcpu=esp32s3 -x c -E 
-dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-ESP32S3 %s +// CHECK-ESP32S3: #define __XCHAL_HAVE_DFP_ACCEL 0 +// CHECK-ESP32S3: #define __XCHAL_HAVE_DFP_DIV 0 +// CHECK-ESP32S3: #define __XCHAL_HAVE_DFP_RECIP 0 +// CHECK-ESP32S3: #define __XCHAL_HAVE_DFP_RSQRT 0 +// CHECK-ESP32S3: #define __XCHAL_HAVE_DFP_SQRT 0 +// CHECK-ESP32S3: #define __XCHAL_MAX_INSTRUCTION_SIZE 4 + +// RUN: %clang -target xtensa-esp-unknown-elf -mcpu=esp32s2 -x c -E -dM %s -o - | FileCheck -match-full-lines --check-prefix=CHECK-ESP32S2 %s +// CHECK-ESP32S2: #define __XCHAL_HAVE_BOOLEANS 0 +// CHECK-ESP32S2: #define __XCHAL_HAVE_DFP_ACCEL 0 +// CHECK-ESP32S2: #define __XCHAL_HAVE_DFP_DIV 0 +// CHECK-ESP32S2: #define __XCHAL_HAVE_DFP_RECIP 0 +// CHECK-ESP32S2: #define __XCHAL_HAVE_DFP_RSQRT 0 +// CHECK-ESP32S2: #define __XCHAL_HAVE_DFP_SQRT 0 +// CHECK-ESP32S2: #define __XCHAL_HAVE_FP 0 +// CHECK-ESP32S2: #define __XCHAL_HAVE_FP_DIV 0 +// CHECK-ESP32S2: #define __XCHAL_HAVE_FP_POSTINC 0 +// CHECK-ESP32S2: #define __XCHAL_HAVE_FP_RECIP 0 +// CHECK-ESP32S2: #define __XCHAL_HAVE_FP_RSQRT 0 +// CHECK-ESP32S2: #define __XCHAL_HAVE_FP_SQRT 0 +// CHECK-ESP32S2: #define __XCHAL_HAVE_LOOPS 0 +// CHECK-ESP32S2: #define __XCHAL_HAVE_MAC16 0 +// CHECK-ESP32S2: #define __XCHAL_HAVE_S32C1I 0 +// CHECK-ESP32S2: #define __XCHAL_MAX_INSTRUCTION_SIZE 3 From 69dbc78ccc04883427b202c935289d36e4940b61 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Thu, 21 Mar 2024 12:54:39 +0300 Subject: [PATCH 229/289] esp/ci: Upgrade GCC toolchain components version to '13.2.0_20240305' --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 09891b869dfe0..bff2ef4f7add4 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -80,7 +80,7 @@ before_script: -DBINUTILS_REPO_URL="${GITLAB_SSH_SERVER}/${BINUTILS_REPO_PATH}.git" -DXTENSA_OVERLAYS_REPO_URL="${GITLAB_SSH_SERVER}/${XTENSA_OVERLAYS_REPO_PATH}.git" -DFETCHCONTENT_QUIET=OFF - 
-DESP_GNU_TOOLCHAIN_VER="13.2.0_20230928" + -DESP_GNU_TOOLCHAIN_VER="13.2.0_20240305" -DLLVM_TOOLCHAIN_CROSS_BUILD_MINGW=${CROSS_BUILD_MINGW} -DUSE_LIBC=${USE_LIBC} -DUSE_LIBCXX=${USE_LIBCXX} From b720717781aaa1e20fce46c1fd4a808504ed088c Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Fri, 29 Mar 2024 01:40:29 +0300 Subject: [PATCH 230/289] [Clang][RISCV] Fix baremetal test --- clang/test/Driver/baremetal-esp.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/clang/test/Driver/baremetal-esp.cpp b/clang/test/Driver/baremetal-esp.cpp index 18c1888daad48..bdc8388106f03 100644 --- a/clang/test/Driver/baremetal-esp.cpp +++ b/clang/test/Driver/baremetal-esp.cpp @@ -289,8 +289,10 @@ // RUN: touch %T/baremetal_clang_rt_noarch/lib/libclang_rt.builtins.a // RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 \ // RUN: --target=riscv32-esp-elf \ -// RUN: --sysroot=%T/baremetal_clang_rt_noarch \clang-runtimes/riscv32-esp-unknown-elf -// used if present. +// RUN: --sysroot=%T/baremetal_clang_rt_noarch \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32_CLANGRT-NOARCH %s +// CHECK-ESP-RV32_CLANGRT-NOARCH: "-lclang_rt.builtins" +// CHECK-ESP-RV32_CLANGRT-NOARCH-NOT: "-lclang_rt.builtins-riscv32" // RUN: rm -rf %T/baremetal_clang_rt_arch // RUN: mkdir -p %T/baremetal_clang_rt_arch/lib // RUN: touch %T/baremetal_clang_rt_arch/lib/libclang_rt.builtins-riscv32.a From 19d06ae7603efb373e94ff65282fdb8d70dda3a2 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Wed, 27 Mar 2024 23:22:13 +0300 Subject: [PATCH 231/289] [Toolchain][Espressif] Use custom prefixes for 'as' and 'ld' --- clang/lib/Driver/ToolChains/EspBareMetal.h | 2 +- ...32-esp-elf-as => riscv32-esp-elf-clang-as} | 0 ...32-esp-elf-ld => riscv32-esp-elf-clang-ld} | 0 ...esp32-elf-as => xtensa-esp32-elf-clang-as} | 0 ...esp32-elf-ld => xtensa-esp32-elf-clang-ld} | 0 ...2s2-elf-as => xtensa-esp32s2-elf-clang-as} | 0 ...2s2-elf-ld => xtensa-esp32s2-elf-clang-ld} | 0 ...2s3-elf-as => 
xtensa-esp32s3-elf-clang-as} | 0 ...2s3-elf-ld => xtensa-esp32s3-elf-clang-ld} | 0 clang/test/Driver/baremetal-esp.cpp | 42 +++++++++---------- 10 files changed, 22 insertions(+), 22 deletions(-) rename clang/test/Driver/Inputs/basic_riscv32_esp_tree/bin/{riscv32-esp-elf-as => riscv32-esp-elf-clang-as} (100%) rename clang/test/Driver/Inputs/basic_riscv32_esp_tree/bin/{riscv32-esp-elf-ld => riscv32-esp-elf-clang-ld} (100%) rename clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/{xtensa-esp32-elf-as => xtensa-esp32-elf-clang-as} (100%) rename clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/{xtensa-esp32-elf-ld => xtensa-esp32-elf-clang-ld} (100%) rename clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/{xtensa-esp32s2-elf-as => xtensa-esp32s2-elf-clang-as} (100%) rename clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/{xtensa-esp32s2-elf-ld => xtensa-esp32s2-elf-clang-ld} (100%) rename clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/{xtensa-esp32s3-elf-as => xtensa-esp32s3-elf-clang-as} (100%) rename clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/{xtensa-esp32s3-elf-ld => xtensa-esp32s3-elf-clang-ld} (100%) diff --git a/clang/lib/Driver/ToolChains/EspBareMetal.h b/clang/lib/Driver/ToolChains/EspBareMetal.h index 08c544e34ea02..5618564c0a4e9 100644 --- a/clang/lib/Driver/ToolChains/EspBareMetal.h +++ b/clang/lib/Driver/ToolChains/EspBareMetal.h @@ -75,7 +75,7 @@ class LLVM_LIBRARY_VISIBILITY Linker : public Tool { class LLVM_LIBRARY_VISIBILITY Assembler : public Tool { public: Assembler(const ToolChain &TC) - : Tool("baremetal::esp::Assembler", "as", TC) {} + : Tool("baremetal::esp::Assembler", "clang-as", TC) {} bool hasIntegratedCPP() const override { return false; } void ConstructJob(Compilation &C, const JobAction &JA, diff --git a/clang/test/Driver/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-as b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-clang-as similarity index 100% rename from 
clang/test/Driver/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-as rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-clang-as diff --git a/clang/test/Driver/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-ld b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-clang-ld similarity index 100% rename from clang/test/Driver/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-ld rename to clang/test/Driver/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-clang-ld diff --git a/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-as b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-clang-as similarity index 100% rename from clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-as rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-clang-as diff --git a/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-ld b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-clang-ld similarity index 100% rename from clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-ld rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-clang-ld diff --git a/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-as b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-clang-as similarity index 100% rename from clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-as rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-clang-as diff --git a/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-ld b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-clang-ld similarity index 100% rename from clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-ld rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-clang-ld diff --git 
a/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-as b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-clang-as similarity index 100% rename from clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-as rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-clang-as diff --git a/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-ld b/clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-clang-ld similarity index 100% rename from clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-ld rename to clang/test/Driver/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-clang-ld diff --git a/clang/test/Driver/baremetal-esp.cpp b/clang/test/Driver/baremetal-esp.cpp index bdc8388106f03..140df519ca951 100644 --- a/clang/test/Driver/baremetal-esp.cpp +++ b/clang/test/Driver/baremetal-esp.cpp @@ -8,8 +8,8 @@ // RUN: mkdir -p %t/basic_riscv32_esp_tree/bin // RUN: ln -s %clang %t/basic_riscv32_esp_tree/bin/clang // RUN: ln -s %S/Inputs/basic_riscv32_esp_tree/bin/ld.lld %t/basic_riscv32_esp_tree/bin/ld.lld -// RUN: ln -s %S/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-as %t/basic_riscv32_esp_tree/bin/riscv32-esp-elf-as -// RUN: ln -s %S/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-ld %t/basic_riscv32_esp_tree/bin/riscv32-esp-elf-ld +// RUN: ln -s %S/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-clang-as %t/basic_riscv32_esp_tree/bin/riscv32-esp-elf-clang-as +// RUN: ln -s %S/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-clang-ld %t/basic_riscv32_esp_tree/bin/riscv32-esp-elf-clang-ld // RUN: ln -s %S/Inputs/basic_riscv32_esp_tree/lib %t/basic_riscv32_esp_tree/lib // RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf \ @@ -56,7 +56,7 @@ // CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" // CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-internal-isystem" 
"[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}include" // CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-x" "c++" "{{.*}}baremetal-esp.cpp" -// CHECK-ESP-RV32IMAC-FORCEAS-NEXT: riscv32-esp-elf-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" "-march=rv32imac" "-mabi=ilp32" +// CHECK-ESP-RV32IMAC-FORCEAS-NEXT: riscv32-esp-elf-clang-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" "-march=rv32imac" "-mabi=ilp32" // CHECK-ESP-RV32IMAC-FORCEAS-NEXT: ld.lld{{(.exe)?}}" // CHECK-ESP-RV32IMAC-FORCEAS-SAME: "--sysroot=[[SYSROOT]]" // CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-m" "elf32lriscv" @@ -67,12 +67,12 @@ // CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" // CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-lclang_rt.builtins" -// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf --ld-path=riscv32-esp-elf-ld \ +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf --ld-path=riscv32-esp-elf-clang-ld \ // RUN: -L some/directory/user/asked/for \ // RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ // RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAC-FORCELD %s // CHECK-ESP-RV32IMAC-FORCELD: "-isysroot" "[[SYSROOT:[^"]*]]" -// CHECK-ESP-RV32IMAC-FORCELD: riscv32-esp-elf-ld{{(.exe)?}}" +// CHECK-ESP-RV32IMAC-FORCELD: riscv32-esp-elf-clang-ld{{(.exe)?}}" // CHECK-ESP-RV32IMAC-FORCELD-SAME: "--sysroot=[[SYSROOT]]" // CHECK-ESP-RV32IMAC-FORCELD-SAME: "-m" "elf32lriscv" // CHECK-ESP-RV32IMAC-FORCELD-SAME: "-o" "a.out" @@ -175,7 +175,7 @@ // RUN: -march=rv32i -mabi=ilp32 \ // RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ // RUN: | FileCheck --check-prefix=CHECK-ESP-RV32I-FORCEAS %s -// CHECK-ESP-RV32I-FORCEAS: riscv32-esp-elf-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" "-march=rv32i" "-mabi=ilp32" +// CHECK-ESP-RV32I-FORCEAS: riscv32-esp-elf-clang-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" "-march=rv32i" "-mabi=ilp32" // RUN: 
%t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf \ // RUN: -march=rv32i -mabi=ilp32 \ @@ -229,19 +229,19 @@ // RUN: -march=rv32im -mabi=ilp32 \ // RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ // RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IM-FORCEAS %s -// CHECK-ESP-RV32IM-FORCEAS: riscv32-esp-elf-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" "-march=rv32im" "-mabi=ilp32" +// CHECK-ESP-RV32IM-FORCEAS: riscv32-esp-elf-clang-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" "-march=rv32im" "-mabi=ilp32" // RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf -fno-integrated-as \ // RUN: -march=rv32imc -mabi=ilp32 \ // RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ // RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMC-FORCEAS %s -// CHECK-ESP-RV32IMC-FORCEAS: riscv32-esp-elf-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" "-march=rv32imc" "-mabi=ilp32" +// CHECK-ESP-RV32IMC-FORCEAS: riscv32-esp-elf-clang-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" "-march=rv32imc" "-mabi=ilp32" // RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf -fno-integrated-as \ // RUN: -march=rv32imac -mabi=ilp32 \ // RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ // RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAC-FORCEAS2 %s -// CHECK-ESP-RV32IMAC-FORCEAS2: riscv32-esp-elf-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" "-march=rv32imac" "-mabi=ilp32" +// CHECK-ESP-RV32IMAC-FORCEAS2: riscv32-esp-elf-clang-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" "-march=rv32imac" "-mabi=ilp32" // RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf -march=rv32imafc -mabi=ilp32f \ // RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ @@ -280,7 +280,7 @@ // RUN: -march=rv32imafc -mabi=ilp32f \ // RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ // RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAFC-FORCEAS %s -// 
CHECK-ESP-RV32IMAFC-FORCEAS: riscv32-esp-elf-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" "-march=rv32imafc" "-mabi=ilp32f" +// CHECK-ESP-RV32IMAFC-FORCEAS: riscv32-esp-elf-clang-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" "-march=rv32imafc" "-mabi=ilp32f" // Check that compiler-rt library without the arch filename suffix will // be used if present. @@ -310,12 +310,12 @@ // RUN: mkdir -p %t/basic_xtensa_esp_tree/bin // RUN: ln -s %clang %t/basic_xtensa_esp_tree/bin/clang // RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/bin/ld.lld %t/basic_xtensa_esp_tree/bin/ld.lld -// RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-as %t/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-as -// RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-ld %t/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-ld -// RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-as %t/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-as -// RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-ld %t/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-ld -// RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-as %t/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-as -// RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-ld %t/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-ld +// RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-clang-as %t/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-clang-as +// RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-clang-ld %t/basic_xtensa_esp_tree/bin/xtensa-esp32-elf-clang-ld +// RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-clang-as %t/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-clang-as +// RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-clang-ld %t/basic_xtensa_esp_tree/bin/xtensa-esp32s2-elf-clang-ld +// RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-clang-as %t/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-clang-as +// RUN: ln -s 
%S/Inputs/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-clang-ld %t/basic_xtensa_esp_tree/bin/xtensa-esp32s3-elf-clang-ld // RUN: ln -s %S/Inputs/basic_xtensa_esp_tree/lib %t/basic_xtensa_esp_tree/lib // ESP32 is default @@ -362,7 +362,7 @@ // CHECK-ESP-ESP32-FORCEAS-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" // CHECK-ESP-ESP32-FORCEAS-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}include" // CHECK-ESP-ESP32-FORCEAS-SAME: "-x" "c++" "{{.*}}baremetal-esp.cpp" -// CHECK-ESP-ESP32-FORCEAS-NEXT: xtensa-esp32-elf-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" +// CHECK-ESP-ESP32-FORCEAS-NEXT: xtensa-esp32-elf-clang-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" // CHECK-ESP-ESP32-FORCEAS-NEXT: ld.lld{{(.exe)?}}" // CHECK-ESP-ESP32-FORCEAS-SAME: "--sysroot=[[SYSROOT]]" // CHECK-ESP-ESP32-FORCEAS-SAME: "-o" "a.out" @@ -372,12 +372,12 @@ // CHECK-ESP-ESP32-FORCEAS-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" // CHECK-ESP-ESP32-FORCEAS-SAME: "-lclang_rt.builtins" -// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf --ld-path=xtensa-esp32-elf-ld \ +// RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf --ld-path=xtensa-esp32-elf-clang-ld \ // RUN: -L some/directory/user/asked/for \ // RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ // RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32-FORCELD %s // CHECK-ESP-ESP32-FORCELD: "-isysroot" "[[SYSROOT:[^"]*]]" -// CHECK-ESP-ESP32-FORCELD-NEXT: xtensa-esp32-elf-ld{{(.exe)?}}" +// CHECK-ESP-ESP32-FORCELD-NEXT: xtensa-esp32-elf-clang-ld{{(.exe)?}}" // CHECK-ESP-ESP32-FORCELD-SAME: "--sysroot=[[SYSROOT]]" // CHECK-ESP-ESP32-FORCELD-SAME: "-o" "a.out" // CHECK-ESP-ESP32-FORCELD-SAME: "-X" "{{.*}}.o" @@ -529,7 +529,7 @@ // CHECK-ESP-ESP32S2-FORCEAS-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" // 
CHECK-ESP-ESP32S2-FORCEAS-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" // CHECK-ESP-ESP32S2-FORCEAS-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s2{{[/\\]+}}include" -// CHECK-ESP-ESP32S2-FORCEAS: xtensa-esp32s2-elf-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" +// CHECK-ESP-ESP32S2-FORCEAS: xtensa-esp32s2-elf-clang-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" // CHECK-ESP-ESP32S2-FORCEAS-NEXT: ld.lld{{(.exe)?}}" // CHECK-ESP-ESP32S2-FORCEAS-SAME: "--sysroot=[[SYSROOT]]" // CHECK-ESP-ESP32S2-FORCEAS-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s2{{[/\\]+}}lib" @@ -575,7 +575,7 @@ // CHECK-ESP-ESP32S3-FORCEAS-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}include{{[/\\]+}}c++{{[/\\]+}}11.2.0" // CHECK-ESP-ESP32S3-FORCEAS-SAME: "-internal-isystem" "[[RESOURCE_DIR]]{{[/\\]+}}include" // CHECK-ESP-ESP32S3-FORCEAS-SAME: "-internal-isystem" "[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s3{{[/\\]+}}include" -// CHECK-ESP-ESP32S3-FORCEAS: xtensa-esp32s3-elf-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" +// CHECK-ESP-ESP32S3-FORCEAS: xtensa-esp32s3-elf-clang-as{{(.exe)?}}" "-o" "{{.*}}.o" "-c" "{{.*}}.s" // CHECK-ESP-ESP32S3-FORCEAS-NEXT: ld.lld{{(.exe)?}}" // CHECK-ESP-ESP32S3-FORCEAS-SAME: "--sysroot=[[SYSROOT]]" // CHECK-ESP-ESP32S3-FORCEAS-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32s3{{[/\\]+}}lib" From 76de4740ca0b2bf06bdb9bd154e7bb20333e67db Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Wed, 27 Mar 2024 20:16:23 +0300 Subject: [PATCH 232/289] esp/ci: Switch to combined all-in-one toolchain --- .gitlab-ci.yml | 307 ++++++++++++++++++++----------------------------- 1 file changed, 123 insertions(+), 184 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index bff2ef4f7add4..cb8aa697a551d 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -12,6 +12,7 @@ image: ${CI_DOCKER_REGISTRY}/llvm-build:4 variables: 
ESP_LLVM_EMBEDDED_TOOLCHAIN_REF: "master" + ESP_GNU_TOOLCHAIN_VER: "13.2.0_20240305" CROSS_ARM_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-arm:1 CROSS_WIN_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-win:1 DIST_DIR: "dist" @@ -32,7 +33,7 @@ before_script: - *use_ci_tools - *add_gitlab_key -.build_template: +.build_toolchain_template: tags: [ "amd64", "build" ] artifacts: paths: @@ -40,17 +41,23 @@ before_script: - ${BUILD_DIR}/*.log when: always expire_in: 1 day + dependencies: [] variables: + TARGET: "Xtensa;RISCV" USE_LINKER: "ld" CROSS_BUILD_MINGW: "OFF" - SKIP_TESTS: "ON" - PACK_DISTRO: "ON" + RUN_CORE_TESTS: "OFF" + RUN_TARGET_LIB_TESTS: "OFF" + PACK_TOOLCHAIN: "ON" + PACK_STANDALONE_LIBS: "ON" + PACK_TARGET_LIBS: "OFF" after_script: # help to identify that build failed due to OOM - > if [ $CI_JOB_STATUS == 'failed' ]; then [ ! -f "${BUILD_DIR}/build.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/build.log || true [ ! -f "${BUILD_DIR}/tests.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/tests.log || true + [ ! -f "${BUILD_DIR}/compiler-rt-tests.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/compiler-rt-tests.log || true [ ! -f "${BUILD_DIR}/lld-tests.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/lld-tests.log || true fi script: @@ -60,18 +67,28 @@ before_script: - INST_PATH=$PWD/_install_dir - mkdir -p ${BUILD_PATH} - BUILD_HOST=$(gcc -dumpmachine) - # Build target libraries once when doing native build + # Config to build target libraries + # TODO: do not build core tools (clang, lld, binutils etc) + # when PACK_TOOLCHAIN is OFF and PACK_TARGET_LIBS is ON. + # Re-use core tools built in another job. 
+ # LLVM-xxx - > - if [ "${CONF_HOST}" == "${BUILD_HOST}" ]; then + if [ "${PACK_TARGET_LIBS}" == "ON" ]; then echo "Enable target libraries build" export USE_LIBC="newlib"; export USE_LIBCXX="libstdcxx"; export USE_RTLIB="compiler-rt;libgcc"; else - echo "Disable target libraries for cross-build" + echo "Disable target libraries" export USE_LIBC=none; export USE_LIBCXX=""; - export USE_RTLIB=""; + if [ "${RUN_CORE_TESTS}" == "ON" ]; then + # Need to have multilib dir structure to avoid test failures due to '-Wmissing-multilib'. + # So enable copying of libgcc from GNU toolchain. It is fast and not heavy. + export USE_RTLIB="libgcc"; + else + export USE_RTLIB=""; + fi fi # build toolchain core w/o any libs and GNU components - cmake $PWD/esp-llvm-embedded-toolchain -GNinja @@ -80,7 +97,7 @@ before_script: -DBINUTILS_REPO_URL="${GITLAB_SSH_SERVER}/${BINUTILS_REPO_PATH}.git" -DXTENSA_OVERLAYS_REPO_URL="${GITLAB_SSH_SERVER}/${XTENSA_OVERLAYS_REPO_PATH}.git" -DFETCHCONTENT_QUIET=OFF - -DESP_GNU_TOOLCHAIN_VER="13.2.0_20240305" + -DESP_GNU_TOOLCHAIN_VER=${ESP_GNU_TOOLCHAIN_VER} -DLLVM_TOOLCHAIN_CROSS_BUILD_MINGW=${CROSS_BUILD_MINGW} -DUSE_LIBC=${USE_LIBC} -DUSE_LIBCXX=${USE_LIBCXX} @@ -99,43 +116,52 @@ before_script: # Do not run unit tests for cross-builds. # Run as non-root user because permission tests fail when run by root. 
- > - if [[ "${CONF_HOST}" == "${BUILD_HOST}" && "${SKIP_TESTS}" != "ON" ]]; then - echo "Run LLVM/Clang unit tests"; + if [[ "${CONF_HOST}" == "${BUILD_HOST}" ]]; then export CUR_USER=$(whoami); useradd -m test_runner; chown -R test_runner ${BUILD_PATH}; - touch ${BUILD_PATH}/tests.log; - chmod o+w ${BUILD_PATH}/tests.log; - runuser -u test_runner -- ninja -C ${BUILD_PATH} check-all 2>&1 > ${BUILD_PATH}/tests.log; - echo "Run Compiler-RT unit tests"; - touch ${BUILD_PATH}/compiler-rt-tests.log; - chmod o+w ${BUILD_PATH}/compiler-rt-tests.log; - runuser -u test_runner -- ninja -C ${BUILD_PATH} check-compiler-rt 2>&1 > ${BUILD_PATH}/compiler-rt-tests.log; - echo "Run LLD unit tests"; - touch ${BUILD_PATH}/lld-tests.log; - chmod o+w ${BUILD_PATH}/lld-tests.log; - runuser -u test_runner -- ninja -C ${BUILD_PATH} check-lld 2>&1 > ${BUILD_PATH}/lld-tests.log; + if [[ "${RUN_CORE_TESTS}" == "ON" ]]; then + echo "Run LLVM/Clang unit tests"; + touch ${BUILD_PATH}/tests.log; + chmod o+w ${BUILD_PATH}/tests.log; + runuser -u test_runner -- ninja -C ${BUILD_PATH} check-all 2>&1 > ${BUILD_PATH}/tests.log; + echo "Run LLD unit tests"; + touch ${BUILD_PATH}/lld-tests.log; + chmod o+w ${BUILD_PATH}/lld-tests.log; + runuser -u test_runner -- ninja -C ${BUILD_PATH} check-lld 2>&1 > ${BUILD_PATH}/lld-tests.log; + fi + if [[ "${RUN_TARGET_LIB_TESTS}" == "ON" ]]; then + echo "Run Compiler-RT unit tests"; + touch ${BUILD_PATH}/compiler-rt-tests.log; + chmod o+w ${BUILD_PATH}/compiler-rt-tests.log; + runuser -u test_runner -- ninja -C ${BUILD_PATH} check-compiler-rt 2>&1 > ${BUILD_PATH}/compiler-rt-tests.log; + fi chown -R ${CUR_USER} ${BUILD_PATH}; fi - - if [ "${PACK_DISTRO}" == "OFF" ]; then exit 0; fi # pack distro - mkdir -p ${PWD}/${DIST_DIR} - - ninja -C ${BUILD_PATH} package-llvm-toolchain 2>&1 >> ${BUILD_PATH}/build.log - - DISTRO_PACK_PATH=$(ninja -C ${BUILD_PATH} print-llvm-toolchain-package-path | tail -n 1) - - echo "DISTRO_PACK_PATH=${DISTRO_PACK_PATH}" - - mv 
${DISTRO_PACK_PATH} ${PWD}/${DIST_DIR}/ - - ARCHIVE_NAME=$(basename ${DISTRO_PACK_PATH}) - - echo "${ARCHIVE_NAME}" > ${PWD}/${DIST_DIR}/dist_name_${CONF_HOST}_${TARGET} + - > + if [[ "${PACK_TOOLCHAIN}" == "ON" ]]; then + ninja -C ${BUILD_PATH} package-llvm-toolchain 2>&1 >> ${BUILD_PATH}/build.log + DISTRO_PACK_PATH=$(ninja -C ${BUILD_PATH} print-llvm-toolchain-package-path | tail -n 1) + echo "DISTRO_PACK_PATH=${DISTRO_PACK_PATH}" + mv ${DISTRO_PACK_PATH} ${PWD}/${DIST_DIR}/ + ARCHIVE_NAME=$(basename ${DISTRO_PACK_PATH}) + echo "${ARCHIVE_NAME}" > ${PWD}/${DIST_DIR}/dist_name_${CONF_HOST} + fi # pack distro with standalone libs - - ninja -C ${BUILD_PATH} package-llvm-standalone-libs 2>&1 >> ${BUILD_PATH}/build.log - - DISTRO_PACK_PATH=$(ninja -C ${BUILD_PATH} print-llvm-standalone-libs-package-path | tail -n 1) - - echo "DISTRO_PACK_PATH=${DISTRO_PACK_PATH}" - - mv ${DISTRO_PACK_PATH} ${PWD}/${DIST_DIR}/ - - ARCHIVE_NAME=$(basename ${DISTRO_PACK_PATH}) - - echo "${ARCHIVE_NAME}" > ${PWD}/${DIST_DIR}/dist_name_libs_${CONF_HOST}_${TARGET} + - > + if [[ "${PACK_STANDALONE_LIBS}" == "ON" ]]; then + ninja -C ${BUILD_PATH} package-llvm-standalone-libs 2>&1 >> ${BUILD_PATH}/build.log + DISTRO_PACK_PATH=$(ninja -C ${BUILD_PATH} print-llvm-standalone-libs-package-path | tail -n 1) + echo "DISTRO_PACK_PATH=${DISTRO_PACK_PATH}" + mv ${DISTRO_PACK_PATH} ${PWD}/${DIST_DIR}/ + ARCHIVE_NAME=$(basename ${DISTRO_PACK_PATH}) + echo "${ARCHIVE_NAME}" > ${PWD}/${DIST_DIR}/dist_name_libs_${CONF_HOST} + fi # pack target libraries to be re-used in distros for other platforms - > - if [ "${CONF_HOST}" == "${BUILD_HOST}" ]; then + if [[ "${PACK_TARGET_LIBS}" == "ON" ]]; then ninja -C ${BUILD_PATH} package-llvm-toolchain-target-libs 2>&1 >> ${BUILD_PATH}/build.log DISTRO_PACK_PATH=$(ninja -C ${BUILD_PATH} print-llvm-toolchain-target-libs-package-path | tail -n 1) echo "DISTRO_PACK_PATH=${DISTRO_PACK_PATH}" @@ -144,25 +170,6 @@ before_script: echo "${ARCHIVE_NAME}" > 
${PWD}/${DIST_DIR}/target_libs_arch_name fi -build_and_test: - extends: .build_template - stage: test_build - variables: - TARGET: "Xtensa;RISCV" - SKIP_TESTS: "OFF" - PACK_DISTRO: "OFF" - CONF_HOST: "x86_64-linux-gnu" - USE_LINKER: "gold" - -.build_toolchain_template: - extends: .build_template - stage: build - parallel: - matrix: - - TARGET: - - Xtensa - - RISCV - .build_linux-gnu_template: extends: .build_toolchain_template variables: @@ -170,11 +177,24 @@ build_and_test: build_x86_64-linux-gnu: extends: .build_linux-gnu_template + stage: test_build variables: CONF_HOST: "x86_64-linux-gnu" + RUN_CORE_TESTS: "ON" + +build_target_libs: + extends: .build_linux-gnu_template + stage: build + variables: + CONF_HOST: "x86_64-linux-gnu" + RUN_TARGET_LIB_TESTS: "ON" + PACK_TARGET_LIBS: "ON" + PACK_TOOLCHAIN: "OFF" + PACK_STANDALONE_LIBS: "OFF" build_x86_64-w64-mingw32: extends: .build_toolchain_template + stage: build image: ${CROSS_WIN_IMAGE} variables: USE_LINKER: "" @@ -183,51 +203,30 @@ build_x86_64-w64-mingw32: build_arm-linux-gnueabihf: extends: .build_linux-gnu_template + stage: build image: ${CROSS_ARM_IMAGE} variables: CONF_HOST: "arm-linux-gnueabihf" build_aarch64-linux-gnu: extends: .build_linux-gnu_template + stage: build image: ${CROSS_ARM_IMAGE} variables: CONF_HOST: "aarch64-linux-gnu" build_x86_64-apple-darwin: extends: .build_toolchain_template + stage: build variables: CONF_HOST: "x86_64-apple-darwin21.1" build_aarch64-apple-darwin: extends: .build_toolchain_template + stage: build variables: CONF_HOST: "aarch64-apple-darwin21.1" -.pack_x86_64-linux-gnu_template: - stage: pack - tags: [ "amd64", "build" ] - artifacts: - paths: - - ${DIST_DIR}/ - when: always - expire_in: 1 day - script: - - pushd ${DIST_DIR} - - ls -l - - TARGET_LIBS_PACK_FILE=$(cat target_libs_arch_name) - - rm -f target_libs_arch_name ${TARGET_LIBS_PACK_FILE} - - ls -l - -pack_x86_64-linux-gnu_riscv: - extends: .pack_x86_64-linux-gnu_template - needs: - - job: "build_x86_64-linux-gnu: 
[RISCV]" - -pack_x86_64-linux-gnu_xtensa: - extends: .pack_x86_64-linux-gnu_template - needs: - - job: "build_x86_64-linux-gnu: [Xtensa]" - .pack_template: stage: pack tags: [ "amd64", "build" ] @@ -244,7 +243,7 @@ pack_x86_64-linux-gnu_xtensa: # update distro - pushd ${DIST_DIR} - ls -l - - DISTRO_PACK_FILE=$(cat dist_name_${CONF_HOST}_${TARGET}) + - DISTRO_PACK_FILE=$(cat dist_name_${CONF_HOST}) - echo "DISTRO_PACK_FILE=${DISTRO_PACK_FILE}" - ${UNPACK_TOOL} ${DISTRO_PACK_FILE} - DISTRO_PACK_DIR=$(tar tJf ${DISTRO_PACK_FILE} | sed -e 's@/.*@@' | uniq) @@ -263,115 +262,61 @@ pack_x86_64-linux-gnu_xtensa: # so that dir contains everything we need to re-pack after unpacking steps above - ${PACK_TOOL} ${DISTRO_PACK_FILE} ${DISTRO_PACK_DIR} - rm -rf ${DISTRO_PACK_DIR} - # remove x86_64-linux-gnu artifacts - - > - if [ "${CONF_HOST}" != "x86_64-linux-gnu" ]; then - DISTRO_PACK_FILE=$(cat dist_name_x86_64-linux-gnu_${TARGET}) - rm -f ${DISTRO_PACK_FILE} dist_name_x86_64-linux-gnu_${TARGET} - DISTRO_PACK_FILE=$(cat dist_name_libs_x86_64-linux-gnu_${TARGET}) - rm -f ${DISTRO_PACK_FILE} dist_name_libs_x86_64-linux-gnu_${TARGET} - fi - ls -l -pack_x86_64-w64-mingw32_riscv: +pack_x86_64-linux-gnu: extends: .pack_template needs: # needs target libs archive from native build job - - job: "build_x86_64-linux-gnu: [RISCV]" - - job: "build_x86_64-w64-mingw32: [RISCV]" + - job: "build_target_libs" + - job: "build_x86_64-linux-gnu" variables: - CONF_HOST: "x86_64-w64-mingw32" - TARGET: "RISCV" + CONF_HOST: "x86_64-linux-gnu" -pack_x86_64-w64-mingw32_xtensa: +pack_x86_64-w64-mingw32: extends: .pack_template needs: # needs target libs archive from native build job - - job: "build_x86_64-linux-gnu: [Xtensa]" - - job: "build_x86_64-w64-mingw32: [Xtensa]" + - job: "build_target_libs" + - job: "build_x86_64-w64-mingw32" variables: CONF_HOST: "x86_64-w64-mingw32" - TARGET: "Xtensa" -pack_arm-linux-gnueabihf_riscv: +pack_arm-linux-gnueabihf: extends: .pack_template needs: # needs target 
libs archive from native build job - - job: "build_x86_64-linux-gnu: [RISCV]" - - job: "build_arm-linux-gnueabihf: [RISCV]" + - job: "build_target_libs" + - job: "build_arm-linux-gnueabihf" variables: CONF_HOST: "arm-linux-gnueabihf" - TARGET: "RISCV" - -pack_arm-linux-gnueabihf_xtensa: - extends: .pack_template - needs: - # needs target libs archive from native build job - - job: "build_x86_64-linux-gnu: [Xtensa]" - - job: "build_arm-linux-gnueabihf: [Xtensa]" - variables: - CONF_HOST: "arm-linux-gnueabihf" - TARGET: "Xtensa" - -pack_aarch64-linux-gnu_riscv: - extends: .pack_template - needs: - # needs target libs archive from native build job - - job: "build_x86_64-linux-gnu: [RISCV]" - - job: "build_aarch64-linux-gnu: [RISCV]" - variables: - CONF_HOST: "aarch64-linux-gnu" - TARGET: "RISCV" -pack_aarch64-linux-gnu_xtensa: +pack_aarch64-linux-gnu: extends: .pack_template needs: # needs target libs archive from native build job - - job: "build_x86_64-linux-gnu: [Xtensa]" - - job: "build_aarch64-linux-gnu: [Xtensa]" + - job: "build_target_libs" + - job: "build_aarch64-linux-gnu" variables: CONF_HOST: "aarch64-linux-gnu" - TARGET: "Xtensa" -pack_x86_64-apple-darwin_riscv: +pack_x86_64-apple-darwin: extends: .pack_template needs: # needs target libs archive from native build job - - job: "build_x86_64-linux-gnu: [RISCV]" - - job: "build_x86_64-apple-darwin: [RISCV]" + - job: "build_target_libs" + - job: "build_x86_64-apple-darwin" variables: CONF_HOST: "x86_64-apple-darwin21.1" - TARGET: "RISCV" -pack_x86_64-apple-darwin_xtensa: +pack_aarch64-apple-darwin: extends: .pack_template needs: # needs target libs archive from native build job - - job: "build_x86_64-linux-gnu: [Xtensa]" - - job: "build_x86_64-apple-darwin: [Xtensa]" - variables: - CONF_HOST: "x86_64-apple-darwin21.1" - TARGET: "Xtensa" - -pack_aarch64-apple-darwin_riscv: - extends: .pack_template - needs: - # needs target libs archive from native build job - - job: "build_x86_64-linux-gnu: [RISCV]" - - job: 
"build_aarch64-apple-darwin: [RISCV]" + - job: "build_target_libs" + - job: "build_aarch64-apple-darwin" variables: CONF_HOST: "aarch64-apple-darwin21.1" - TARGET: "RISCV" - -pack_aarch64-apple-darwin_xtensa: - extends: .pack_template - needs: - # needs target libs archive from native build job - - job: "build_x86_64-linux-gnu: [Xtensa]" - - job: "build_aarch64-apple-darwin: [Xtensa]" - variables: - CONF_HOST: "aarch64-apple-darwin21.1" - TARGET: "Xtensa" .macos_codesign_template: stage: sign @@ -395,25 +340,15 @@ pack_aarch64-apple-darwin_xtensa: git clone -q --depth=1 ${NOTARIZATION_SCRIPTS_GIT} - ./macos_codesign_notarization/run.sh -sign_x86_64-apple-darwin_riscv: - extends: .macos_codesign_template - needs: - - pack_x86_64-apple-darwin_riscv - -sign_x86_64-apple-darwin_xtensa: +sign_x86_64-apple-darwin: extends: .macos_codesign_template needs: - - pack_x86_64-apple-darwin_xtensa + - pack_x86_64-apple-darwin -sign_aarch64-apple-darwin_riscv: +sign_aarch64-apple-darwin: extends: .macos_codesign_template needs: - - pack_aarch64-apple-darwin_riscv - -sign_aarch64-apple-darwin_xtensa: - extends: .macos_codesign_template - needs: - - pack_aarch64-apple-darwin_xtensa + - pack_aarch64-apple-darwin upload_to_http: stage: private_deploy @@ -424,8 +359,7 @@ upload_to_http: # force the fetch strategy to clean old archives up in dist/ dir GIT_STRATEGY: fetch needs: - - job: pack_x86_64-linux-gnu_riscv - - job: pack_x86_64-linux-gnu_xtensa + - job: pack_x86_64-linux-gnu script: - cit_add_ssh_key "${HTTP_UPLOAD_KEY}" # List of archives @@ -449,19 +383,14 @@ upload_to_github: GITHUB_TOKEN: "${GH_TOKEN}" GITHUB_REPO: "${GH_REPO_HTTPS}" TAG: "${CI_COMMIT_TAG}" + SHA256_FILE: esp-clang-${CI_COMMIT_TAG}-checksum.sha256 needs: - - job: pack_x86_64-linux-gnu_riscv - - job: pack_x86_64-linux-gnu_xtensa - - job: pack_arm-linux-gnueabihf_riscv - - job: pack_arm-linux-gnueabihf_xtensa - - job: pack_aarch64-linux-gnu_riscv - - job: pack_aarch64-linux-gnu_xtensa - - job: 
pack_x86_64-w64-mingw32_riscv - - job: pack_x86_64-w64-mingw32_xtensa - - job: sign_x86_64-apple-darwin_riscv - - job: sign_x86_64-apple-darwin_xtensa - - job: sign_aarch64-apple-darwin_riscv - - job: sign_aarch64-apple-darwin_xtensa + - job: pack_x86_64-linux-gnu + - job: pack_arm-linux-gnueabihf + - job: pack_aarch64-linux-gnu + - job: pack_x86_64-w64-mingw32 + - job: sign_x86_64-apple-darwin + - job: sign_aarch64-apple-darwin before_script: [] script: - ls -l dist*/ @@ -471,6 +400,16 @@ upload_to_github: - FILES=$(find ${DIST_DIR} -name dist_name_\* -exec cat {} \+) - cd ${DIST_DIR} - ls -l $FILES + # Generate checksum file + - > + for n in $FILES; do + sz=$(stat -c%s "${n}") >> ${SHA256_FILE}; + printf "# %s: %s bytes\n" "${n}" "${sz}" >> ${SHA256_FILE}; + sha256sum -b "${n}" >> ${SHA256_FILE}; + done + # Append FILES with checksum file + - FILES=$(echo -e "${FILES}\n${SHA256_FILE}") + - ls -l $FILES # Upload archives - for n in ${FILES}; do hub release edit -m "" -a "${n}" "${TAG}"; done @@ -481,10 +420,10 @@ update_idf_tools: only: - tags variables: - TOOL_NAME: openocd - TOOL_MEMBERS: openocd-esp32 + TOOL_NAME: esp-clang + TOOL_MEMBERS: esp-clang TOOL_VERSION: ${CI_COMMIT_TAG} - TOOL_SHA256_URL: https://github.com/espressif/openocd-esp32/releases/download/${CI_COMMIT_TAG}/openocd-esp32-${CI_COMMIT_TAG}-checksum.sha256 + TOOL_SHA256_URL: https://github.com/espressif/openocd-esp32/releases/download/esp-clang-${CI_COMMIT_TAG}/esp-clang-${CI_COMMIT_TAG}-checksum.sha256 RN_SECTION: Toolchain trigger: project: idf/idf-tools-updater From 337b1fc1e36da2ab986b91fa8f41c8ecd1ffdfae Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Sat, 13 Apr 2024 00:21:19 +0300 Subject: [PATCH 233/289] esp/ci: Add special build to run tests --- .gitlab-ci.yml | 48 +++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 43 insertions(+), 5 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index cb8aa697a551d..43cd6b195caa6 100644 --- a/.gitlab-ci.yml +++ 
b/.gitlab-ci.yml @@ -64,7 +64,6 @@ before_script: - *get_toolchain_build_scripts - LLVM_PROJECT_PATH=$PWD - BUILD_PATH=$PWD/${BUILD_DIR} - - INST_PATH=$PWD/_install_dir - mkdir -p ${BUILD_PATH} - BUILD_HOST=$(gcc -dumpmachine) # Config to build target libraries @@ -111,8 +110,7 @@ before_script: -DLLVM_PARALLEL_COMPILE_JOBS=2 -DCLANG_REPOSITORY_STRING="${GH_REPO_HTTPS}" -DCPACK_ARCHIVE_THREADS=0 - -B ${BUILD_PATH} 2>&1 - --install-prefix=$INST_PATH > ${BUILD_PATH}/build.log + -B ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/build.log # Do not run unit tests for cross-builds. # Run as non-root user because permission tests fail when run by root. - > @@ -170,6 +168,47 @@ before_script: echo "${ARCHIVE_NAME}" > ${PWD}/${DIST_DIR}/target_libs_arch_name fi +build_and_test: + tags: [ "amd64", "build" ] + stage: test_build + artifacts: + paths: + - ${BUILD_DIR}/*.log + when: always + expire_in: 1 day + variables: + after_script: + # help to identify that build failed due to OOM + - > + if [ $CI_JOB_STATUS == 'failed' ]; then + [ ! -f "${BUILD_DIR}/build.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/build.log || true + [ ! -f "${BUILD_DIR}/tests.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/tests.log || true + [ ! 
-f "${BUILD_DIR}/lld-tests.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/lld-tests.log || true + fi + script: + - BUILD_PATH=$PWD/${BUILD_DIR} + - mkdir -p ${BUILD_PATH} + - cmake -G Ninja + -S llvm + -DLLVM_ENABLE_PROJECTS="clang;lld;clang-tools-extra" + -DCMAKE_BUILD_TYPE=Release + -DLLVM_ENABLE_ASSERTIONS=ON + -DLLDB_INCLUDE_TESTS=OFF + -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=Xtensa + -B ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/build.log + - export CUR_USER=$(whoami); + - useradd -m test_runner; + - chown -R test_runner ${BUILD_PATH}; + - echo "Run LLVM/Clang unit tests"; + - touch ${BUILD_PATH}/tests.log; + - chmod o+w ${BUILD_PATH}/tests.log; + - runuser -u test_runner -- ninja -C ${BUILD_PATH} check-all 2>&1 > ${BUILD_PATH}/tests.log; + - echo "Run LLD unit tests"; + - touch ${BUILD_PATH}/lld-tests.log; + - chmod o+w ${BUILD_PATH}/lld-tests.log; + - runuser -u test_runner -- ninja -C ${BUILD_PATH} check-lld 2>&1 > ${BUILD_PATH}/lld-tests.log; + - chown -R ${CUR_USER} ${BUILD_PATH}; + .build_linux-gnu_template: extends: .build_toolchain_template variables: @@ -177,10 +216,9 @@ before_script: build_x86_64-linux-gnu: extends: .build_linux-gnu_template - stage: test_build + stage: build variables: CONF_HOST: "x86_64-linux-gnu" - RUN_CORE_TESTS: "ON" build_target_libs: extends: .build_linux-gnu_template From 443eac056eff870b23bd822a84f60623f35d7d53 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 2 Oct 2024 02:45:53 +0300 Subject: [PATCH 234/289] [Xtensa] Fix LOOP* pseudo instructions. 
--- llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp | 2 ++ llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp index b9ddf19750fe9..169ee494f6536 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp @@ -381,6 +381,8 @@ bool XtensaInstrInfo::isBranchOffsetInRange(unsigned BranchOp, case Xtensa::JX: return true; case Xtensa::LOOPEND: + assert((BrOffset <= 0) && "Wrong hardware loop"); + return true; case Xtensa::LOOPBR: BrOffset += 4; assert((BrOffset <= 0) && "Wrong hardware loop"); diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index ae0c6cabacd4d..aa2775a0cffe4 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1428,13 +1428,13 @@ let isTerminator = 1, isBarrier = 1, hasSideEffects = 1, Size = 3 in { } // LOOPSTART pseudo instruction reserves 9 bytes for LOOP operation and NOP operations for possible alignment. -let isTerminator = 1, isBarrier = 1, hasSideEffects = 1, Size = 9 in { +let hasSideEffects = 1, Size = 9 in { def LOOPSTART : Pseudo<(outs), (ins AR:$s, brtarget:$target), "!loopstart $s, $target", []>; } // LOOPEND pseudo instruction reserves 6 bytes for Jump and NOP operations. 
-let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 0, Size = 6 in { +let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 0, isNotDuplicable = 1, Size = 6 in { def LOOPEND : Pseudo<(outs), (ins brtarget:$target), "!loopend $target", [(Xtensa_loopend bb:$target)]>; } From 93a4e7dca1c645c291f78f20a7b9a05e57b1d6e8 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Fri, 3 May 2024 11:23:30 +0300 Subject: [PATCH 235/289] esp/ci: Upgrade docker image for Windows build to use MinGW 10 --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 43cd6b195caa6..02aadbc944b06 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -14,7 +14,7 @@ variables: ESP_LLVM_EMBEDDED_TOOLCHAIN_REF: "master" ESP_GNU_TOOLCHAIN_VER: "13.2.0_20240305" CROSS_ARM_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-arm:1 - CROSS_WIN_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-win:1 + CROSS_WIN_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-win:2 DIST_DIR: "dist" BUILD_DIR: "build" From 2cc4c5770ab2bf650c4f625c11be149ab3ba5033 Mon Sep 17 00:00:00 2001 From: Stefan Stipanovic Date: Thu, 18 Apr 2024 10:44:37 +0200 Subject: [PATCH 236/289] [Xtensa] Fix issue with adding scavenging frame index --- llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp index b9b3bb0ad3a1d..e40e73d4b84f3 100644 --- a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp @@ -376,7 +376,8 @@ void XtensaFrameLowering::processFunctionBeforeFrameFinalized( const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); unsigned Size = TRI.getSpillSize(RC); Align Alignment = TRI.getSpillAlign(RC); - for (int i = 0; i < NeedRegs; i++) + // If NeedsRegs == 0, we still need a spill slot + for (int i = 0; i <= NeedRegs; i++) 
RS->addScavengingFrameIndex( MFI.CreateStackObject(Size, Alignment, false)); } From c7d3e98963b8f71174d38459cac684cc87f79883 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Tue, 14 May 2024 23:24:55 +0300 Subject: [PATCH 237/289] esp/ci: Separate checksum files for toolchain and standalone libs distros --- .gitlab-ci.yml | 47 +++++++++++++++++++++++++++++------------------ 1 file changed, 29 insertions(+), 18 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 02aadbc944b06..b28cf30282426 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -155,7 +155,7 @@ before_script: echo "DISTRO_PACK_PATH=${DISTRO_PACK_PATH}" mv ${DISTRO_PACK_PATH} ${PWD}/${DIST_DIR}/ ARCHIVE_NAME=$(basename ${DISTRO_PACK_PATH}) - echo "${ARCHIVE_NAME}" > ${PWD}/${DIST_DIR}/dist_name_libs_${CONF_HOST} + echo "${ARCHIVE_NAME}" > ${PWD}/${DIST_DIR}/libs_dist_name_${CONF_HOST} fi # pack target libraries to be re-used in distros for other platforms - > @@ -408,6 +408,17 @@ upload_to_http: # Show info - echo -e "\nArchives were published there:\n\n$(for n in ${FILES}; do echo "${HTTP_PUBLIC_DIR}/ct-ng/llvm-builds/${n}"; done)\n" +.gen_checksum_file: &gen_checksum_file | + ls -l ${FILES} + for n in ${FILES}; do + sz=$(stat -c%s "${n}") >> ${SHA256_FILE}; + printf "# %s: %s bytes\n" "${n}" "${sz}" >> ${SHA256_FILE}; + sha256sum -b "${n}" >> ${SHA256_FILE}; + done + # Append FILES with checksum file + FILES=$(echo -e "${FILES}\n${SHA256_FILE}") + ls -l ${FILES} + upload_to_github: stage: public_deploy when: manual @@ -421,7 +432,8 @@ upload_to_github: GITHUB_TOKEN: "${GH_TOKEN}" GITHUB_REPO: "${GH_REPO_HTTPS}" TAG: "${CI_COMMIT_TAG}" - SHA256_FILE: esp-clang-${CI_COMMIT_TAG}-checksum.sha256 + TOOLCHAIN_SHA256_FILE: clang-${CI_COMMIT_TAG}-checksum.sha256 + LIBS_SHA256_FILE: libs-clang-${CI_COMMIT_TAG}-checksum.sha256 needs: - job: pack_x86_64-linux-gnu - job: pack_arm-linux-gnueabihf @@ -431,25 +443,24 @@ upload_to_github: - job: sign_aarch64-apple-darwin before_script: [] script: - - 
ls -l dist*/ + - ls -l ${DIST_DIR} + - cd ${DIST_DIR} - git remote add github ${GH_REPO_HTTPS} - hub release show ${TAG} || { echo "Please create a release on GitHub with ${TAG} tag at first"; exit 1; } + # Generate checksum file for toolchain # List of archives - - FILES=$(find ${DIST_DIR} -name dist_name_\* -exec cat {} \+) - - cd ${DIST_DIR} - - ls -l $FILES - # Generate checksum file - - > - for n in $FILES; do - sz=$(stat -c%s "${n}") >> ${SHA256_FILE}; - printf "# %s: %s bytes\n" "${n}" "${sz}" >> ${SHA256_FILE}; - sha256sum -b "${n}" >> ${SHA256_FILE}; - done - # Append FILES with checksum file - - FILES=$(echo -e "${FILES}\n${SHA256_FILE}") - - ls -l $FILES + - FILES=$(find ${PWD} -name dist_name_\* -exec cat {} \+) + - SHA256_FILE=${TOOLCHAIN_SHA256_FILE} + - *gen_checksum_file + - DIST_FILES=${FILES} + # Generate checksum file for standalone libraries + - FILES=$(find ${PWD} -name libs_dist_name_\* -exec cat {} \+) + - SHA256_FILE=${LIBS_SHA256_FILE} + - *gen_checksum_file + - DIST_FILES=$(echo -e "${DIST_FILES}\n${FILES}") + - ls -l ${DIST_FILES} # Upload archives - - for n in ${FILES}; do hub release edit -m "" -a "${n}" "${TAG}"; done + - for n in ${DIST_FILES}; do hub release edit -m "" -a "${n}" "${TAG}"; done update_idf_tools: stage: update_idf_tools @@ -461,7 +472,7 @@ update_idf_tools: TOOL_NAME: esp-clang TOOL_MEMBERS: esp-clang TOOL_VERSION: ${CI_COMMIT_TAG} - TOOL_SHA256_URL: https://github.com/espressif/openocd-esp32/releases/download/esp-clang-${CI_COMMIT_TAG}/esp-clang-${CI_COMMIT_TAG}-checksum.sha256 + TOOL_SHA256_URL: https://github.com/espressif/llvm-project/releases/download/${CI_COMMIT_TAG}/clang-${CI_COMMIT_TAG}-checksum.sha256 RN_SECTION: Toolchain trigger: project: idf/idf-tools-updater From 052f6cf71cd898a42ec513eee8f7e0d12a568624 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Tue, 14 May 2024 20:27:27 +0300 Subject: [PATCH 238/289] [RISCV] Add user trap CSRs --- llvm/lib/Target/RISCV/RISCVSystemOperands.td | 16 +++ 
llvm/test/MC/RISCV/rv32e-valid.s | 4 +- llvm/test/MC/RISCV/rv32i-valid.s | 4 +- llvm/test/MC/RISCV/user-csr-names.s | 116 +++++++++++++++++++ 4 files changed, 136 insertions(+), 4 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVSystemOperands.td b/llvm/lib/Target/RISCV/RISCVSystemOperands.td index 5f51775ea64a9..71565b42967c8 100644 --- a/llvm/lib/Target/RISCV/RISCVSystemOperands.td +++ b/llvm/lib/Target/RISCV/RISCVSystemOperands.td @@ -338,6 +338,22 @@ let AltName = "dscratch" in def : SysReg<"dscratch0", 0x7B2>; def : SysReg<"dscratch1", 0x7B3>; +//===----------------------------------------------------------------------===// +// User Trap Setup +//===----------------------------------------------------------------------===// +def : SysReg<"ustatus", 0x000>; +def : SysReg<"uie", 0x004>; +def : SysReg<"utvec", 0x005>; + +//===----------------------------------------------------------------------===// +// User Trap Handling +//===----------------------------------------------------------------------===// +def : SysReg<"uscratch", 0x040>; +def : SysReg<"uepc", 0x041>; +def : SysReg<"ucause", 0x042>; +def : SysReg<"utval", 0x043>; +def : SysReg<"uip", 0x044>; + //===----------------------------------------------------------------------===// // User Vector CSRs //===----------------------------------------------------------------------===// diff --git a/llvm/test/MC/RISCV/rv32e-valid.s b/llvm/test/MC/RISCV/rv32e-valid.s index ccb47f1557c69..1c252deec83ee 100644 --- a/llvm/test/MC/RISCV/rv32e-valid.s +++ b/llvm/test/MC/RISCV/rv32e-valid.s @@ -116,9 +116,9 @@ csrrw t0, 0xfff, t1 csrrs s0, 0xc00, x0 # CHECK-ASM-AND-OBJ: csrrs s0, fflags, a5 csrrs s0, 0x001, a5 -# CHECK-ASM-AND-OBJ: csrrc sp, 0, ra +# CHECK-ASM-AND-OBJ: csrrc sp, ustatus, ra csrrc sp, 0x000, ra -# CHECK-ASM-AND-OBJ: csrrwi a5, 0, 0 +# CHECK-ASM-AND-OBJ: csrrwi a5, ustatus, 0 csrrwi a5, 0x000, 0 # CHECK-ASM-AND-OBJ: csrrsi t2, 4095, 31 csrrsi t2, 0xfff, 31 diff --git a/llvm/test/MC/RISCV/rv32i-valid.s 
b/llvm/test/MC/RISCV/rv32i-valid.s index f03c2e1c23cf3..8a561eba43b7e 100644 --- a/llvm/test/MC/RISCV/rv32i-valid.s +++ b/llvm/test/MC/RISCV/rv32i-valid.s @@ -361,10 +361,10 @@ csrrs s0, 0xc00, x0 # CHECK-ASM-AND-OBJ: csrrs s3, fflags, s5 # CHECK-ASM: encoding: [0xf3,0xa9,0x1a,0x00] csrrs s3, 0x001, s5 -# CHECK-ASM-AND-OBJ: csrrc sp, 0, ra +# CHECK-ASM-AND-OBJ: csrrc sp, ustatus, ra # CHECK-ASM: encoding: [0x73,0xb1,0x00,0x00] csrrc sp, 0x000, ra -# CHECK-ASM-AND-OBJ: csrrwi a5, 0, 0 +# CHECK-ASM-AND-OBJ: csrrwi a5, ustatus, 0 # CHECK-ASM: encoding: [0xf3,0x57,0x00,0x00] csrrwi a5, 0x000, 0 # CHECK-ASM-AND-OBJ: csrrsi t2, 4095, 31 diff --git a/llvm/test/MC/RISCV/user-csr-names.s b/llvm/test/MC/RISCV/user-csr-names.s index f49eace659ac9..6dccbdd2fc38b 100644 --- a/llvm/test/MC/RISCV/user-csr-names.s +++ b/llvm/test/MC/RISCV/user-csr-names.s @@ -480,3 +480,119 @@ csrrs t2, 0xC1E, zero csrrs t1, hpmcounter31, zero # uimm12 csrrs t2, 0xC1F, zero + +################################## +# User Trap Setup +################################## + +# ustatus +# name +# CHECK-INST: csrrs t1, ustatus, zero +# CHECK-ENC: encoding: [0x73,0x23,0x00,0x00] +# CHECK-INST-ALIAS: csrr t1, ustatus +# uimm12 +# CHECK-INST: csrrs t2, ustatus, zero +# CHECK-ENC: encoding: [0xf3,0x23,0x00,0x00] +# CHECK-INST-ALIAS: csrr t2, ustatus +# name +csrrs t1, ustatus, zero +# uimm12 +csrrs t2, 0x000, zero + +# uie +# name +# CHECK-INST: csrrs t1, uie, zero +# CHECK-ENC: encoding: [0x73,0x23,0x40,0x00] +# CHECK-INST-ALIAS: csrr t1, uie +# uimm12 +# CHECK-INST: csrrs t2, uie, zero +# CHECK-ENC: encoding: [0xf3,0x23,0x40,0x00] +# CHECK-INST-ALIAS: csrr t2, uie +# name +csrrs t1, uie, zero +# uimm12 +csrrs t2, 0x004, zero + +# utvec +# name +# CHECK-INST: csrrs t1, utvec, zero +# CHECK-ENC: encoding: [0x73,0x23,0x50,0x00] +# CHECK-INST-ALIAS: csrr t1, utvec +# uimm12 +# CHECK-INST: csrrs t2, utvec, zero +# CHECK-ENC: encoding: [0xf3,0x23,0x50,0x00] +# CHECK-INST-ALIAS: csrr t2, utvec +# name +csrrs t1, 
utvec, zero +# uimm12 +csrrs t2, 0x005, zero + +# uscratch +# name +# CHECK-INST: csrrs t1, uscratch, zero +# CHECK-ENC: encoding: [0x73,0x23,0x00,0x04] +# CHECK-INST-ALIAS: csrr t1, uscratch +# uimm12 +# CHECK-INST: csrrs t2, uscratch, zero +# CHECK-ENC: encoding: [0xf3,0x23,0x00,0x04] +# CHECK-INST-ALIAS: csrr t2, uscratch +# name +csrrs t1, uscratch, zero +# uimm12 +csrrs t2, 0x040, zero + +# uepc +# name +# CHECK-INST: csrrs t1, uepc, zero +# CHECK-ENC: encoding: [0x73,0x23,0x10,0x04] +# CHECK-INST-ALIAS: csrr t1, uepc +# uimm12 +# CHECK-INST: csrrs t2, uepc, zero +# CHECK-ENC: encoding: [0xf3,0x23,0x10,0x04] +# CHECK-INST-ALIAS: csrr t2, uepc +# name +csrrs t1, uepc, zero +# uimm12 +csrrs t2, 0x041, zero + +# ucause +# name +# CHECK-INST: csrrs t1, ucause, zero +# CHECK-ENC: encoding: [0x73,0x23,0x20,0x04] +# CHECK-INST-ALIAS: csrr t1, ucause +# uimm12 +# CHECK-INST: csrrs t2, ucause, zero +# CHECK-ENC: encoding: [0xf3,0x23,0x20,0x04] +# CHECK-INST-ALIAS: csrr t2, ucause +# name +csrrs t1, ucause, zero +# uimm12 +csrrs t2, 0x042, zero + +# utval +# name +# CHECK-INST: csrrs t1, utval, zero +# CHECK-ENC: encoding: [0x73,0x23,0x30,0x04] +# CHECK-INST-ALIAS: csrr t1, utval +# uimm12 +# CHECK-INST: csrrs t2, utval, zero +# CHECK-ENC: encoding: [0xf3,0x23,0x30,0x04] +# CHECK-INST-ALIAS: csrr t2, utval +# name +csrrs t1, utval, zero +# uimm12 +csrrs t2, 0x043, zero + +# uip +# name +# CHECK-INST: csrrs t1, uip, zero +# CHECK-ENC: encoding: [0x73,0x23,0x40,0x04] +# CHECK-INST-ALIAS: csrr t1, uip +# uimm12 +# CHECK-INST: csrrs t2, uip, zero +# CHECK-ENC: encoding: [0xf3,0x23,0x40,0x04] +# CHECK-INST-ALIAS: csrr t2, uip +# name +csrrs t1, uip, zero +# uimm12 +csrrs t2, 0x044, zero From 730507e48b0566bd028add618e863fb750f50c64 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Fri, 5 Jul 2024 17:13:34 +0300 Subject: [PATCH 239/289] [Xtensa] Add Xtensa builtins into last builtin ID calculation --- clang/include/clang/Basic/TargetBuiltins.h | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/clang/include/clang/Basic/TargetBuiltins.h b/clang/include/clang/Basic/TargetBuiltins.h index 55ee7715e02a7..7370d46b45f66 100644 --- a/clang/include/clang/Basic/TargetBuiltins.h +++ b/clang/include/clang/Basic/TargetBuiltins.h @@ -383,7 +383,7 @@ namespace clang { PPC::LastTSBuiltin, NVPTX::LastTSBuiltin, AMDGPU::LastTSBuiltin, X86::LastTSBuiltin, VE::LastTSBuiltin, RISCV::LastTSBuiltin, Hexagon::LastTSBuiltin, Mips::LastTSBuiltin, XCore::LastTSBuiltin, - SystemZ::LastTSBuiltin, WebAssembly::LastTSBuiltin}); + SystemZ::LastTSBuiltin, WebAssembly::LastTSBuiltin, Xtensa::LastTSBuiltin}); } // end namespace clang. From e112a97db408f02f1d3a7c0e0ff60532dbbf160b Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 8 Oct 2024 12:38:46 +0300 Subject: [PATCH 240/289] [Xtensa] Support for asm underscore prefix --- .../Xtensa/AsmParser/XtensaAsmParser.cpp | 18 ++ .../Disassembler/XtensaDisassembler.cpp | 8 + .../Xtensa/MCTargetDesc/XtensaInstPrinter.cpp | 11 ++ .../Xtensa/MCTargetDesc/XtensaInstPrinter.h | 1 + .../MCTargetDesc/XtensaMCCodeEmitter.cpp | 16 ++ llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 117 ++++++++++++- llvm/lib/Target/Xtensa/XtensaOperands.td | 7 + llvm/test/MC/Xtensa/Core/arith.s | 10 ++ llvm/test/MC/Xtensa/Core/branch.s | 156 ++++++++++++++++++ llvm/test/MC/Xtensa/Core/invalid.s | 7 +- llvm/test/MC/Xtensa/Core/memory.s | 10 ++ llvm/test/MC/Xtensa/Core/shift.s | 5 + 12 files changed, 362 insertions(+), 4 deletions(-) diff --git a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp index 0cb171324fabb..9a5d645dc68e2 100644 --- a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp +++ b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp @@ -301,6 +301,8 @@ struct XtensaOperand : public MCParsedAsmOperand { bool isImm8n_7() const { return isImm(-8, 7); } bool isShimm1_31() const { return isImm(1, 31); } + + bool isShimm0_31() const { return isImm(0, 31); } 
bool isImm16_31() const { return isImm(16, 31); } @@ -632,6 +634,19 @@ bool XtensaAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc, Inst = TmpInst; } } break; + case Xtensa::SLLI: { + uint32_t ImmOp32 = static_cast(Inst.getOperand(2).getImm()); + int64_t Imm = ImmOp32; + if (Imm == 0) { + MCInst TmpInst; + TmpInst.setLoc(IDLoc); + TmpInst.setOpcode(Xtensa::OR); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(1)); + TmpInst.addOperand(Inst.getOperand(1)); + Inst = TmpInst; + } + } break; default: break; } @@ -710,6 +725,9 @@ bool XtensaAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, case Match_InvalidShimm1_31: return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), "expected immediate in range [1, 31]"); + case Match_InvalidShimm0_31: + return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), + "expected immediate in range [0, 31]"); case Match_InvalidUimm4: return Error(RefineErrorLoc(IDLoc, Operands, ErrorInfo), "expected immediate in range [0, 15]"); diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index 81c02b5de4652..e613c56e146d0 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -659,6 +659,14 @@ static DecodeStatus decodeShimm1_31Operand(MCInst &Inst, uint64_t Imm, return MCDisassembler::Success; } +static DecodeStatus decodeShimm0_31Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<5>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(32 - Imm)); + return MCDisassembler::Success; +} + static DecodeStatus decodeImm7_22Operand(MCInst &Inst, uint64_t Imm, int64_t Address, const void *Decoder) { diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp index a8c1aaed10b1a..7a5809c7059f7 100644 --- 
a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.cpp @@ -264,6 +264,17 @@ void XtensaInstPrinter::printShimm1_31_AsmOperand(const MCInst *MI, int OpNum, printOperand(MI, OpNum, O); } +void XtensaInstPrinter::printShimm0_31_AsmOperand(const MCInst *MI, int OpNum, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 31) && + "Invalid argument, value must be in range [0,31]"); + O << Value; + } else + printOperand(MI, OpNum, O); +} + void XtensaInstPrinter::printImm1_16_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O) { if (MI->getOperand(OpNum).isImm()) { diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h index 756554bcf09b9..174fb51a6f054 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaInstPrinter.h @@ -59,6 +59,7 @@ class XtensaInstPrinter : public MCInstPrinter { void printUimm4_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printUimm5_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printShimm1_31_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); + void printShimm0_31_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printImm1_16_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printImm1n_15_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); void printImm32n_95_AsmOperand(const MCInst *MI, int OpNum, raw_ostream &O); diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp index 958dedbaaa2e5..3ac46b07e04dd 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCCodeEmitter.cpp @@ -135,6 +135,10 @@ class XtensaMCCodeEmitter : public MCCodeEmitter { 
SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; + uint32_t getShimm0_31OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + uint32_t getB4constOpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; @@ -454,6 +458,18 @@ XtensaMCCodeEmitter::getShimm1_31OpValue(const MCInst &MI, unsigned OpNo, return ((32 - Res) & 0x1f); } +uint32_t +XtensaMCCodeEmitter::getShimm0_31OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + uint32_t Res = static_cast(MO.getImm()); + + assert(((Res >= 0) && (Res <= 31)) && "Unexpected operand value!"); + + return ((32 - Res) & 0x1f); +} + uint32_t XtensaMCCodeEmitter::getImm1_16OpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index aa2775a0cffe4..e7b68e1f2cea1 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -40,6 +40,8 @@ def AND : ArithLogic_RRR<0x01, 0x00, "and", and, 1>; def OR : ArithLogic_RRR<0x02, 0x00, "or", or, 1>; def XOR : ArithLogic_RRR<0x03, 0x00, "xor", xor, 1>; +def : InstAlias<"_add\t$r, $s, $t", (ADD AR:$r, AR:$s, AR:$t)>; + class ADDX oper, string instrAsm, list pattern> : RRR_Inst<0x00, 0x00, oper, (outs AR:$r), (ins AR:$s, AR:$t), instrAsm#"\t$r, $s, $t", pattern>; @@ -67,6 +69,8 @@ def ADDI : RRI8_Inst<0x02, (outs AR:$t), (ins AR:$s, imm8:$imm8), let r = 0x0C; } +def : InstAlias<"_addi\t$r, $s, $imm8", (ADDI AR:$r, AR:$s, imm8:$imm8)>; + def ADDMI : RRI8_Inst<0x02, (outs AR:$t), (ins AR:$s, imm8_sh8:$imm_sh8), "addmi\t$t, $s, $imm_sh8", [(set AR:$t, (add AR:$s, imm8_sh8:$imm_sh8))]> { @@ -95,6 +99,17 @@ def MOVI : RRI8_Inst<0x02, (outs AR:$t), (ins imm12m:$imm), let r = 0xa; } +def _MOVI : RRI8_Inst<0x02, (outs AR:$t), (ins imm12:$imm), + 
"_movi\t$t, $imm", + [(set AR:$t, imm12m:$imm)]> { + let DecoderNamespace = "Fallback"; + bits<12> imm; + + let imm8{7-0} = imm{7-0}; + let s{3-0} = imm{11-8}; + let r = 0xa; +} + def MOVEQZ : RRR_Inst<0x00, 0x03, 0x08, (outs AR:$r), (ins AR:$s, AR:$t), "moveqz\t$r, $s, $t", []>; def MOVNEZ : RRR_Inst<0x00, 0x03, 0x09, (outs AR:$r), (ins AR:$s, AR:$t), @@ -179,9 +194,18 @@ def _SRLI : RRR_Inst<0x00, 0x01, 0x04, (outs AR:$r), (ins AR:$t, uimm4:$sa), let s = sa; } -def SLLI : RRR_Inst<0x00, 0x01, 0x00, (outs AR:$r), (ins AR:$s, shimm1_31:$sa), - "slli\t$r, $s, $sa", +def SLLI : RRR_Inst<0x00, 0x01, 0x00, (outs AR:$r), (ins AR:$s, shimm0_31:$sa), + "slli\t$r, $s, $sa", []> { + bits<5> sa; + + let Inst{20} = sa{4}; + let t = sa{3-0}; +} + +def _SLLI : RRR_Inst<0x00, 0x01, 0x00, (outs AR:$r), (ins AR:$s, shimm1_31:$sa), + "_slli\t$r, $s, $sa", [(set AR:$r, (shl AR:$s, shimm1_31:$sa))]> { + let DecoderNamespace = "Fallback"; bits<5> sa; let Inst{20} = sa{4}; @@ -228,6 +252,9 @@ def L8UI : Load_RRI8<0x00, "l8ui", zextloadi8, addr_ish1, mem8>; def L16SI : Load_RRI8<0x09, "l16si", sextloadi16, addr_ish2, mem16>; def L16UI : Load_RRI8<0x01, "l16ui", zextloadi16, addr_ish2, mem16>; def L32I : Load_RRI8<0x02, "l32i", load, addr_ish4, mem32>; +def _L32I : Load_RRI8<0x02, "_l32i", load, addr_ish4, mem32> { + let DecoderNamespace = "Fallback"; +} // Store instructions let mayStore = 1, usesCustomInserter = 1 in { @@ -247,6 +274,9 @@ let mayStore = 1, usesCustomInserter = 1 in { def S8I : Store_II8<0x04, "s8i", truncstorei8, addr_ish1, mem8>; def S16I : Store_II8<0x05, "s16i", truncstorei16, addr_ish2, mem16>; def S32I : Store_II8<0x06, "s32i", store, addr_ish4, mem32>; +def _S32I : Store_II8<0x06, "_s32i", store, addr_ish4, mem32> { + let DecoderNamespace = "Fallback"; +} def L32R : RI16_Inst<0x01, (outs AR:$t), (ins L32Rtarget:$label), "l32r\t$t, $label", []> { @@ -348,6 +378,14 @@ def BLT : Branch_RR<0x02, "blt", SETLT>; def BGEU : Branch_RR<0x0B, "bgeu", SETUGE>; def BLTU : 
Branch_RR<0x03, "bltu", SETULT>; +// RR aliases +def : InstAlias<"_beq\t$s, $t, $target", (BEQ AR:$s, AR:$t, brtarget:$target)>; +def : InstAlias<"_bne\t$s, $t, $target", (BNE AR:$s, AR:$t, brtarget:$target)>; +def : InstAlias<"_bge\t$s, $t, $target", (BGE AR:$s, AR:$t, brtarget:$target)>; +def : InstAlias<"_blt\t$s, $t, $target", (BLT AR:$s, AR:$t, brtarget:$target)>; +def : InstAlias<"_bgeu\t$s, $t, $target", (BGEU AR:$s, AR:$t, brtarget:$target)>; +def : InstAlias<"_bltu\t$s, $t, $target", (BLTU AR:$s, AR:$t, brtarget:$target)>; + def BEQI : Branch_RI<0x02, "beqi", SETEQ>; def BNEI : Branch_RI<0x06, "bnei", SETNE>; def BGEI : Branch_RI<0x0E, "bgei", SETGE>; @@ -355,11 +393,25 @@ def BLTI : Branch_RI<0x0A, "blti", SETLT>; def BGEUI : Branch_RIU<0x0F, "bgeui", SETUGE>; def BLTUI : Branch_RIU<0x0B, "bltui", SETULT>; +// RI aliases +def : InstAlias<"_beqi\t$s, $imm, $target", (BEQI AR:$s, b4const:$imm, brtarget:$target)>; +def : InstAlias<"_bnei\t$s, $imm, $target", (BNEI AR:$s, b4const:$imm, brtarget:$target)>; +def : InstAlias<"_bgei\t$s, $imm, $target", (BGEI AR:$s, b4const:$imm, brtarget:$target)>; +def : InstAlias<"_blti\t$s, $imm, $target", (BLTI AR:$s, b4const:$imm, brtarget:$target)>; +def : InstAlias<"_bgeui\t$s, $imm, $target", (BGEUI AR:$s, b4constu:$imm, brtarget:$target)>; +def : InstAlias<"_bltui\t$s, $imm, $target", (BLTUI AR:$s, b4constu:$imm, brtarget:$target)>; + def BEQZ : Branch_RZ<0x01, 0x00, "beqz", SETEQ>; def BNEZ : Branch_RZ<0x01, 0x01, "bnez", SETNE>; def BGEZ : Branch_RZ<0x01, 0x03, "bgez", SETGE>; def BLTZ : Branch_RZ<0x01, 0x02, "bltz", SETLT>; +// RZ aliases +def : InstAlias<"_beqz\t$s, $target", (BEQZ AR:$s, brtarget:$target)>; +def : InstAlias<"_bnez\t$s, $target", (BNEZ AR:$s, brtarget:$target)>; +def : InstAlias<"_bgez\t$s, $target", (BGEZ AR:$s, brtarget:$target)>; +def : InstAlias<"_bltz\t$s, $target", (BLTZ AR:$s, brtarget:$target)>; + def BALL : RRI8_Inst<0x07, (outs), (ins AR:$s, AR:$t, brtarget:$target), "ball\t$s, $t, 
$target", []> { @@ -369,6 +421,8 @@ def BALL : RRI8_Inst<0x07, (outs), let imm8 = target; } +def : InstAlias<"_ball\t$s, $t, $target", (BALL AR:$s, AR:$t, brtarget:$target)>; + def BANY : RRI8_Inst<0x07, (outs), (ins AR:$s, AR:$t, brtarget:$target), "bany\t$s, $t, $target", []> { @@ -378,6 +432,8 @@ def BANY : RRI8_Inst<0x07, (outs), let imm8 = target; } +def : InstAlias<"_bany\t$s, $t, $target", (BANY AR:$s, AR:$t, brtarget:$target)>; + def BBC : RRI8_Inst<0x07, (outs), (ins AR:$s, AR:$t, brtarget:$target), "bbc\t$s, $t, $target", []> { @@ -387,6 +443,8 @@ def BBC : RRI8_Inst<0x07, (outs), let imm8 = target; } +def : InstAlias<"_bbc\t$s, $t, $target", (BBC AR:$s, AR:$t, brtarget:$target)>; + def BBS : RRI8_Inst<0x07, (outs), (ins AR:$s, AR:$t, brtarget:$target), "bbs\t$s, $t, $target", []> { @@ -396,6 +454,8 @@ def BBS : RRI8_Inst<0x07, (outs), let imm8 = target; } +def : InstAlias<"_bbs\t$s, $t, $target", (BBS AR:$s, AR:$t, brtarget:$target)>; + def BNALL : RRI8_Inst<0x07, (outs), (ins AR:$s, AR:$t, brtarget:$target), "bnall\t$s, $t, $target", []> { @@ -405,6 +465,8 @@ def BNALL : RRI8_Inst<0x07, (outs), let imm8 = target; } +def : InstAlias<"_bnall\t$s, $t, $target", (BNALL AR:$s, AR:$t, brtarget:$target)>; + def BNONE : RRI8_Inst<0x07, (outs), (ins AR:$s, AR:$t, brtarget:$target), "bnone\t$s, $t, $target", []> { @@ -414,6 +476,8 @@ def BNONE : RRI8_Inst<0x07, (outs), let imm8 = target; } +def : InstAlias<"_bnone\t$s, $t, $target", (BNONE AR:$s, AR:$t, brtarget:$target)>; + def BBCI : RRI8_Inst<0x07, (outs), (ins AR:$s, uimm5:$imm, brtarget:$target), "bbci\t$s, $imm, $target", []> { @@ -444,6 +508,8 @@ def BBSI : RRI8_Inst<0x07, (outs), let imm8 = target; } +def : InstAlias<"_bbsi\t$s, $imm, $target", (BBSI AR:$s, uimm5:$imm, brtarget:$target)>; + def : InstAlias<"bbsi.l\t$s, $imm, $target", (BBSI AR:$s, uimm5:$imm, brtarget:$target)>; def : InstAlias<"_bbsi.l\t$s, $imm, $target", (BBSI AR:$s, uimm5:$imm, brtarget:$target)>; @@ -495,6 +561,8 @@ let isReturn = 1, 
isTerminator = 1, } } +def : InstAlias<"_ret", (RET)>; + // Call patterns def : Pat<(Xtensa_call (i32 tglobaladdr:$dst)), (CALL0 tglobaladdr:$dst)>; @@ -573,6 +641,8 @@ def NOP : RRR_Inst<0x00, 0x00, 0x00, (outs), (ins), let t = 0x0f; } +def : InstAlias<"_nop", (NOP)>; + def WSR : RSR_Inst<0x00, 0x03, 0x01, (outs SR:$sr), (ins AR:$t), "wsr\t$t, $sr", []>; @@ -673,6 +743,8 @@ class ArithLogic_RRRN oper0, string instrAsm, def ADD_N : ArithLogic_RRRN<0x0a, "add.n", add, 1>; +def : InstAlias<"_add.n\t$r, $s, $t", (ADD_N AR:$r, AR:$s, AR:$t)>; + def ADDI_N : RRRN_Inst<0x0B, (outs AR:$r), (ins AR:$s, imm1n_15:$imm), "addi.n\t$r, $s, $imm", [(set AR:$r, (add AR:$s, imm1n_15:$imm))]>, Requires<[HasDensity]> { @@ -681,11 +753,14 @@ def ADDI_N : RRRN_Inst<0x0B, (outs AR:$r), (ins AR:$s, imm1n_15:$imm), let t = imm; } +def : InstAlias<"_addi.n\t$r, $s, $imm", (ADDI_N AR:$r, AR:$s, imm1n_15:$imm)>; + def MOV_N : RRRN_Inst<0x0D, (outs AR:$t), (ins AR:$s), "mov.n\t$t, $s", []>, Requires<[HasDensity]> { let r = 0; } +def : InstAlias<"_mov.n\t $t, $s", (MOV_N AR:$t, AR:$s)>; def : InstAlias<"mov\t $t, $s", (OR AR:$t, AR:$s, AR:$s)>; def MOVI_N : RI7_Inst<0xc, 0x0, (outs AR:$s), (ins imm32n_95:$imm7), @@ -705,6 +780,17 @@ let mayLoad = 1, usesCustomInserter = 1 in { } } +let mayLoad = 1, usesCustomInserter = 1 in { + def _L32I_N : RRRN_Inst<0x8, (outs AR:$t), (ins mem32n:$addr), + "_l32i.n\t$t, $addr", []>, Requires<[HasDensity]> { + bits<8> addr; + let DecoderNamespace = "Fallback"; + + let r{3-0} = addr{7-4}; + let s{3-0} = addr{3-0}; + } +} + // Store instruction let mayStore = 1, usesCustomInserter = 1 in { def S32I_N : RRRN_Inst<0x9, (outs), (ins AR:$t, mem32n:$addr), @@ -716,6 +802,17 @@ let mayStore = 1, usesCustomInserter = 1 in { } } +let mayStore = 1, usesCustomInserter = 1 in { + def _S32I_N : RRRN_Inst<0x9, (outs), (ins AR:$t, mem32n:$addr), + "_s32i.n\t$t, $addr", []>, Requires<[HasDensity]> { + bits<8> addr; + let DecoderNamespace = "Fallback"; + + let r{3-0} = 
addr{7-4}; + let s{3-0} = addr{3-0}; + } +} + //Return instruction let isReturn = 1, isTerminator = 1, isBarrier = 1, Uses = [A0] in { @@ -728,6 +825,8 @@ let isReturn = 1, isTerminator = 1, } } +def : InstAlias<"_ret.n", (RET_N)>; + //===----------------------------------------------------------------------===// // Windowed instructions //===----------------------------------------------------------------------===// @@ -815,6 +914,9 @@ let isReturn = 1, isTerminator = 1, } } +def : InstAlias<"_retw", (RETW)>; +def : InstAlias<"_retw.n", (RETW_N)>; + //Store 32-bit for Window Exceptions def S32E : RRI4_Inst<0x00, 0x09, (outs), (ins AR:$t, AR:$s, imm64n_4n:$imm), "s32e\t$t, $s, $imm", []>, Requires<[HasWindowed]> { @@ -926,6 +1028,9 @@ let isBranch = 1, isTerminator = 1, Predicates = [HasBoolean] in { let imm8 = target; } } + +def : InstAlias<"_BT\t$b, $target", (BT BR:$b, brtarget:$target)>; +def : InstAlias<"_BF\t$b, $target", (BF BR:$b, brtarget:$target)>; let Constraints = "$dr = $r,@earlyclobber $dr" in { def MOVF : RRR_Inst<0x00, 0x03, 0x0C, (outs AR:$dr), (ins AR:$r, AR:$s, BR:$t), @@ -1404,6 +1509,8 @@ def LOOP : RRI8_Inst<0x06, (outs), (ins AR:$s, ltarget:$target), let imm8 = target; } +def : InstAlias<"_loop\t$s, $target", (LOOP AR:$s, ltarget:$target)>; + def LOOPGTZ : RRI8_Inst<0x06, (outs), (ins AR:$s, ltarget:$target), "loopgtz\t$s, $target", []>, Requires<[HasLoop]> { bits<8> target; @@ -1413,6 +1520,8 @@ def LOOPGTZ : RRI8_Inst<0x06, (outs), (ins AR:$s, ltarget:$target), let imm8 = target; } +def : InstAlias<"_loopgtz\t$s, $target", (LOOPGTZ AR:$s, ltarget:$target)>; + def LOOPNEZ : RRI8_Inst<0x06, (outs), (ins AR:$s, ltarget:$target), "loopnez\t$s, $target", []>, Requires<[HasLoop]> { bits<8> target; @@ -1422,6 +1531,8 @@ def LOOPNEZ : RRI8_Inst<0x06, (outs), (ins AR:$s, ltarget:$target), let imm8 = target; } +def : InstAlias<"_loopnez\t$s, $target", (LOOPNEZ AR:$s, ltarget:$target)>; + let isTerminator = 1, isBarrier = 1, hasSideEffects = 1, Size = 
3 in { def LOOPINIT : Pseudo<(outs AR:$elts), (ins AR:$eltsin), "!loopinit $elts, $eltsin", [(set AR:$elts, (int_start_loop_iterations AR:$eltsin))]>; @@ -1560,6 +1671,8 @@ let isBarrier = 1, isTerminator = 1 in { } } +def : InstAlias<"_break.n\t$imm", (BREAK_N uimm4:$imm)>; + def : Pat<(trap), (BREAK (i32 1), (i32 15))>; //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaOperands.td b/llvm/lib/Target/Xtensa/XtensaOperands.td index 3d10410a77599..1976d50b8c4bb 100644 --- a/llvm/lib/Target/Xtensa/XtensaOperands.td +++ b/llvm/lib/Target/Xtensa/XtensaOperands.td @@ -127,6 +127,13 @@ def shimm1_31 : Immediate= 1 && Imm <= 31; }], "Shimm1_31_A let DecoderMethod = "decodeShimm1_31Operand"; } +// shimm0_31 predicate - Immediate in the range [0,31] +def Shimm0_31_AsmOperand : ImmAsmOperand<"Shimm0_31">; +def shimm0_31 : Immediate= 0 && Imm <= 31; }], "Shimm0_31_AsmOperand"> { + let EncoderMethod = "getShimm0_31OpValue"; + let DecoderMethod = "decodeShimm1_31Operand"; +} + defm imm32n_28: ImmRangeDecl<-32, 28, 4>; defm imm64n_56: ImmRangeDecl<-64, 56, 8>; defm imm0_56: ImmRangeDecl<0, 56, 8>; diff --git a/llvm/test/MC/Xtensa/Core/arith.s b/llvm/test/MC/Xtensa/Core/arith.s index fb6ac2608b0a9..1a1090e26ef87 100644 --- a/llvm/test/MC/Xtensa/Core/arith.s +++ b/llvm/test/MC/Xtensa/Core/arith.s @@ -15,6 +15,11 @@ abs a5, a6 # CHECK: encoding: [0x40,0x39,0x80] add a3, a9, a4 +# Instruction format RRR +# CHECK-INST: add a3, a9, a4 +# CHECK: encoding: [0x40,0x39,0x80] +_add a3, a9, a4 + # CHECK-INST: add a15, a9, a1 # CHECK: encoding: [0x10,0xf9,0x80] add a15, a9, sp @@ -24,6 +29,11 @@ add a15, a9, sp # CHECK: encoding: [0x82,0xc1,0x80] addi a8, sp, -128 +# Instruction format RRI8 +# CHECK-INST: addi a8, a1, -128 +# CHECK: encoding: [0x82,0xc1,0x80] +_addi a8, sp, -128 + # CHECK-INST: addi a8, a1, -12 # CHECK: encoding: [0x82,0xc1,0xf4] addi a8, a1, -12 diff --git a/llvm/test/MC/Xtensa/Core/branch.s 
b/llvm/test/MC/Xtensa/Core/branch.s index 66b68a610c1d2..d8e586d224591 100644 --- a/llvm/test/MC/Xtensa/Core/branch.s +++ b/llvm/test/MC/Xtensa/Core/branch.s @@ -10,152 +10,308 @@ LBL0: # CHECK: encoding: [0x37,0x41,A] ball a1, a3, LBL0 +# Instruction format RRI8 +# CHECK-INST: ball a1, a3, LBL0 +# CHECK: encoding: [0x37,0x41,A] +_ball a1, a3, LBL0 + # Instruction format RRI8 # CHECK-INST: bany a8, a13, LBL0 # CHECK: encoding: [0xd7,0x88,A] bany a8, a13, LBL0 +# Instruction format RRI8 +# CHECK-INST: bany a8, a13, LBL0 +# CHECK: encoding: [0xd7,0x88,A] +_bany a8, a13, LBL0 + # Instruction format RRI8 # CHECK-INST: bbc a8, a7, LBL0 # CHECK: encoding: [0x77,0x58,A] bbc a8, a7, LBL0 +# Instruction format RRI8 +# CHECK-INST: bbc a8, a7, LBL0 +# CHECK: encoding: [0x77,0x58,A] +_bbc a8, a7, LBL0 + # Instruction format RRI8 # CHECK-INST: bbci a3, 16, LBL0 # CHECK: encoding: [0x07,0x73,A] bbci a3, 16, LBL0 +# Instruction format RRI8 +# CHECK-INST: bbci a3, 16, LBL0 +# CHECK: encoding: [0x07,0x73,A] +_bbci a3, 16, LBL0 + # CHECK-INST: bbci a3, 16, LBL0 # CHECK: encoding: [0x07,0x73,A] bbci a3, (16), LBL0 +# CHECK-INST: bbci a3, 16, LBL0 +# CHECK: encoding: [0x07,0x73,A] +_bbci a3, (16), LBL0 + # CHECK-INST: bbci a3, 16, LBL0 # CHECK: encoding: [0x07,0x73,A] bbci.l a3, 16, LBL0 +# CHECK-INST: bbci a3, 16, LBL0 +# CHECK: encoding: [0x07,0x73,A] +_bbci.l a3, 16, LBL0 + # Instruction format RRI8 # CHECK-INST: bbs a12, a5, LBL0 # CHECK: encoding: [0x57,0xdc,A] bbs a12, a5, LBL0 +# Instruction format RRI8 +# CHECK-INST: bbs a12, a5, LBL0 +# CHECK: encoding: [0x57,0xdc,A] +_bbs a12, a5, LBL0 + # Instruction format RRI8 # CHECK-INST: bbsi a3, 16, LBL0 # CHECK: encoding: [0x07,0xf3,A] bbsi a3, 16, LBL0 +# Instruction format RRI8 +# CHECK-INST: bbsi a3, 16, LBL0 +# CHECK: encoding: [0x07,0xf3,A] +_bbsi a3, 16, LBL0 + # CHECK-INST: bbsi a3, 16, LBL0 # CHECK: encoding: [0x07,0xf3,A] bbsi.l a3, 16, LBL0 +# CHECK-INST: bbsi a3, 16, LBL0 +# CHECK: encoding: [0x07,0xf3,A] +_bbsi.l a3, 16, 
LBL0 + # Instruction format RRI8 # CHECK-INST: bnall a7, a3, LBL0 # CHECK: encoding: [0x37,0xc7,A] bnall a7, a3, LBL0 +# Instruction format RRI8 +# CHECK-INST: bnall a7, a3, LBL0 +# CHECK: encoding: [0x37,0xc7,A] +_bnall a7, a3, LBL0 + # Instruction format RRI8 # CHECK-INST: bnone a2, a4, LBL0 # CHECK: encoding: [0x47,0x02,A] bnone a2, a4, LBL0 +# Instruction format RRI8 +# CHECK-INST: bnone a2, a4, LBL0 +# CHECK: encoding: [0x47,0x02,A] +_bnone a2, a4, LBL0 + # Instruction format RRI8 # CHECK-INST: beq a1, a2, LBL0 # CHECK: encoding: [0x27,0x11,A] beq a1, a2, LBL0 +# Instruction format RRI8 +# CHECK-INST: beq a1, a2, LBL0 +# CHECK: encoding: [0x27,0x11,A] +_beq a1, a2, LBL0 + # CHECK-INST: beq a11, a5, LBL0 # CHECK: encoding: [0x57,0x1b,A] beq a11, a5, LBL0 +# CHECK-INST: beq a11, a5, LBL0 +# CHECK: encoding: [0x57,0x1b,A] +_beq a11, a5, LBL0 + # Instruction format BRI8 # CHECK-INST: beqi a1, 256, LBL0 # CHECK: encoding: [0x26,0xf1,A] beqi a1, 256, LBL0 +# Instruction format BRI8 +# CHECK-INST: beqi a1, 256, LBL0 +# CHECK: encoding: [0x26,0xf1,A] +_beqi a1, 256, LBL0 + # CHECK-INST: beqi a11, -1, LBL0 # CHECK: encoding: [0x26,0x0b,A] beqi a11, -1, LBL0 +# CHECK-INST: beqi a11, -1, LBL0 +# CHECK: encoding: [0x26,0x0b,A] +_beqi a11, -1, LBL0 + # Instruction format BRI12 # CHECK-INST: beqz a8, LBL0 # CHECK: encoding: [0x16,0bAAAA1000,A] beqz a8, LBL0 +# Instruction format BRI12 +# CHECK-INST: beqz a8, LBL0 +# CHECK: encoding: [0x16,0bAAAA1000,A] +_beqz a8, LBL0 + # Instruction format RRI8 # CHECK-INST: bge a14, a2, LBL0 # CHECK: encoding: [0x27,0xae,A] bge a14, a2, LBL0 +# Instruction format RRI8 +# CHECK-INST: bge a14, a2, LBL0 +# CHECK: encoding: [0x27,0xae,A] +_bge a14, a2, LBL0 + # Instruction format BRI8 # CHECK-INST: bgei a11, -1, LBL0 # CHECK: encoding: [0xe6,0x0b,A] bgei a11, -1, LBL0 +# Instruction format BRI8 +# CHECK-INST: bgei a11, -1, LBL0 +# CHECK: encoding: [0xe6,0x0b,A] +_bgei a11, -1, LBL0 + # CHECK-INST: bgei a11, 128, LBL0 # CHECK: encoding: 
[0xe6,0xeb,A] bgei a11, 128, LBL0 +# CHECK-INST: bgei a11, 128, LBL0 +# CHECK: encoding: [0xe6,0xeb,A] +_bgei a11, 128, LBL0 + # Instruction format RRI8 # CHECK-INST: bgeu a14, a2, LBL0 # CHECK: encoding: [0x27,0xbe,A] bgeu a14, a2, LBL0 +# Instruction format RRI8 +# CHECK-INST: bgeu a14, a2, LBL0 +# CHECK: encoding: [0x27,0xbe,A] +_bgeu a14, a2, LBL0 + # CHECK-INST: bgeu a13, a1, LBL0 # CHECK: encoding: [0x17,0xbd,A] bgeu a13, a1, LBL0 +# CHECK-INST: bgeu a13, a1, LBL0 +# CHECK: encoding: [0x17,0xbd,A] +_bgeu a13, a1, LBL0 + # Instruction format BRI8 # CHECK-INST: bgeui a9, 32768, LBL0 # CHECK: encoding: [0xf6,0x09,A] bgeui a9, 32768, LBL0 +# Instruction format BRI8 +# CHECK-INST: bgeui a9, 32768, LBL0 +# CHECK: encoding: [0xf6,0x09,A] +_bgeui a9, 32768, LBL0 + # CHECK-INST: bgeui a7, 65536, LBL0 # CHECK: encoding: [0xf6,0x17,A] bgeui a7, 65536, LBL0 +# CHECK-INST: bgeui a7, 65536, LBL0 +# CHECK: encoding: [0xf6,0x17,A] +_bgeui a7, 65536, LBL0 + # CHECK-INST: bgeui a7, 64, LBL0 # CHECK: encoding: [0xf6,0xd7,A] bgeui a7, 64, LBL0 +# CHECK-INST: bgeui a7, 64, LBL0 +# CHECK: encoding: [0xf6,0xd7,A] +_bgeui a7, 64, LBL0 + # Instruction format BRI12 # CHECK-INST: bgez a8, LBL0 # CHECK: encoding: [0xd6,0bAAAA1000,A] bgez a8, LBL0 +# Instruction format BRI12 +# CHECK-INST: bgez a8, LBL0 +# CHECK: encoding: [0xd6,0bAAAA1000,A] +_bgez a8, LBL0 + # Instruction format RRI8 # CHECK-INST: blt a14, a2, LBL0 # CHECK: encoding: [0x27,0x2e,A] blt a14, a2, LBL0 +# Instruction format RRI8 +# CHECK-INST: blt a14, a2, LBL0 +# CHECK: encoding: [0x27,0x2e,A] +_blt a14, a2, LBL0 + # Instruction format BRI8 # CHECK-INST: blti a12, -1, LBL0 # CHECK: encoding: [0xa6,0x0c,A] blti a12, -1, LBL0 +# Instruction format BRI8 +# CHECK-INST: blti a12, -1, LBL0 +# CHECK: encoding: [0xa6,0x0c,A] +_blti a12, -1, LBL0 + # CHECK-INST: blti a0, 32, LBL0 # CHECK: encoding: [0xa6,0xc0,A] blti a0, 32, LBL0 +# CHECK-INST: blti a0, 32, LBL0 +# CHECK: encoding: [0xa6,0xc0,A] +_blti a0, 32, LBL0 + # Instruction 
format BRI8 # CHECK-INST: bltui a7, 16, LBL0 # CHECK: encoding: [0xb6,0xb7,A] bltui a7, 16, LBL0 +# Instruction format BRI8 +# CHECK-INST: bltui a7, 16, LBL0 +# CHECK: encoding: [0xb6,0xb7,A] +_bltui a7, 16, LBL0 + # Instruction format BRI12 # CHECK-INST: bltz a6, LBL0 # CHECK: encoding: [0x96,0bAAAA0110,A] bltz a6, LBL0 +# Instruction format BRI12 +# CHECK-INST: bltz a6, LBL0 +# CHECK: encoding: [0x96,0bAAAA0110,A] +_bltz a6, LBL0 + # Instruction format RRI8 # CHECK-INST: bne a3, a4, LBL0 # CHECK: encoding: [0x47,0x93,A] bne a3, a4, LBL0 +# Instruction format RRI8 +# CHECK-INST: bne a3, a4, LBL0 +# CHECK: encoding: [0x47,0x93,A] +_bne a3, a4, LBL0 + # Instruction format BRI8 # CHECK-INST: bnei a5, 12, LBL0 # CHECK: encoding: [0x66,0xa5,A] bnei a5, 12, LBL0 +# Instruction format BRI8 +# CHECK-INST: bnei a5, 12, LBL0 +# CHECK: encoding: [0x66,0xa5,A] +_bnei a5, 12, LBL0 + # Instruction format BRI12 # CHECK-INST: bnez a5, LBL0 # CHECK: encoding: [0x56,0bAAAA0101,A] bnez a5, LBL0 + +# Instruction format BRI12 +# CHECK-INST: bnez a5, LBL0 +# CHECK: encoding: [0x56,0bAAAA0101,A] +_bnez a5, LBL0 + diff --git a/llvm/test/MC/Xtensa/Core/invalid.s b/llvm/test/MC/Xtensa/Core/invalid.s index b36f3509ea9bc..b8701e147dc06 100644 --- a/llvm/test/MC/Xtensa/Core/invalid.s +++ b/llvm/test/MC/Xtensa/Core/invalid.s @@ -17,8 +17,8 @@ addmi a1, a2, 33 # CHECK: :[[#@LINE-1]]:15: error: expected immediate in range [-32768, 32512], first 8 bits should be zero # shimm1_31 -slli a1, a2, 0 -# CHECK: :[[#@LINE-1]]:14: error: expected immediate in range [1, 31] +_slli a1, a2, 0 +# CHECK: :[[#@LINE-1]]:15: error: expected immediate in range [1, 31] # uimm4 _srli a1, a2, 16 @@ -122,3 +122,6 @@ bltui 16, a7, LBL0 # CHECK: :[[#@LINE-1]]:7: error: invalid operand for instruction bltui a7, LBL0, 16 # CHECK: :[[#@LINE-1]]:19: error: unknown operand + +_movi a1, -2059 +# CHECK: :[[#@LINE-1]]:11: error: expected immediate in range [-2048, 2047] \ No newline at end of file diff --git 
a/llvm/test/MC/Xtensa/Core/memory.s b/llvm/test/MC/Xtensa/Core/memory.s index 1e5a457828331..876fe5871d672 100644 --- a/llvm/test/MC/Xtensa/Core/memory.s +++ b/llvm/test/MC/Xtensa/Core/memory.s @@ -25,6 +25,11 @@ l16ui a4, sp, 6 # CHECK: encoding: [0x52,0x21,0x02] l32i a5, sp, 8 +# Instruction format RRI8 +# CHECK-INST: l32i a5, a1, 8 +# CHECK: encoding: [0x52,0x21,0x08] +_l32i a5, sp, 8 + # Instruction format RRI8 # CHECK-INST: s8i a2, a1, 3 # CHECK: encoding: [0x22,0x41,0x03] @@ -39,3 +44,8 @@ s16i a3, sp, 4 # CHECK-INST: s32i a5, a1, 8 # CHECK: encoding: [0x52,0x61,0x02] s32i a5, sp, 8 + +# Instruction format RRI8 +# CHECK-INST: s32i a5, a1, 8 +# CHECK: encoding: [0x52,0x61,0x08] +_s32i a5, sp, 8 diff --git a/llvm/test/MC/Xtensa/Core/shift.s b/llvm/test/MC/Xtensa/Core/shift.s index fbe00dc107d80..21c0e11b10366 100644 --- a/llvm/test/MC/Xtensa/Core/shift.s +++ b/llvm/test/MC/Xtensa/Core/shift.s @@ -20,6 +20,11 @@ sll a10, a11 # CHECK: encoding: [0x10,0x51,0x11] slli a5, a1, 15 +# Instruction format RRR +# CHECK-INST: or a5, a1, a1 +# CHECK: encoding: [0x10,0x51,0x20] +slli a5, a1, 0 + # Instruction format RRR # CHECK-INST: sra a12, a3 # CHECK: encoding: [0x30,0xc0,0xb1] From d34fe2fd160183d557f53fb864b397f4273fca2d Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 16 Oct 2024 12:32:55 +0300 Subject: [PATCH 241/289] [Clang][Xtensa] Improve esp baremetal toolchain. Fix tests. 
--- clang/lib/Driver/ToolChains/EspBareMetal.cpp | 5 +++ clang/lib/Driver/ToolChains/EspBareMetal.h | 2 + .../CodeGen/Xtensa/xtensa-ee-intrinsics.c | 2 +- .../bin/riscv32-esp-elf-clang-ld | 0 clang/test/Driver/baremetal-esp.cpp | 42 ++++++++++--------- clang/test/Driver/baremetal-sysroot.cpp | 8 +++- 6 files changed, 36 insertions(+), 23 deletions(-) mode change 100644 => 100755 clang/test/Driver/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-clang-ld diff --git a/clang/lib/Driver/ToolChains/EspBareMetal.cpp b/clang/lib/Driver/ToolChains/EspBareMetal.cpp index cbfbcc4a0afbe..4c47238b3570c 100644 --- a/clang/lib/Driver/ToolChains/EspBareMetal.cpp +++ b/clang/lib/Driver/ToolChains/EspBareMetal.cpp @@ -163,6 +163,11 @@ EspBareMetal::getMultilibFlags(const llvm::opt::ArgList &Args) const { return Result; } +std::string EspBareMetal::getCompilerRTPath() const { + SmallString<128> Dir(getLibraryPaths().back()); + return std::string(Dir); +} + Tool *EspBareMetal::buildLinker() const { return new tools::baremetal::esp::Linker(*this); } diff --git a/clang/lib/Driver/ToolChains/EspBareMetal.h b/clang/lib/Driver/ToolChains/EspBareMetal.h index 5618564c0a4e9..f8baed6c00ae2 100644 --- a/clang/lib/Driver/ToolChains/EspBareMetal.h +++ b/clang/lib/Driver/ToolChains/EspBareMetal.h @@ -51,6 +51,8 @@ class LLVM_LIBRARY_VISIBILITY EspBareMetal : public BareMetal { virtual Multilib::flags_list getMultilibFlags(const llvm::opt::ArgList &) const override; + std::string getCompilerRTPath() const override; + private: bool IsIntegratedAsm = true; }; diff --git a/clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c b/clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c index 3624bff2b318a..442174c73548a 100644 --- a/clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c +++ b/clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple xtensa -S -emit-llvm -O0 -o - %s \ +// RUN: %clang_cc1 -triple xtensa -emit-llvm -O0 -o - %s \ // RUN: | FileCheck %s #include diff --git 
a/clang/test/Driver/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-clang-ld b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/bin/riscv32-esp-elf-clang-ld old mode 100644 new mode 100755 diff --git a/clang/test/Driver/baremetal-esp.cpp b/clang/test/Driver/baremetal-esp.cpp index 140df519ca951..41d7e882483a8 100644 --- a/clang/test/Driver/baremetal-esp.cpp +++ b/clang/test/Driver/baremetal-esp.cpp @@ -42,7 +42,7 @@ // CHECK-ESP-RV32IMAC-SAME: "-Lsome{{[/\\]+}}directory{{[/\\]+}}user{{[/\\]+}}asked{{[/\\]+}}for" // CHECK-ESP-RV32IMAC-SAME: "-L[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}lib" // CHECK-ESP-RV32IMAC-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" -// CHECK-ESP-RV32IMAC-SAME: "-lclang_rt.builtins" +// CHECK-ESP-RV32IMAC-SAME: "{{[^"]*}}libclang_rt.builtins.a" // RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf -fno-integrated-as \ // RUN: -L some/directory/user/asked/for \ @@ -65,11 +65,12 @@ // CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-Lsome{{[/\\]+}}directory{{[/\\]+}}user{{[/\\]+}}asked{{[/\\]+}}for" // CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-L[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}lib" // CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" -// CHECK-ESP-RV32IMAC-FORCEAS-SAME: "-lclang_rt.builtins" +// CHECK-ESP-RV32IMAC-FORCEAS-SAME: "{{[^"]*}}libclang_rt.builtins.a" // RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf --ld-path=riscv32-esp-elf-clang-ld \ // RUN: -L some/directory/user/asked/for \ // RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: -ccc-install-dir %t/basic_riscv32_esp_tree/bin \ // RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAC-FORCELD %s // CHECK-ESP-RV32IMAC-FORCELD: "-isysroot" "[[SYSROOT:[^"]*]]" // CHECK-ESP-RV32IMAC-FORCELD: riscv32-esp-elf-clang-ld{{(.exe)?}}" @@ -80,7 +81,7 @@ // 
CHECK-ESP-RV32IMAC-FORCELD-SAME: "-Lsome{{[/\\]+}}directory{{[/\\]+}}user{{[/\\]+}}asked{{[/\\]+}}for" // CHECK-ESP-RV32IMAC-FORCELD-SAME: "-L[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}lib" // CHECK-ESP-RV32IMAC-FORCELD-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" -// CHECK-ESP-RV32IMAC-FORCELD-SAME: "-lclang_rt.builtins" +// CHECK-ESP-RV32IMAC-FORCELD-SAME: "{{[^"]*}}libclang_rt.builtins.a" // RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf \ // RUN: -nostdlibinc -nobuiltininc \ @@ -96,7 +97,7 @@ // RUN: -rtlib=libgcc \ // RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ // RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAC-LIBGCC %s -// CHECK-ESP-RV32IMAC-LIBGCC-NOT: "-lclang_rt.builtins" +// CHECK-ESP-RV32IMAC-LIBGCC-NOT: "{{[^"]*}}libclang_rt.builtins.a" // CHECK-ESP-RV32IMAC-LIBGCC: "-lgcc" // RUN: %t/basic_riscv32_esp_tree/bin/clang --driver-mode=g++ %s -### 2>&1 --target=riscv32-esp-elf \ @@ -121,7 +122,7 @@ // CHECK-ESP-RV32IMAC-DEFAULTSTDCXX-SAME: "-L[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}lib" // CHECK-ESP-RV32IMAC-DEFAULTSTDCXX-SAME: "-lstdc++" // CHECK-ESP-RV32IMAC-DEFAULTSTDCXX-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" -// CHECK-ESP-RV32IMAC-DEFAULTSTDCXX-SAME: "-lclang_rt.builtins" +// CHECK-ESP-RV32IMAC-DEFAULTSTDCXX-SAME: "{{[^"]*}}libclang_rt.builtins.a" // RUN: %t/basic_riscv32_esp_tree/bin/clang --driver-mode=g++ %s -### 2>&1 --target=riscv32-esp-elf \ // RUN: -stdlib=libc++ \ @@ -141,7 +142,7 @@ // CHECK-ESP-RV32IMAC-LIBCXX-SAME: "-L[[SYSROOT]]{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}lib" // CHECK-ESP-RV32IMAC-LIBCXX-SAME: "-lc++" "-lc++abi" "-lunwind" // CHECK-ESP-RV32IMAC-LIBCXX-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" -// CHECK-ESP-RV32IMAC-LIBCXX-SAME: "-lclang_rt.builtins" +// 
CHECK-ESP-RV32IMAC-LIBCXX-SAME: "{{[^"]*}}libclang_rt.builtins.a" // RUN: %t/basic_riscv32_esp_tree/bin/clang --driver-mode=g++ %s -### 2>&1 --target=riscv32-esp-elf \ // RUN: -nodefaultlibs \ @@ -291,8 +292,8 @@ // RUN: --target=riscv32-esp-elf \ // RUN: --sysroot=%T/baremetal_clang_rt_noarch \ // RUN: | FileCheck --check-prefix=CHECK-ESP-RV32_CLANGRT-NOARCH %s -// CHECK-ESP-RV32_CLANGRT-NOARCH: "-lclang_rt.builtins" -// CHECK-ESP-RV32_CLANGRT-NOARCH-NOT: "-lclang_rt.builtins-riscv32" +// CHECK-ESP-RV32_CLANGRT-NOARCH: "{{[^"]*}}libclang_rt.builtins.a" +// CHECK-ESP-RV32_CLANGRT-NOARCH-NOT: "{{[^"]*}}libclang_rt.builtins-riscv32.a" // RUN: rm -rf %T/baremetal_clang_rt_arch // RUN: mkdir -p %T/baremetal_clang_rt_arch/lib // RUN: touch %T/baremetal_clang_rt_arch/lib/libclang_rt.builtins-riscv32.a @@ -300,8 +301,8 @@ // RUN: --target=riscv32-esp-elf \ // RUN: --sysroot=%T/baremetal_clang_rt_arch \ // RUN: | FileCheck --check-prefix=CHECK-ESP-RV32-CLANGRT-ARCH %s -// CHECK-ESP-RV32-CLANGRT-ARCH: "-lclang_rt.builtins-riscv32" -// CHECK-ESP-RV32-CLANGRT-ARCH-NOT: "-lclang_rt.builtins" +// CHECK-ESP-RV32-CLANGRT-ARCH: "{{[^"]*}}libclang_rt.builtins-riscv32.a" +// CHECK-ESP-RV32-CLANGRT-ARCH-NOT: "{{[^"]*}}libclang_rt.builtins.a" //////////////////// XTENSA ///////////////////////// @@ -348,7 +349,7 @@ // CHECK-ESP-ESP32-SAME: "-Lsome{{[/\\]+}}directory{{[/\\]+}}user{{[/\\]+}}asked{{[/\\]+}}for" // CHECK-ESP-ESP32-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}lib" // CHECK-ESP-ESP32-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" -// CHECK-ESP-ESP32-SAME: "-lclang_rt.builtins" +// CHECK-ESP-ESP32-SAME: "{{[^"]*}}libclang_rt.builtins.a" // RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf -fno-integrated-as \ // RUN: -L some/directory/user/asked/for \ @@ -370,11 +371,12 @@ // CHECK-ESP-ESP32-FORCEAS-SAME: "-Lsome{{[/\\]+}}directory{{[/\\]+}}user{{[/\\]+}}asked{{[/\\]+}}for" // 
CHECK-ESP-ESP32-FORCEAS-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}lib" // CHECK-ESP-ESP32-FORCEAS-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" -// CHECK-ESP-ESP32-FORCEAS-SAME: "-lclang_rt.builtins" +// CHECK-ESP-ESP32-FORCEAS-SAME: "{{[^"]*}}libclang_rt.builtins.a" // RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf --ld-path=xtensa-esp32-elf-clang-ld \ // RUN: -L some/directory/user/asked/for \ // RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ +// RUN: -ccc-install-dir %t/basic_xtensa_esp_tree/bin \ // RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32-FORCELD %s // CHECK-ESP-ESP32-FORCELD: "-isysroot" "[[SYSROOT:[^"]*]]" // CHECK-ESP-ESP32-FORCELD-NEXT: xtensa-esp32-elf-clang-ld{{(.exe)?}}" @@ -384,7 +386,7 @@ // CHECK-ESP-ESP32-FORCELD-SAME: "-Lsome{{[/\\]+}}directory{{[/\\]+}}user{{[/\\]+}}asked{{[/\\]+}}for" // CHECK-ESP-ESP32-FORCELD-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}lib" // CHECK-ESP-ESP32-FORCELD-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" -// CHECK-ESP-ESP32-FORCELD-SAME: "-lclang_rt.builtins" +// CHECK-ESP-ESP32-FORCELD-SAME: "{{[^"]*}}libclang_rt.builtins.a" // RUN: %t/basic_xtensa_esp_tree/bin/clang %s -### 2>&1 --target=xtensa-esp-elf \ // RUN: -nostdlibinc -nobuiltininc \ @@ -400,7 +402,7 @@ // RUN: -rtlib=libgcc \ // RUN: --sysroot=%t/basic_xtensa_esp_tree/lib/clang-runtimes \ // RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32-LIBGCC %s -// CHECK-ESP-ESP32-LIBGCC-NOT: "-lclang_rt.builtins" +// CHECK-ESP-ESP32-LIBGCC-NOT: "{{[^"]*}}libclang_rt.builtins.a" // CHECK-ESP-ESP32-LIBGCC: "-lgcc" // RUN: %t/basic_xtensa_esp_tree/bin/clang --driver-mode=g++ %s -### 2>&1 --target=xtensa-esp-elf \ @@ -424,7 +426,7 @@ // CHECK-ESP-ESP32-DEFAULTSTDCXX-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}lib" // CHECK-ESP-ESP32-DEFAULTSTDCXX-SAME: "-lstdc++" // 
CHECK-ESP-ESP32-DEFAULTSTDCXX-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" -// CHECK-ESP-ESP32-DEFAULTSTDCXX-SAME: "-lclang_rt.builtins" +// CHECK-ESP-ESP32-DEFAULTSTDCXX-SAME: "{{[^"]*}}libclang_rt.builtins.a" // RUN: %t/basic_xtensa_esp_tree/bin/clang --driver-mode=g++ %s -### 2>&1 --target=xtensa-esp-elf \ // RUN: -stdlib=libc++ \ @@ -443,7 +445,7 @@ // CHECK-ESP-ESP32-LIBCXX-SAME: "-L[[SYSROOT]]{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}lib" // CHECK-ESP-ESP32-LIBCXX-SAME: "-lc++" "-lc++abi" "-lunwind" // CHECK-ESP-ESP32-LIBCXX-SAME: "-lm" "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" -// CHECK-ESP-ESP32-LIBCXX-SAME: "-lclang_rt.builtins" +// CHECK-ESP-ESP32-LIBCXX-SAME: "{{[^"]*}}libclang_rt.builtins.a" // RUN: %t/basic_xtensa_esp_tree/bin/clang --driver-mode=g++ %s -### 2>&1 --target=xtensa-esp-elf \ // RUN: -nodefaultlibs \ @@ -604,8 +606,8 @@ // RUN: --target=xtensa-esp-elf \ // RUN: --sysroot=%T/baremetal_clang_rt_noarch \ // RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32_CLANGRT-NOARCH %s -// CHECK-ESP-ESP32_CLANGRT-NOARCH: "-lclang_rt.builtins" -// CHECK-ESP-ESP32_CLANGRT-NOARCH-NOT: "-lclang_rt.builtins-xtensa" +// CHECK-ESP-ESP32_CLANGRT-NOARCH: "{{[^"]*}}libclang_rt.builtins.a" +// CHECK-ESP-ESP32_CLANGRT-NOARCH-NOT: "{{[^"]*}}libclang_rt.builtins-xtensa.a" // Check that compiler-rt library with the arch filename suffix will be // used if present. 
@@ -616,5 +618,5 @@ // RUN: --target=xtensa-esp-elf \ // RUN: --sysroot=%T/baremetal_clang_rt_arch \ // RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32-CLANGRT-ARCH %s -// CHECK-ESP-ESP32-CLANGRT-ARCH: "-lclang_rt.builtins-xtensa" -// CHECK-ESP-ESP32-CLANGRT-ARCH-NOT: "-lclang_rt.builtins" +// CHECK-ESP-ESP32-CLANGRT-ARCH: "{{[^"]*}}libclang_rt.builtins-xtensa.a" +// CHECK-ESP-ESP32-CLANGRT-ARCH-NOT: "{{[^"]*}}libclang_rt.builtins.a" diff --git a/clang/test/Driver/baremetal-sysroot.cpp b/clang/test/Driver/baremetal-sysroot.cpp index e79f353b1d84f..e88ad7d6c8814 100644 --- a/clang/test/Driver/baremetal-sysroot.cpp +++ b/clang/test/Driver/baremetal-sysroot.cpp @@ -25,6 +25,8 @@ // RUN: mkdir -p %T/baremetal_default_sysroot/bin // RUN: mkdir -p %T/baremetal_default_sysroot/lib/clang-runtimes/riscv32-esp-unknown-elf/include/c++/11.2.0 // RUN: mkdir -p %T/baremetal_default_sysroot/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32/include/c++/11.2.0 +// RUN: mkdir -p %T/baremetal_default_sysroot/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32/lib/ +// RUN: touch %T/baremetal_default_sysroot/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32/lib/libclang_rt.builtins-riscv32.a // RUN: echo "MultilibVersion: '1.0'" > %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml // RUN: echo "Variants:" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml // RUN: echo "- Dir: riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml @@ -49,12 +51,14 @@ // CHECK-ESP-RV32IMAC-C-NEXT: "{{[^"]*}}ld{{(\.(lld|bfd|gold))?}}{{(\.exe)?}}" "-m" "elf32lriscv" // CHECK-ESP-RV32IMAC-C-SAME: "-o" "{{.*}}.o" // CHECK-ESP-RV32IMAC-C-SAME: "-L{{.*}}/baremetal_default_sysroot{{[/\\]+}}bin{{[/\\]+}}..{{[/\\]+}}lib{{[/\\]+}}clang-runtimes{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}lib" -// CHECK-ESP-RV32IMAC-C-SAME: 
"--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "-lclang_rt.builtins-riscv32" +// CHECK-ESP-RV32IMAC-C-SAME: "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "{{[^"]*}}libclang_rt.builtins-riscv32.a" // RUN: rm -rf %T/baremetal_default_sysroot // RUN: mkdir -p %T/baremetal_default_sysroot/bin // RUN: mkdir -p %T/baremetal_default_sysroot/lib/clang-runtimes/xtensa-esp-unknown-elf/include/c++/11.2.0 // RUN: mkdir -p %T/baremetal_default_sysroot/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32/include/c++/11.2.0 +// RUN: mkdir -p %T/baremetal_default_sysroot/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32/lib/ +// RUN: touch %T/baremetal_default_sysroot/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32/lib/libclang_rt.builtins-xtensa.a // RUN: echo "MultilibVersion: '1.0'" > %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml // RUN: echo "Variants:" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml // RUN: echo "- Dir: xtensa-esp-unknown-elf/esp32" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml @@ -74,4 +78,4 @@ // CHECK-ESP-ESP32-C-NEXT: "{{[^"]*}}ld{{(\.(lld|bfd|gold))?}}{{(\.exe)?}}" // CHECK-ESP-ESP32-C-SAME: "-o" "{{.*}}.o" // CHECK-ESP-ESP32-C-SAME: "-L{{.*}}/baremetal_default_sysroot{{[/\\]+}}bin{{[/\\]+}}..{{[/\\]+}}lib{{[/\\]+}}clang-runtimes{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}lib" -// CHECK-ESP-ESP32-C-SAME: "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "-lclang_rt.builtins-xtensa" +// CHECK-ESP-ESP32-C-SAME: "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "{{[^"]*}}libclang_rt.builtins-xtensa.a" From acc30199531009906e70048b0be95d3e642070dc Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Wed, 11 Sep 2024 01:57:28 +0300 Subject: [PATCH 242/289] [Xtensa] Fix atomic store operands order It was changed in LLVM 18 --- llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 6 +- llvm/test/CodeGen/Xtensa/atomic-load-store.ll | 107 ++++++++++++++++++ 2 files changed, 110 
insertions(+), 3 deletions(-) create mode 100644 llvm/test/CodeGen/Xtensa/atomic-load-store.ll diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index e7b68e1f2cea1..c79d9fab1a3a8 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1792,9 +1792,9 @@ def : Pat<(i32 (atomic_load_8 addr_ish1:$addr)), (L8UI addr_ish1:$addr)>; def : Pat<(i32 (atomic_load_16 addr_ish2:$addr)), (L16UI addr_ish2:$addr)>; def : Pat<(i32 (atomic_load_32 addr_ish4:$addr)), (L32I addr_ish4:$addr)>; -def : Pat<(atomic_store_8 addr_ish1:$addr, AR:$t), (S8I AR:$t, addr_ish1:$addr)>; -def : Pat<(atomic_store_16 addr_ish2:$addr, AR:$t), (S16I AR:$t, addr_ish2:$addr)>; -def : Pat<(atomic_store_32 addr_ish4:$addr, AR:$t), (S32I AR:$t, addr_ish4:$addr)>; +def : Pat<(atomic_store_8 AR:$t, addr_ish1:$addr), (S8I AR:$t, addr_ish1:$addr)>; +def : Pat<(atomic_store_16 AR:$t, addr_ish2:$addr), (S16I AR:$t, addr_ish2:$addr)>; +def : Pat<(atomic_store_32 AR:$t, addr_ish4:$addr), (S32I AR:$t, addr_ish4:$addr)>; let usesCustomInserter = 1, Predicates = [HasS32C1I] in { def ATOMIC_CMP_SWAP_8_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$cmp, AR:$swap), diff --git a/llvm/test/CodeGen/Xtensa/atomic-load-store.ll b/llvm/test/CodeGen/Xtensa/atomic-load-store.ll new file mode 100644 index 0000000000000..8047fd2914d21 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/atomic-load-store.ll @@ -0,0 +1,107 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s | FileCheck %s --check-prefixes=XTENSA,XTENSA_OPT +; RUN: llc -mtriple=xtensa -O0 < %s | FileCheck %s --check-prefixes=XTENSA,XTENSA_OPT_NONE + +define void @store32(ptr %ptr, i32 %val1) { +; XTENSA-LABEL: store32: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: memw +; XTENSA-NEXT: s32i.n a3, a2, 0 +; XTENSA-NEXT: memw +; XTENSA-NEXT: retw.n + store atomic i32 %val1, ptr %ptr seq_cst, align 4 + 
ret void +} + +define i32 @load32(ptr %ptr) { +; XTENSA-LABEL: load32: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: l32i.n a2, a2, 0 +; XTENSA-NEXT: memw +; XTENSA-NEXT: retw.n + %val = load atomic i32, ptr %ptr seq_cst, align 4 + ret i32 %val +} + +define i8 @load8(ptr %p) { +; XTENSA-LABEL: load8: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: l8ui a2, a2, 0 +; XTENSA-NEXT: memw +; XTENSA-NEXT: retw.n + %v = load atomic i8, ptr %p seq_cst, align 1 + ret i8 %v +} + +define void @store8(ptr %p, i8 %val1) { +; XTENSA_OPT-LABEL: store8: +; XTENSA_OPT: entry a1, 32 +; XTENSA_OPT-NEXT: memw +; XTENSA_OPT-NEXT: s8i a3, a2, 0 +; XTENSA_OPT-NEXT: memw +; XTENSA_OPT-NEXT: retw.n +; +; XTENSA_OPT_NONE-LABEL: store8: +; XTENSA_OPT_NONE: entry a1, 32 +; XTENSA_OPT_NONE-NEXT: # kill: def $a8 killed $a3 +; XTENSA_OPT_NONE-NEXT: memw +; XTENSA_OPT_NONE-NEXT: s8i a3, a2, 0 +; XTENSA_OPT_NONE-NEXT: memw +; XTENSA_OPT_NONE-NEXT: retw.n + store atomic i8 %val1, ptr %p seq_cst, align 1 + ret void +} + +define i16 @load16(ptr %p) { +; XTENSA-LABEL: load16: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: l16ui a2, a2, 0 +; XTENSA-NEXT: memw +; XTENSA-NEXT: retw.n + %v = load atomic i16, ptr %p seq_cst, align 2 + ret i16 %v +} + +define void @store16(ptr %p, i16 %val1) { +; XTENSA_OPT-LABEL: store16: +; XTENSA_OPT: entry a1, 32 +; XTENSA_OPT-NEXT: memw +; XTENSA_OPT-NEXT: s16i a3, a2, 0 +; XTENSA_OPT-NEXT: memw +; XTENSA_OPT-NEXT: retw.n +; +; XTENSA_OPT_NONE-LABEL: store16: +; XTENSA_OPT_NONE: entry a1, 32 +; XTENSA_OPT_NONE-NEXT: # kill: def $a8 killed $a3 +; XTENSA_OPT_NONE-NEXT: memw +; XTENSA_OPT_NONE-NEXT: s16i a3, a2, 0 +; XTENSA_OPT_NONE-NEXT: memw +; XTENSA_OPT_NONE-NEXT: retw.n + store atomic i16 %val1, ptr %p seq_cst, align 2 + ret void +} + +define void @test1(ptr %ptr1, ptr %ptr2) { +; XTENSA-LABEL: test1: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: l8ui a8, a2, 0 +; XTENSA-NEXT: s8i a8, a3, 0 +; XTENSA-NEXT: retw.n + %val = load atomic i8, ptr %ptr1 unordered, align 1 + store atomic i8 %val, ptr 
%ptr2 unordered, align 1 + ret void +} + +define void @test2(ptr %ptr1, ptr %ptr2) { +; XTENSA-LABEL: test2: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: l8ui a8, a2, 0 +; XTENSA-NEXT: memw +; XTENSA-NEXT: memw +; XTENSA-NEXT: s8i a8, a3, 0 +; XTENSA-NEXT: memw +; XTENSA-NEXT: retw.n + %val = load atomic i8, ptr %ptr1 seq_cst, align 1 + store atomic i8 %val, ptr %ptr2 seq_cst, align 1 + ret void +} From e58e877117b3bae1122b290a4b9627ad856493ab Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Wed, 21 Aug 2024 18:04:35 +0300 Subject: [PATCH 243/289] toolchain/esp: Add test for 'xesppie' multilib mapping for esp32-p4 --- .../lib/clang-runtimes/multilib.yaml | 9 +++++++++ clang/test/Driver/baremetal-esp.cpp | 6 ++++++ 2 files changed, 15 insertions(+) diff --git a/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/multilib.yaml b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/multilib.yaml index c4123d3cfc039..f882427769012 100644 --- a/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/multilib.yaml +++ b/clang/test/Driver/Inputs/basic_riscv32_esp_tree/lib/clang-runtimes/multilib.yaml @@ -246,3 +246,12 @@ Mappings: - Match: -march=rv32gc Flags: - -march=rv32imafc_zicsr_zifencei + +# Below is a workaround to support multilib for xesppie variant +# Currently it is added for ESP32-P4 arch only +- Match: -march=rv32imafc_xesppie + Flags: + - -march=rv32imafc_zicsr_zifencei +- Match: -march=rv32imafc_zicsr_zifencei_xesppie + Flags: + - -march=rv32imafc_zicsr_zifencei diff --git a/clang/test/Driver/baremetal-esp.cpp b/clang/test/Driver/baremetal-esp.cpp index 41d7e882483a8..5500b3379d9be 100644 --- a/clang/test/Driver/baremetal-esp.cpp +++ b/clang/test/Driver/baremetal-esp.cpp @@ -253,6 +253,12 @@ // RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf -march=rv32gc -mabi=ilp32f \ // RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ // RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAFC 
%s +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf -march=rv32imafc_zicsr_zifencei_xesppie -mabi=ilp32f \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAFC %s +// RUN: %t/basic_riscv32_esp_tree/bin/clang %s -### 2>&1 --target=riscv32-esp-elf -march=rv32imafc_xesppie -mabi=ilp32f \ +// RUN: --sysroot=%t/basic_riscv32_esp_tree/lib/clang-runtimes \ +// RUN: | FileCheck --check-prefix=CHECK-ESP-RV32IMAFC %s // CHECK-ESP-RV32IMAFC: "-cc1" "-triple" "riscv32-esp-unknown-elf" // CHECK-ESP-RV32IMAFC-SAME: "-resource-dir" "[[RESOURCE_DIR:[^"]+]]" // CHECK-ESP-RV32IMAFC-SAME: "-isysroot" "[[SYSROOT:[^"]*]]" From 277220c55864577a62357e4a7b3bf612c3327895 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Tue, 12 Nov 2024 20:05:43 +0300 Subject: [PATCH 244/289] [Clang][Xtensa] Fix Espressif baremetal toolchain tests. --- clang/lib/Driver/ToolChains/EspBareMetal.cpp | 5 ----- clang/lib/Driver/ToolChains/EspBareMetal.h | 2 -- clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c | 2 +- clang/test/Driver/baremetal-esp.cpp | 8 ++++---- clang/test/Driver/baremetal-sysroot.cpp | 8 ++------ 5 files changed, 7 insertions(+), 18 deletions(-) diff --git a/clang/lib/Driver/ToolChains/EspBareMetal.cpp b/clang/lib/Driver/ToolChains/EspBareMetal.cpp index 4c47238b3570c..cbfbcc4a0afbe 100644 --- a/clang/lib/Driver/ToolChains/EspBareMetal.cpp +++ b/clang/lib/Driver/ToolChains/EspBareMetal.cpp @@ -163,11 +163,6 @@ EspBareMetal::getMultilibFlags(const llvm::opt::ArgList &Args) const { return Result; } -std::string EspBareMetal::getCompilerRTPath() const { - SmallString<128> Dir(getLibraryPaths().back()); - return std::string(Dir); -} - Tool *EspBareMetal::buildLinker() const { return new tools::baremetal::esp::Linker(*this); } diff --git a/clang/lib/Driver/ToolChains/EspBareMetal.h b/clang/lib/Driver/ToolChains/EspBareMetal.h index f8baed6c00ae2..5618564c0a4e9 100644 --- 
a/clang/lib/Driver/ToolChains/EspBareMetal.h +++ b/clang/lib/Driver/ToolChains/EspBareMetal.h @@ -51,8 +51,6 @@ class LLVM_LIBRARY_VISIBILITY EspBareMetal : public BareMetal { virtual Multilib::flags_list getMultilibFlags(const llvm::opt::ArgList &) const override; - std::string getCompilerRTPath() const override; - private: bool IsIntegratedAsm = true; }; diff --git a/clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c b/clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c index 442174c73548a..3624bff2b318a 100644 --- a/clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c +++ b/clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple xtensa -emit-llvm -O0 -o - %s \ +// RUN: %clang_cc1 -triple xtensa -S -emit-llvm -O0 -o - %s \ // RUN: | FileCheck %s #include diff --git a/clang/test/Driver/baremetal-esp.cpp b/clang/test/Driver/baremetal-esp.cpp index 5500b3379d9be..f3c813c786f77 100644 --- a/clang/test/Driver/baremetal-esp.cpp +++ b/clang/test/Driver/baremetal-esp.cpp @@ -299,7 +299,7 @@ // RUN: --sysroot=%T/baremetal_clang_rt_noarch \ // RUN: | FileCheck --check-prefix=CHECK-ESP-RV32_CLANGRT-NOARCH %s // CHECK-ESP-RV32_CLANGRT-NOARCH: "{{[^"]*}}libclang_rt.builtins.a" -// CHECK-ESP-RV32_CLANGRT-NOARCH-NOT: "{{[^"]*}}libclang_rt.builtins-riscv32.a" +// CHECK-ESP-RV32_CLANGRT-NOARCH-NOT: "{{[^"]*}}libclang_rt.builtins.a" // RUN: rm -rf %T/baremetal_clang_rt_arch // RUN: mkdir -p %T/baremetal_clang_rt_arch/lib // RUN: touch %T/baremetal_clang_rt_arch/lib/libclang_rt.builtins-riscv32.a @@ -307,7 +307,7 @@ // RUN: --target=riscv32-esp-elf \ // RUN: --sysroot=%T/baremetal_clang_rt_arch \ // RUN: | FileCheck --check-prefix=CHECK-ESP-RV32-CLANGRT-ARCH %s -// CHECK-ESP-RV32-CLANGRT-ARCH: "{{[^"]*}}libclang_rt.builtins-riscv32.a" +// CHECK-ESP-RV32-CLANGRT-ARCH: "{{[^"]*}}libclang_rt.builtins.a" // CHECK-ESP-RV32-CLANGRT-ARCH-NOT: "{{[^"]*}}libclang_rt.builtins.a" @@ -613,7 +613,7 @@ // RUN: --sysroot=%T/baremetal_clang_rt_noarch \ // RUN: | 
FileCheck --check-prefix=CHECK-ESP-ESP32_CLANGRT-NOARCH %s // CHECK-ESP-ESP32_CLANGRT-NOARCH: "{{[^"]*}}libclang_rt.builtins.a" -// CHECK-ESP-ESP32_CLANGRT-NOARCH-NOT: "{{[^"]*}}libclang_rt.builtins-xtensa.a" +// CHECK-ESP-ESP32_CLANGRT-NOARCH-NOT: "{{[^"]*}}libclang_rt.builtins.a" // Check that compiler-rt library with the arch filename suffix will be // used if present. @@ -624,5 +624,5 @@ // RUN: --target=xtensa-esp-elf \ // RUN: --sysroot=%T/baremetal_clang_rt_arch \ // RUN: | FileCheck --check-prefix=CHECK-ESP-ESP32-CLANGRT-ARCH %s -// CHECK-ESP-ESP32-CLANGRT-ARCH: "{{[^"]*}}libclang_rt.builtins-xtensa.a" +// CHECK-ESP-ESP32-CLANGRT-ARCH: "{{[^"]*}}libclang_rt.builtins.a" // CHECK-ESP-ESP32-CLANGRT-ARCH-NOT: "{{[^"]*}}libclang_rt.builtins.a" diff --git a/clang/test/Driver/baremetal-sysroot.cpp b/clang/test/Driver/baremetal-sysroot.cpp index e88ad7d6c8814..8a954a362b6ba 100644 --- a/clang/test/Driver/baremetal-sysroot.cpp +++ b/clang/test/Driver/baremetal-sysroot.cpp @@ -25,8 +25,6 @@ // RUN: mkdir -p %T/baremetal_default_sysroot/bin // RUN: mkdir -p %T/baremetal_default_sysroot/lib/clang-runtimes/riscv32-esp-unknown-elf/include/c++/11.2.0 // RUN: mkdir -p %T/baremetal_default_sysroot/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32/include/c++/11.2.0 -// RUN: mkdir -p %T/baremetal_default_sysroot/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32/lib/ -// RUN: touch %T/baremetal_default_sysroot/lib/clang-runtimes/riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32/lib/libclang_rt.builtins-riscv32.a // RUN: echo "MultilibVersion: '1.0'" > %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml // RUN: echo "Variants:" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml // RUN: echo "- Dir: riscv32-esp-unknown-elf/rv32imac-zicsr-zifencei_ilp32" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml @@ -51,14 +49,12 @@ // CHECK-ESP-RV32IMAC-C-NEXT: 
"{{[^"]*}}ld{{(\.(lld|bfd|gold))?}}{{(\.exe)?}}" "-m" "elf32lriscv" // CHECK-ESP-RV32IMAC-C-SAME: "-o" "{{.*}}.o" // CHECK-ESP-RV32IMAC-C-SAME: "-L{{.*}}/baremetal_default_sysroot{{[/\\]+}}bin{{[/\\]+}}..{{[/\\]+}}lib{{[/\\]+}}clang-runtimes{{[/\\]+}}riscv32-esp-unknown-elf{{[/\\]+}}rv32imac-zicsr-zifencei_ilp32{{[/\\]+}}lib" -// CHECK-ESP-RV32IMAC-C-SAME: "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "{{[^"]*}}libclang_rt.builtins-riscv32.a" +// CHECK-ESP-RV32IMAC-C-SAME: "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "{{[^"]*}}libclang_rt.builtins.a" // RUN: rm -rf %T/baremetal_default_sysroot // RUN: mkdir -p %T/baremetal_default_sysroot/bin // RUN: mkdir -p %T/baremetal_default_sysroot/lib/clang-runtimes/xtensa-esp-unknown-elf/include/c++/11.2.0 // RUN: mkdir -p %T/baremetal_default_sysroot/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32/include/c++/11.2.0 -// RUN: mkdir -p %T/baremetal_default_sysroot/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32/lib/ -// RUN: touch %T/baremetal_default_sysroot/lib/clang-runtimes/xtensa-esp-unknown-elf/esp32/lib/libclang_rt.builtins-xtensa.a // RUN: echo "MultilibVersion: '1.0'" > %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml // RUN: echo "Variants:" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml // RUN: echo "- Dir: xtensa-esp-unknown-elf/esp32" >> %T/baremetal_default_sysroot/lib/clang-runtimes/multilib.yaml @@ -78,4 +74,4 @@ // CHECK-ESP-ESP32-C-NEXT: "{{[^"]*}}ld{{(\.(lld|bfd|gold))?}}{{(\.exe)?}}" // CHECK-ESP-ESP32-C-SAME: "-o" "{{.*}}.o" // CHECK-ESP-ESP32-C-SAME: "-L{{.*}}/baremetal_default_sysroot{{[/\\]+}}bin{{[/\\]+}}..{{[/\\]+}}lib{{[/\\]+}}clang-runtimes{{[/\\]+}}xtensa-esp-unknown-elf{{[/\\]+}}esp32{{[/\\]+}}lib" -// CHECK-ESP-ESP32-C-SAME: "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "{{[^"]*}}libclang_rt.builtins-xtensa.a" +// CHECK-ESP-ESP32-C-SAME: "--start-group" "-lc" "-lgloss" "-lnosys" "--end-group" "{{[^"]*}}libclang_rt.builtins.a" From 
c3524018d44cae3d643c4882a07b602bdfe7710c Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Wed, 13 Nov 2024 17:00:34 +0300 Subject: [PATCH 245/289] [Xtensa][Test] Fix xtensa-ee-intrinsics test --- clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c b/clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c index 3624bff2b318a..442174c73548a 100644 --- a/clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c +++ b/clang/test/CodeGen/Xtensa/xtensa-ee-intrinsics.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple xtensa -S -emit-llvm -O0 -o - %s \ +// RUN: %clang_cc1 -triple xtensa -emit-llvm -O0 -o - %s \ // RUN: | FileCheck %s #include From fd1c569c17da1ba4c2825eff98d870c3c440e722 Mon Sep 17 00:00:00 2001 From: Stefan Stipanovic Date: Wed, 27 Nov 2024 10:20:48 +0100 Subject: [PATCH 246/289] [RISCV] Support for ESP32-P4 instructions in RISCV backend. --- clang/include/clang/Basic/BuiltinsRISCV.td | 2 + .../clang/Basic/BuiltinsRISCVESP32P4.td | 356 + clang/test/CodeGen/RISCV/riscv-esp32p4.c | 1027 + .../Driver/print-supported-extensions-riscv.c | 1 + clang/test/Misc/target-invalid-cpu-note.c | 4 +- llvm/include/llvm/IR/IntrinsicsRISCV.td | 4 + .../include/llvm/IR/IntrinsicsRISCVESP32P4.td | 1065 ++ .../Target/RISCV/AsmParser/RISCVAsmParser.cpp | 97 + llvm/lib/Target/RISCV/CMakeLists.txt | 1 + .../RISCV/Disassembler/RISCVDisassembler.cpp | 152 + .../Target/RISCV/MCTargetDesc/RISCVBaseInfo.h | 5 +- .../RISCV/MCTargetDesc/RISCVInstPrinter.cpp | 116 + .../RISCV/MCTargetDesc/RISCVInstPrinter.h | 20 + .../RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp | 188 +- .../Target/RISCV/RISCVESP32P4ISelLowering.cpp | 8468 +++++++++ llvm/lib/Target/RISCV/RISCVESP32P4Operands.td | 134 + llvm/lib/Target/RISCV/RISCVFeatures.td | 7 + llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 13 +- llvm/lib/Target/RISCV/RISCVISelLowering.h | 4 + .../Target/RISCV/RISCVInstrFormatsESP32P4.td | 42 + 
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 6 + llvm/lib/Target/RISCV/RISCVInstrInfo.td | 1 + .../lib/Target/RISCV/RISCVInstrInfoESP32P4.td | 15603 ++++++++++++++++ llvm/lib/Target/RISCV/RISCVInstrInfoP4HWLP.td | 172 + llvm/lib/Target/RISCV/RISCVProcessors.td | 14 + llvm/lib/Target/RISCV/RISCVRegisterInfo.td | 21 + llvm/test/CodeGen/RISCV/esp32p4.ll | 1289 ++ llvm/test/MC/RISCV/esp32p4-hwlp-valid.s | 22 + llvm/test/MC/RISCV/esp32p4-valid.s | 710 + llvm/test/MC/RISCV/rv64xtheadmemidx-invalid.s | 2 +- .../TargetParser/RISCVISAInfoTest.cpp | 1 + llvm/utils/TableGen/AsmMatcherEmitter.cpp | 87 +- 32 files changed, 29569 insertions(+), 65 deletions(-) create mode 100644 clang/include/clang/Basic/BuiltinsRISCVESP32P4.td create mode 100644 clang/test/CodeGen/RISCV/riscv-esp32p4.c create mode 100644 llvm/include/llvm/IR/IntrinsicsRISCVESP32P4.td create mode 100644 llvm/lib/Target/RISCV/RISCVESP32P4ISelLowering.cpp create mode 100644 llvm/lib/Target/RISCV/RISCVESP32P4Operands.td create mode 100644 llvm/lib/Target/RISCV/RISCVInstrFormatsESP32P4.td create mode 100644 llvm/lib/Target/RISCV/RISCVInstrInfoESP32P4.td create mode 100644 llvm/lib/Target/RISCV/RISCVInstrInfoP4HWLP.td create mode 100644 llvm/test/CodeGen/RISCV/esp32p4.ll create mode 100644 llvm/test/MC/RISCV/esp32p4-hwlp-valid.s create mode 100644 llvm/test/MC/RISCV/esp32p4-valid.s diff --git a/clang/include/clang/Basic/BuiltinsRISCV.td b/clang/include/clang/Basic/BuiltinsRISCV.td index 4cc89a8a9d8af..e157b2253fae9 100644 --- a/clang/include/clang/Basic/BuiltinsRISCV.td +++ b/clang/include/clang/Basic/BuiltinsRISCV.td @@ -146,3 +146,5 @@ let Features = "zihintntl", Attributes = [CustomTypeChecking] in { def ntl_load : RISCVBuiltin<"void(...)">; def ntl_store : RISCVBuiltin<"void(...)">; } // Features = "zihintntl", Attributes = [CustomTypeChecking] + +include "BuiltinsRISCVESP32P4.td" diff --git a/clang/include/clang/Basic/BuiltinsRISCVESP32P4.td b/clang/include/clang/Basic/BuiltinsRISCVESP32P4.td new file mode 100644 
index 0000000000000..cf41cee6bfbee --- /dev/null +++ b/clang/include/clang/Basic/BuiltinsRISCVESP32P4.td @@ -0,0 +1,356 @@ +let Features = "xesppie" in { +def esp_vcmulas_s16_qacc_h : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vcmulas_s16_qacc_h_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vcmulas_s16_qacc_h_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vcmulas_s16_qacc_l : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vcmulas_s16_qacc_l_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vcmulas_s16_qacc_l_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vcmulas_s8_qacc_h : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vcmulas_s8_qacc_h_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vcmulas_s8_qacc_h_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vcmulas_s8_qacc_l : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vcmulas_s8_qacc_l_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vcmulas_s8_qacc_l_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_s16_qacc : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vmulas_s16_qacc_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vmulas_s16_qacc_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_s16_qacc_st_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, int)">; +def esp_vmulas_s16_qacc_st_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, 
unsigned int)">; +def esp_vmulas_s16_xacc : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vmulas_s16_xacc_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vmulas_s16_xacc_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_s16_xacc_st_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, int)">; +def esp_vmulas_s16_xacc_st_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_s8_qacc : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vmulas_s8_qacc_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vmulas_s8_qacc_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_s8_qacc_st_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, int)">; +def esp_vmulas_s8_qacc_st_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_s8_xacc : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vmulas_s8_xacc_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vmulas_s8_xacc_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_s8_xacc_st_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, int)">; +def esp_vmulas_s8_xacc_st_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_u16_qacc : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vmulas_u16_qacc_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vmulas_u16_qacc_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned 
int)">; +def esp_vmulas_u16_qacc_st_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, int)">; +def esp_vmulas_u16_qacc_st_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_u16_xacc : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vmulas_u16_xacc_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vmulas_u16_xacc_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_u16_xacc_st_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, int)">; +def esp_vmulas_u16_xacc_st_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_u8_qacc : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vmulas_u8_qacc_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vmulas_u8_qacc_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_u8_qacc_st_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, int)">; +def esp_vmulas_u8_qacc_st_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_u8_xacc : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vmulas_u8_xacc_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vmulas_u8_xacc_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_u8_xacc_st_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, int)">; +def esp_vmulas_u8_xacc_st_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_s16_qacc_ldbc_incp : RISCVBuiltin<"void(unsigned int, unsigned int, 
unsigned int, unsigned int)">; +def esp_vmulas_s8_qacc_ldbc_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_u16_qacc_ldbc_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_u8_qacc_ldbc_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsmulas_s16_qacc : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsmulas_s16_qacc_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsmulas_s8_qacc : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsmulas_s8_qacc_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsmulas_u16_qacc : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsmulas_u16_qacc_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsmulas_u8_qacc : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsmulas_u8_qacc_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_cmul_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_cmul_s16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_cmul_s16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_cmul_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_cmul_s8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_cmul_s8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_cmul_u16 : 
RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_cmul_u16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_cmul_u16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_cmul_u8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_cmul_u8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_cmul_u8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_max_s16_a : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_max_s32_a : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_max_s8_a : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_max_u16_a : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_max_u32_a : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_max_u8_a : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_min_s16_a : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_min_s32_a : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_min_s8_a : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_min_u16_a : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_min_u32_a : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_min_u8_a : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vabs_16 : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vabs_32 : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vabs_8 : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vadd_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vadd_s16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_s16_st_incp : 
RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_s32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vadd_s32_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_s32_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vadd_s8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_s8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_u16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vadd_u16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_u16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_u32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vadd_u32_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_u32_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_u8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vadd_u8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_u8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vclamp_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vmax_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vmax_s16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; 
+def esp_vmax_s16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_s32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vmax_s32_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_s32_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vmax_s8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_s8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_u16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vmax_u16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_u16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_u32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vmax_u32_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_u32_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_u8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vmax_u8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_u8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vmin_s16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_s16_st_incp : RISCVBuiltin<"void(unsigned int, 
unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_s32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vmin_s32_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_s32_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vmin_s8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_s8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_u16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vmin_u16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_u16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_u32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vmin_u32_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_u32_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_u8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vmin_u8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_u8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmul_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vmul_s16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmul_s16_s8xs8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def 
esp_vmul_s16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmul_s32_s16xs16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmul_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vmul_s8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmul_s8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmul_u16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vmul_u16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmul_u16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmul_u8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vmul_u8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmul_u8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vprelu_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vprelu_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vrelu_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vrelu_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsadds_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsadds_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsadds_u16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsadds_u8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsat_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def 
esp_vsat_s32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsat_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsat_u16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsat_u32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsat_u8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vssubs_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vssubs_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vssubs_u16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vssubs_u8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsub_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsub_s16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_s16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_s32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsub_s32_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_s32_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsub_s8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_s8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_u16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsub_u16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; 
+def esp_vsub_u16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_u32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsub_u32_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_u32_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_u8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsub_u8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_u8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_addx2 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_addx4 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_sat : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_subx2 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_subx4 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_andq : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_notq : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_orq : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_xorq : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vcmp_eq_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vcmp_eq_s32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vcmp_eq_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vcmp_eq_u16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vcmp_eq_u32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vcmp_eq_u8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def 
esp_vcmp_gt_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vcmp_gt_s32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vcmp_gt_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vcmp_gt_u16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vcmp_gt_u32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vcmp_gt_u8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vcmp_lt_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vcmp_lt_s32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vcmp_lt_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vcmp_lt_u16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vcmp_lt_u32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vcmp_lt_u8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_mov_s16_qacc : RISCVBuiltin<"void(unsigned int)">; +def esp_mov_s8_qacc : RISCVBuiltin<"void(unsigned int)">; +def esp_mov_u16_qacc : RISCVBuiltin<"void(unsigned int)">; +def esp_mov_u8_qacc : RISCVBuiltin<"void(unsigned int)">; +def esp_movi_16_a : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_movi_16_q : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_movi_32_a : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_movi_32_q : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_movi_8_a : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_movi_8_q : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_movx_r_cfg : RISCVBuiltin<"void(unsigned int)">; +def esp_movx_r_fft_bit_width : RISCVBuiltin<"void(unsigned int)">; +def esp_movx_r_perf : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_movx_r_sar 
: RISCVBuiltin<"void(unsigned int)">; +def esp_movx_r_sar_bytes : RISCVBuiltin<"void(unsigned int)">; +def esp_movx_r_xacc_h : RISCVBuiltin<"void(unsigned int)">; +def esp_movx_r_xacc_l : RISCVBuiltin<"void(unsigned int)">; +def esp_movx_w_cfg : RISCVBuiltin<"void(unsigned int)">; +def esp_movx_w_fft_bit_width : RISCVBuiltin<"void(unsigned int)">; +def esp_movx_w_perf : RISCVBuiltin<"void(unsigned int)">; +def esp_movx_w_sar : RISCVBuiltin<"void(unsigned int)">; +def esp_movx_w_sar_bytes : RISCVBuiltin<"void(unsigned int)">; +def esp_movx_w_xacc_h : RISCVBuiltin<"void(unsigned int)">; +def esp_movx_w_xacc_l : RISCVBuiltin<"void(unsigned int)">; +def esp_vext_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vext_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vext_u16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vext_u8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vunzip_16 : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vunzip_32 : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vunzip_8 : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vunzipt_16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vunzipt_8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vzip_16 : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vzip_32 : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vzip_8 : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vzipt_16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vzipt_8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_zero_q : RISCVBuiltin<"void(unsigned int)">; +def esp_zero_qacc : RISCVBuiltin<"void()">; +def esp_zero_xacc : RISCVBuiltin<"void()">; +def esp_fft_ams_s16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, 
unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_fft_ams_s16_ld_incp_uaup : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_fft_ams_s16_ld_r32_decp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_fft_ams_s16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_fft_bitrev : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_fft_cmul_s16_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_fft_cmul_s16_st_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_fft_r2bf_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_fft_r2bf_s16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_fft_vst_r32_decp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_ld_128_usar_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int)">; +def esp_ld_128_usar_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_ld_xacc_ip : RISCVBuiltin<"void(unsigned int, int)">; +def esp_ldqa_s16_128_ip : RISCVBuiltin<"void(unsigned int, int)">; +def esp_ldqa_s16_128_xp : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_ldqa_s8_128_ip : RISCVBuiltin<"void(unsigned int, int)">; +def esp_ldqa_s8_128_xp : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_ldqa_u16_128_ip : RISCVBuiltin<"void(unsigned int, int)">; +def esp_ldqa_u16_128_xp : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_ldqa_u8_128_ip : 
RISCVBuiltin<"void(unsigned int, int)">; +def esp_ldqa_u8_128_xp : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vldbc_16_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int)">; +def esp_vldbc_16_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vldbc_32_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int)">; +def esp_vldbc_32_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vldbc_8_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int)">; +def esp_vldbc_8_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vldext_s16_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int, unsigned int)">; +def esp_vldext_s16_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vldext_s8_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int, unsigned int)">; +def esp_vldext_s8_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vldext_u16_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int, unsigned int)">; +def esp_vldext_u16_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vldext_u8_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int, unsigned int)">; +def esp_vldext_u8_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vldhbc_16_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_ld_qacc_h_h_128_ip : RISCVBuiltin<"void(unsigned int, int)">; +def esp_ld_qacc_h_l_128_ip : RISCVBuiltin<"void(unsigned int, int)">; +def esp_ld_qacc_l_h_128_ip : RISCVBuiltin<"void(unsigned int, int)">; +def esp_ld_qacc_l_l_128_ip : RISCVBuiltin<"void(unsigned int, int)">; +def esp_ld_ua_state_ip : RISCVBuiltin<"void(unsigned int, int)">; +def esp_ldxq_32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_st_qacc_h_h_128_ip : 
RISCVBuiltin<"void(unsigned int, int)">; +def esp_st_qacc_h_l_128_ip : RISCVBuiltin<"void(unsigned int, int)">; +def esp_st_qacc_l_h_128_ip : RISCVBuiltin<"void(unsigned int, int)">; +def esp_st_qacc_l_l_128_ip : RISCVBuiltin<"void(unsigned int, int)">; +def esp_st_ua_state_ip : RISCVBuiltin<"void(unsigned int, int)">; +def esp_stxq_32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vld_128_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int)">; +def esp_vld_128_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vld_h_64_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int)">; +def esp_vld_h_64_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vld_l_64_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int)">; +def esp_vld_l_64_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vst_128_ip : RISCVBuiltin<"void(unsigned int, unsigned int, int)">; +def esp_vst_128_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vst_h_64_ip : RISCVBuiltin<"void(unsigned int, unsigned int, int)">; +def esp_vst_h_64_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vst_l_64_ip : RISCVBuiltin<"void(unsigned int, unsigned int, int)">; +def esp_vst_l_64_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_slci_2q : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_slcxxp_2q : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_src_q : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_src_q_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_src_q_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_src_q_qup : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def 
esp_srci_2q : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_srcmb_s16_q_qacc : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_srcmb_s16_qacc : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_srcmb_s8_q_qacc : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_srcmb_s8_qacc : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_srcmb_u16_q_qacc : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_srcmb_u16_qacc : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_srcmb_u8_q_qacc : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_srcmb_u8_qacc : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_srcq_128_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_srcxxp_2q : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_srs_s_xacc : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_srs_u_xacc : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vsl_32 : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vsld_16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsld_32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsld_8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsr_s32 : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vsr_u32 : RISCVBuiltin<"void(unsigned int, unsigned int)">; +def esp_vsrd_16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsrd_32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_vsrd_8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_st_s_xacc_ip : RISCVBuiltin<"void(unsigned int, int)">; +def esp_st_u_xacc_ip : RISCVBuiltin<"void(unsigned int, int)">; +} diff --git 
a/clang/test/CodeGen/RISCV/riscv-esp32p4.c b/clang/test/CodeGen/RISCV/riscv-esp32p4.c new file mode 100644 index 0000000000000..912b217276b63 --- /dev/null +++ b/clang/test/CodeGen/RISCV/riscv-esp32p4.c @@ -0,0 +1,1027 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// RUN: %clang_cc1 -triple riscv32 -target-feature +xesppie -emit-llvm -O0 -o - %s \ +// RUN: | FileCheck %s + +#include + +// CHECK-LABEL: @test( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[DATA:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 10, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s16.qacc.h(i32 4, i32 2) +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.ip(i32 4, i32 0, i32 [[TMP0]], i32 -96, i32 3) +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.xp(i32 [[TMP1]], i32 5, i32 5, i32 [[TMP2]], i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s16.qacc.l(i32 6, i32 1) +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.ip(i32 2, i32 3, i32 [[TMP3]], i32 -48, i32 3) +// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.xp(i32 [[TMP4]], i32 7, i32 2, i32 [[TMP5]], i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s8.qacc.h(i32 4, i32 4) +// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.ip(i32 7, i32 4, i32 [[TMP6]], i32 -128, i32 4) +// CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void 
@llvm.riscv.esp.vcmulas.s8.qacc.h.ld.xp(i32 [[TMP7]], i32 2, i32 3, i32 [[TMP8]], i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s8.qacc.l(i32 6, i32 4) +// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.ip(i32 5, i32 5, i32 [[TMP9]], i32 16, i32 7) +// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.xp(i32 [[TMP10]], i32 4, i32 4, i32 [[TMP11]], i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.qacc(i32 7, i32 6) +// CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.qacc.ld.ip(i32 0, i32 4, i32 [[TMP12]], i32 96, i32 4) +// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.qacc.ld.xp(i32 [[TMP13]], i32 4, i32 4, i32 [[TMP14]], i32 7) +// CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.qacc.st.ip(i32 2, i32 1, i32 7, i32 [[TMP15]], i32 -128) +// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.qacc.st.xp(i32 [[TMP16]], i32 1, i32 2, i32 6, i32 [[TMP17]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.xacc(i32 1, i32 3) +// CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.xacc.ld.ip(i32 7, i32 3, i32 [[TMP18]], i32 -96, i32 5) +// CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.xacc.ld.xp(i32 [[TMP19]], i32 3, i32 1, i32 [[TMP20]], i32 1) +// CHECK-NEXT: 
[[TMP21:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.xacc.st.ip(i32 2, i32 0, i32 0, i32 [[TMP21]], i32 64) +// CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.xacc.st.xp(i32 [[TMP22]], i32 6, i32 3, i32 6, i32 [[TMP23]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.qacc(i32 0, i32 0) +// CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.qacc.ld.ip(i32 0, i32 3, i32 [[TMP24]], i32 0, i32 7) +// CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.qacc.ld.xp(i32 [[TMP25]], i32 4, i32 3, i32 [[TMP26]], i32 4) +// CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.qacc.st.ip(i32 3, i32 3, i32 5, i32 [[TMP27]], i32 -64) +// CHECK-NEXT: [[TMP28:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.qacc.st.xp(i32 [[TMP28]], i32 4, i32 7, i32 0, i32 [[TMP29]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.xacc(i32 3, i32 3) +// CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.xacc.ld.ip(i32 3, i32 2, i32 [[TMP30]], i32 0, i32 5) +// CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP32:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.xacc.ld.xp(i32 [[TMP31]], i32 6, i32 3, i32 [[TMP32]], i32 0) +// CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.xacc.st.ip(i32 1, i32 7, i32 7, i32 [[TMP33]], i32 -32) +// CHECK-NEXT: [[TMP34:%.*]] = load i32, ptr 
[[DATA]], align 4 +// CHECK-NEXT: [[TMP35:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.xacc.st.xp(i32 [[TMP34]], i32 6, i32 7, i32 6, i32 [[TMP35]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.qacc(i32 5, i32 4) +// CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.qacc.ld.ip(i32 5, i32 2, i32 [[TMP36]], i32 64, i32 6) +// CHECK-NEXT: [[TMP37:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP38:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.qacc.ld.xp(i32 [[TMP37]], i32 5, i32 7, i32 [[TMP38]], i32 7) +// CHECK-NEXT: [[TMP39:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.qacc.st.ip(i32 1, i32 4, i32 3, i32 [[TMP39]], i32 -96) +// CHECK-NEXT: [[TMP40:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP41:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.qacc.st.xp(i32 [[TMP40]], i32 5, i32 0, i32 2, i32 [[TMP41]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.xacc(i32 0, i32 7) +// CHECK-NEXT: [[TMP42:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.xacc.ld.ip(i32 6, i32 6, i32 [[TMP42]], i32 -96, i32 4) +// CHECK-NEXT: [[TMP43:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP44:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.xacc.ld.xp(i32 [[TMP43]], i32 6, i32 5, i32 [[TMP44]], i32 6) +// CHECK-NEXT: [[TMP45:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.xacc.st.ip(i32 3, i32 0, i32 4, i32 [[TMP45]], i32 64) +// CHECK-NEXT: [[TMP46:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP47:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.xacc.st.xp(i32 [[TMP46]], i32 1, i32 0, i32 4, i32 
[[TMP47]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.qacc(i32 5, i32 4) +// CHECK-NEXT: [[TMP48:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.qacc.ld.ip(i32 5, i32 3, i32 [[TMP48]], i32 80, i32 5) +// CHECK-NEXT: [[TMP49:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP50:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.qacc.ld.xp(i32 [[TMP49]], i32 4, i32 7, i32 [[TMP50]], i32 4) +// CHECK-NEXT: [[TMP51:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.qacc.st.ip(i32 3, i32 3, i32 5, i32 [[TMP51]], i32 -96) +// CHECK-NEXT: [[TMP52:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP53:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.qacc.st.xp(i32 [[TMP52]], i32 6, i32 7, i32 3, i32 [[TMP53]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.xacc(i32 0, i32 1) +// CHECK-NEXT: [[TMP54:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.xacc.ld.ip(i32 6, i32 0, i32 [[TMP54]], i32 -32, i32 7) +// CHECK-NEXT: [[TMP55:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP56:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.xacc.ld.xp(i32 [[TMP55]], i32 3, i32 3, i32 [[TMP56]], i32 5) +// CHECK-NEXT: [[TMP57:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.xacc.st.ip(i32 7, i32 0, i32 4, i32 [[TMP57]], i32 32) +// CHECK-NEXT: [[TMP58:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP59:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.xacc.st.xp(i32 [[TMP58]], i32 1, i32 0, i32 0, i32 [[TMP59]]) +// CHECK-NEXT: [[TMP60:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.qacc.ldbc.incp(i32 3, i32 6, i32 [[TMP60]], i32 7) +// CHECK-NEXT: 
[[TMP61:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.qacc.ldbc.incp(i32 5, i32 3, i32 [[TMP61]], i32 6) +// CHECK-NEXT: [[TMP62:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.qacc.ldbc.incp(i32 0, i32 3, i32 [[TMP62]], i32 2) +// CHECK-NEXT: [[TMP63:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.qacc.ldbc.incp(i32 4, i32 7, i32 [[TMP63]], i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.s16.qacc(i32 7, i32 7, i32 4) +// CHECK-NEXT: [[TMP64:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.s16.qacc.ld.incp(i32 7, i32 7, i32 [[TMP64]], i32 4, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.s8.qacc(i32 7, i32 0, i32 7) +// CHECK-NEXT: [[TMP65:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.s8.qacc.ld.incp(i32 5, i32 6, i32 [[TMP65]], i32 15, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.u16.qacc(i32 7, i32 0, i32 10) +// CHECK-NEXT: [[TMP66:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.u16.qacc.ld.incp(i32 7, i32 6, i32 [[TMP66]], i32 1, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.u8.qacc(i32 3, i32 6, i32 5) +// CHECK-NEXT: [[TMP67:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.u8.qacc.ld.incp(i32 6, i32 1, i32 [[TMP67]], i32 4, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.s16(i32 2, i32 1, i32 3, i32 1) +// CHECK-NEXT: [[TMP68:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.s16.ld.incp(i32 2, i32 7, i32 [[TMP68]], i32 0, i32 5, i32 0) +// CHECK-NEXT: [[TMP69:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.s16.st.incp(i32 7, i32 4, i32 6, i32 [[TMP69]], i32 2, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.s8(i32 5, i32 7, i32 2, i32 
4) +// CHECK-NEXT: [[TMP70:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.s8.ld.incp(i32 0, i32 6, i32 [[TMP70]], i32 2, i32 7, i32 5) +// CHECK-NEXT: [[TMP71:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.s8.st.incp(i32 1, i32 6, i32 5, i32 [[TMP71]], i32 0, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.u16(i32 7, i32 4, i32 0, i32 0) +// CHECK-NEXT: [[TMP72:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.u16.ld.incp(i32 2, i32 0, i32 [[TMP72]], i32 3, i32 1, i32 1) +// CHECK-NEXT: [[TMP73:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.u16.st.incp(i32 4, i32 3, i32 4, i32 [[TMP73]], i32 1, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.u8(i32 3, i32 4, i32 1, i32 5) +// CHECK-NEXT: [[TMP74:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.u8.ld.incp(i32 5, i32 0, i32 [[TMP74]], i32 1, i32 5, i32 1) +// CHECK-NEXT: [[TMP75:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.u8.st.incp(i32 2, i32 7, i32 4, i32 [[TMP75]], i32 3, i32 1) +// CHECK-NEXT: [[TMP76:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.max.s16.a(i32 2, i32 [[TMP76]]) +// CHECK-NEXT: [[TMP77:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.max.s32.a(i32 0, i32 [[TMP77]]) +// CHECK-NEXT: [[TMP78:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.max.s8.a(i32 7, i32 [[TMP78]]) +// CHECK-NEXT: [[TMP79:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.max.u16.a(i32 4, i32 [[TMP79]]) +// CHECK-NEXT: [[TMP80:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.max.u32.a(i32 4, i32 [[TMP80]]) +// CHECK-NEXT: [[TMP81:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void 
@llvm.riscv.esp.max.u8.a(i32 3, i32 [[TMP81]]) +// CHECK-NEXT: [[TMP82:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.min.s16.a(i32 0, i32 [[TMP82]]) +// CHECK-NEXT: [[TMP83:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.min.s32.a(i32 7, i32 [[TMP83]]) +// CHECK-NEXT: [[TMP84:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.min.s8.a(i32 4, i32 [[TMP84]]) +// CHECK-NEXT: [[TMP85:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.min.u16.a(i32 7, i32 [[TMP85]]) +// CHECK-NEXT: [[TMP86:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.min.u32.a(i32 6, i32 [[TMP86]]) +// CHECK-NEXT: [[TMP87:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.min.u8.a(i32 1, i32 [[TMP87]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vabs.16(i32 7, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.vabs.32(i32 0, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vabs.8(i32 5, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s16(i32 0, i32 4, i32 0) +// CHECK-NEXT: [[TMP88:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s16.ld.incp(i32 4, i32 2, i32 [[TMP88]], i32 0, i32 7) +// CHECK-NEXT: [[TMP89:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s16.st.incp(i32 5, i32 7, i32 0, i32 [[TMP89]], i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s32(i32 6, i32 5, i32 0) +// CHECK-NEXT: [[TMP90:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s32.ld.incp(i32 5, i32 6, i32 [[TMP90]], i32 0, i32 2) +// CHECK-NEXT: [[TMP91:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s32.st.incp(i32 7, i32 7, i32 0, i32 [[TMP91]], i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s8(i32 6, i32 5, i32 5) +// CHECK-NEXT: [[TMP92:%.*]] = load i32, ptr [[DATA]], 
align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s8.ld.incp(i32 2, i32 4, i32 [[TMP92]], i32 6, i32 7) +// CHECK-NEXT: [[TMP93:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s8.st.incp(i32 4, i32 6, i32 4, i32 [[TMP93]], i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u16(i32 0, i32 6, i32 5) +// CHECK-NEXT: [[TMP94:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u16.ld.incp(i32 6, i32 7, i32 [[TMP94]], i32 5, i32 1) +// CHECK-NEXT: [[TMP95:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u16.st.incp(i32 1, i32 3, i32 4, i32 [[TMP95]], i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u32(i32 7, i32 3, i32 0) +// CHECK-NEXT: [[TMP96:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u32.ld.incp(i32 0, i32 4, i32 [[TMP96]], i32 5, i32 5) +// CHECK-NEXT: [[TMP97:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u32.st.incp(i32 1, i32 5, i32 6, i32 [[TMP97]], i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u8(i32 0, i32 1, i32 5) +// CHECK-NEXT: [[TMP98:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u8.ld.incp(i32 5, i32 1, i32 [[TMP98]], i32 2, i32 6) +// CHECK-NEXT: [[TMP99:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u8.st.incp(i32 1, i32 7, i32 4, i32 [[TMP99]], i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vclamp.s16(i32 3, i32 12, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s16(i32 1, i32 2, i32 2) +// CHECK-NEXT: [[TMP100:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s16.ld.incp(i32 3, i32 0, i32 [[TMP100]], i32 5, i32 1) +// CHECK-NEXT: [[TMP101:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s16.st.incp(i32 0, i32 4, i32 2, i32 [[TMP101]], i32 3) +// CHECK-NEXT: call void 
@llvm.riscv.esp.vmax.s32(i32 0, i32 2, i32 4) +// CHECK-NEXT: [[TMP102:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s32.ld.incp(i32 3, i32 5, i32 [[TMP102]], i32 3, i32 6) +// CHECK-NEXT: [[TMP103:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s32.st.incp(i32 6, i32 0, i32 7, i32 [[TMP103]], i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s8(i32 1, i32 0, i32 3) +// CHECK-NEXT: [[TMP104:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s8.ld.incp(i32 1, i32 6, i32 [[TMP104]], i32 6, i32 6) +// CHECK-NEXT: [[TMP105:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s8.st.incp(i32 2, i32 7, i32 1, i32 [[TMP105]], i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u16(i32 6, i32 6, i32 3) +// CHECK-NEXT: [[TMP106:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u16.ld.incp(i32 5, i32 2, i32 [[TMP106]], i32 2, i32 1) +// CHECK-NEXT: [[TMP107:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u16.st.incp(i32 3, i32 6, i32 2, i32 [[TMP107]], i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u32(i32 2, i32 3, i32 3) +// CHECK-NEXT: [[TMP108:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u32.ld.incp(i32 1, i32 4, i32 [[TMP108]], i32 5, i32 5) +// CHECK-NEXT: [[TMP109:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u32.st.incp(i32 4, i32 2, i32 1, i32 [[TMP109]], i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u8(i32 7, i32 0, i32 4) +// CHECK-NEXT: [[TMP110:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u8.ld.incp(i32 1, i32 5, i32 [[TMP110]], i32 4, i32 7) +// CHECK-NEXT: [[TMP111:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u8.st.incp(i32 1, i32 2, i32 5, i32 
[[TMP111]], i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s16(i32 5, i32 1, i32 7) +// CHECK-NEXT: [[TMP112:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s16.ld.incp(i32 7, i32 6, i32 [[TMP112]], i32 6, i32 4) +// CHECK-NEXT: [[TMP113:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s16.st.incp(i32 7, i32 0, i32 6, i32 [[TMP113]], i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s32(i32 7, i32 4, i32 7) +// CHECK-NEXT: [[TMP114:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s32.ld.incp(i32 0, i32 1, i32 [[TMP114]], i32 5, i32 4) +// CHECK-NEXT: [[TMP115:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s32.st.incp(i32 1, i32 6, i32 7, i32 [[TMP115]], i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s8(i32 5, i32 6, i32 4) +// CHECK-NEXT: [[TMP116:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s8.ld.incp(i32 1, i32 6, i32 [[TMP116]], i32 6, i32 5) +// CHECK-NEXT: [[TMP117:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s8.st.incp(i32 7, i32 7, i32 6, i32 [[TMP117]], i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u16(i32 7, i32 1, i32 1) +// CHECK-NEXT: [[TMP118:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u16.ld.incp(i32 6, i32 0, i32 [[TMP118]], i32 3, i32 0) +// CHECK-NEXT: [[TMP119:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u16.st.incp(i32 0, i32 7, i32 5, i32 [[TMP119]], i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u32(i32 6, i32 5, i32 0) +// CHECK-NEXT: [[TMP120:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u32.ld.incp(i32 3, i32 7, i32 [[TMP120]], i32 1, i32 4) +// CHECK-NEXT: [[TMP121:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void 
@llvm.riscv.esp.vmin.u32.st.incp(i32 1, i32 0, i32 2, i32 [[TMP121]], i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u8(i32 2, i32 0, i32 7) +// CHECK-NEXT: [[TMP122:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u8.ld.incp(i32 4, i32 2, i32 [[TMP122]], i32 4, i32 3) +// CHECK-NEXT: [[TMP123:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u8.st.incp(i32 1, i32 7, i32 4, i32 [[TMP123]], i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s16(i32 7, i32 5, i32 3) +// CHECK-NEXT: [[TMP124:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s16.ld.incp(i32 5, i32 4, i32 [[TMP124]], i32 1, i32 6) +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s16.s8xs8(i32 7, i32 6, i32 4, i32 4) +// CHECK-NEXT: [[TMP125:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s16.st.incp(i32 0, i32 1, i32 5, i32 [[TMP125]], i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s32.s16xs16(i32 5, i32 3, i32 1, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s8(i32 1, i32 6, i32 0) +// CHECK-NEXT: [[TMP126:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s8.ld.incp(i32 2, i32 1, i32 [[TMP126]], i32 6, i32 5) +// CHECK-NEXT: [[TMP127:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s8.st.incp(i32 5, i32 2, i32 1, i32 [[TMP127]], i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.u16(i32 7, i32 3, i32 6) +// CHECK-NEXT: [[TMP128:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.u16.ld.incp(i32 3, i32 3, i32 [[TMP128]], i32 2, i32 0) +// CHECK-NEXT: [[TMP129:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.u16.st.incp(i32 6, i32 5, i32 0, i32 [[TMP129]], i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.u8(i32 2, i32 2, i32 7) +// CHECK-NEXT: [[TMP130:%.*]] = load i32, ptr 
[[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.u8.ld.incp(i32 1, i32 1, i32 [[TMP130]], i32 6, i32 7) +// CHECK-NEXT: [[TMP131:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.u8.st.incp(i32 5, i32 0, i32 6, i32 [[TMP131]], i32 2) +// CHECK-NEXT: [[TMP132:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vprelu.s16(i32 [[TMP132]], i32 0, i32 7, i32 3) +// CHECK-NEXT: [[TMP133:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vprelu.s8(i32 [[TMP133]], i32 6, i32 6, i32 6) +// CHECK-NEXT: [[TMP134:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP135:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vrelu.s16(i32 [[TMP134]], i32 [[TMP135]], i32 3) +// CHECK-NEXT: [[TMP136:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP137:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vrelu.s8(i32 [[TMP136]], i32 [[TMP137]], i32 7) +// CHECK-NEXT: [[TMP138:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsadds.s16(i32 [[TMP138]], i32 5, i32 4) +// CHECK-NEXT: [[TMP139:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsadds.s8(i32 [[TMP139]], i32 6, i32 6) +// CHECK-NEXT: [[TMP140:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsadds.u16(i32 [[TMP140]], i32 7, i32 2) +// CHECK-NEXT: [[TMP141:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsadds.u8(i32 [[TMP141]], i32 2, i32 0) +// CHECK-NEXT: [[TMP142:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP143:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsat.s16(i32 [[TMP142]], i32 [[TMP143]], i32 7, i32 5) +// CHECK-NEXT: [[TMP144:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP145:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: 
call void @llvm.riscv.esp.vsat.s32(i32 [[TMP144]], i32 [[TMP145]], i32 2, i32 5) +// CHECK-NEXT: [[TMP146:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP147:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsat.s8(i32 [[TMP146]], i32 [[TMP147]], i32 2, i32 5) +// CHECK-NEXT: [[TMP148:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP149:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsat.u16(i32 [[TMP148]], i32 [[TMP149]], i32 0, i32 2) +// CHECK-NEXT: [[TMP150:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP151:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsat.u32(i32 [[TMP150]], i32 [[TMP151]], i32 4, i32 2) +// CHECK-NEXT: [[TMP152:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP153:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsat.u8(i32 [[TMP152]], i32 [[TMP153]], i32 0, i32 2) +// CHECK-NEXT: [[TMP154:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vssubs.s16(i32 [[TMP154]], i32 3, i32 6) +// CHECK-NEXT: [[TMP155:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vssubs.s8(i32 [[TMP155]], i32 5, i32 5) +// CHECK-NEXT: [[TMP156:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vssubs.u16(i32 [[TMP156]], i32 6, i32 3) +// CHECK-NEXT: [[TMP157:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vssubs.u8(i32 [[TMP157]], i32 0, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s16(i32 0, i32 5, i32 3) +// CHECK-NEXT: [[TMP158:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s16.ld.incp(i32 0, i32 1, i32 [[TMP158]], i32 5, i32 3) +// CHECK-NEXT: [[TMP159:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s16.st.incp(i32 5, i32 7, i32 7, i32 [[TMP159]], i32 4) +// 
CHECK-NEXT: call void @llvm.riscv.esp.vsub.s32(i32 3, i32 0, i32 3) +// CHECK-NEXT: [[TMP160:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s32.ld.incp(i32 1, i32 2, i32 [[TMP160]], i32 0, i32 2) +// CHECK-NEXT: [[TMP161:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s32.st.incp(i32 4, i32 0, i32 0, i32 [[TMP161]], i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s8(i32 4, i32 1, i32 3) +// CHECK-NEXT: [[TMP162:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s8.ld.incp(i32 3, i32 7, i32 [[TMP162]], i32 3, i32 5) +// CHECK-NEXT: [[TMP163:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s8.st.incp(i32 5, i32 7, i32 3, i32 [[TMP163]], i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u16(i32 4, i32 6, i32 5) +// CHECK-NEXT: [[TMP164:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u16.ld.incp(i32 4, i32 7, i32 [[TMP164]], i32 0, i32 5) +// CHECK-NEXT: [[TMP165:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u16.st.incp(i32 2, i32 2, i32 7, i32 [[TMP165]], i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u32(i32 0, i32 1, i32 2) +// CHECK-NEXT: [[TMP166:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u32.ld.incp(i32 5, i32 6, i32 [[TMP166]], i32 3, i32 5) +// CHECK-NEXT: [[TMP167:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u32.st.incp(i32 0, i32 1, i32 4, i32 [[TMP167]], i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u8(i32 4, i32 2, i32 7) +// CHECK-NEXT: [[TMP168:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u8.ld.incp(i32 2, i32 7, i32 [[TMP168]], i32 3, i32 4) +// CHECK-NEXT: [[TMP169:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u8.st.incp(i32 6, i32 4, 
i32 7, i32 [[TMP169]], i32 7) +// CHECK-NEXT: [[TMP170:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP171:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP172:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.addx2(i32 [[TMP170]], i32 [[TMP171]], i32 [[TMP172]]) +// CHECK-NEXT: [[TMP173:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP174:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP175:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.addx4(i32 [[TMP173]], i32 [[TMP174]], i32 [[TMP175]]) +// CHECK-NEXT: [[TMP176:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP177:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP178:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.sat(i32 [[TMP176]], i32 [[TMP177]], i32 [[TMP178]]) +// CHECK-NEXT: [[TMP179:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP180:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP181:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.subx2(i32 [[TMP179]], i32 [[TMP180]], i32 [[TMP181]]) +// CHECK-NEXT: [[TMP182:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP183:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP184:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.subx4(i32 [[TMP182]], i32 [[TMP183]], i32 [[TMP184]]) +// CHECK-NEXT: call void @llvm.riscv.esp.andq(i32 0, i32 1, i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.notq(i32 0, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.orq(i32 0, i32 6, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.xorq(i32 7, i32 4, i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.s16(i32 6, i32 6, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.s32(i32 6, i32 2, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.s8(i32 7, i32 6, i32 0) +// CHECK-NEXT: call void 
@llvm.riscv.esp.vcmp.eq.u16(i32 0, i32 2, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.u32(i32 6, i32 4, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.u8(i32 6, i32 4, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.s16(i32 5, i32 3, i32 6) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.s32(i32 2, i32 4, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.s8(i32 7, i32 7, i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.u16(i32 2, i32 7, i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.u32(i32 6, i32 4, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.u8(i32 0, i32 4, i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.s16(i32 4, i32 6, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.s32(i32 2, i32 4, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.s8(i32 3, i32 0, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.u16(i32 2, i32 4, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.u32(i32 2, i32 0, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.u8(i32 0, i32 2, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.mov.s16.qacc(i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.mov.s8.qacc(i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.mov.u16.qacc(i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.mov.u8.qacc(i32 5) +// CHECK-NEXT: [[TMP185:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movi.16.a(i32 2, i32 8, i32 [[TMP185]]) +// CHECK-NEXT: [[TMP186:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movi.16.q(i32 [[TMP186]], i32 12, i32 1) +// CHECK-NEXT: [[TMP187:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movi.32.a(i32 4, i32 2, i32 [[TMP187]]) +// CHECK-NEXT: [[TMP188:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movi.32.q(i32 [[TMP188]], i32 1, i32 0) +// CHECK-NEXT: [[TMP189:%.*]] = load i32, ptr [[DATA]], align 4 +// 
CHECK-NEXT: call void @llvm.riscv.esp.movi.8.a(i32 0, i32 13, i32 [[TMP189]]) +// CHECK-NEXT: [[TMP190:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movi.8.q(i32 [[TMP190]], i32 14, i32 3) +// CHECK-NEXT: [[TMP191:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.cfg(i32 [[TMP191]]) +// CHECK-NEXT: [[TMP192:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.fft.bit.width(i32 [[TMP192]]) +// CHECK-NEXT: [[TMP193:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP194:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.perf(i32 [[TMP193]], i32 [[TMP194]]) +// CHECK-NEXT: [[TMP195:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.sar(i32 [[TMP195]]) +// CHECK-NEXT: [[TMP196:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.sar.bytes(i32 [[TMP196]]) +// CHECK-NEXT: [[TMP197:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.xacc.h(i32 [[TMP197]]) +// CHECK-NEXT: [[TMP198:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.xacc.l(i32 [[TMP198]]) +// CHECK-NEXT: [[TMP199:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.cfg(i32 [[TMP199]]) +// CHECK-NEXT: [[TMP200:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.fft.bit.width(i32 [[TMP200]]) +// CHECK-NEXT: [[TMP201:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.perf(i32 [[TMP201]]) +// CHECK-NEXT: [[TMP202:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.sar(i32 [[TMP202]]) +// CHECK-NEXT: [[TMP203:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.sar.bytes(i32 [[TMP203]]) +// CHECK-NEXT: [[TMP204:%.*]] = load i32, 
ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.xacc.h(i32 [[TMP204]]) +// CHECK-NEXT: [[TMP205:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.xacc.l(i32 [[TMP205]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vext.s16(i32 0, i32 4, i32 6) +// CHECK-NEXT: call void @llvm.riscv.esp.vext.s8(i32 0, i32 7, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vext.u16(i32 1, i32 0, i32 6) +// CHECK-NEXT: call void @llvm.riscv.esp.vext.u8(i32 4, i32 1, i32 6) +// CHECK-NEXT: call void @llvm.riscv.esp.vunzip.16(i32 3, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vunzip.32(i32 6, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vunzip.8(i32 3, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vunzipt.16(i32 1, i32 5, i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vunzipt.8(i32 7, i32 5, i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vzip.16(i32 2, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vzip.32(i32 0, i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vzip.8(i32 6, i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vzipt.16(i32 6, i32 3, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.vzipt.8(i32 7, i32 0, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.zero.q(i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.zero.qacc() +// CHECK-NEXT: call void @llvm.riscv.esp.zero.xacc() +// CHECK-NEXT: [[TMP206:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.fft.ams.s16.ld.incp(i32 1, i32 1, i32 3, i32 [[TMP206]], i32 0, i32 6, i32 0, i32 3) +// CHECK-NEXT: [[TMP207:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.fft.ams.s16.ld.incp.uaup(i32 3, i32 0, i32 1, i32 [[TMP207]], i32 0, i32 3, i32 3, i32 1) +// CHECK-NEXT: [[TMP208:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.fft.ams.s16.ld.r32.decp(i32 2, i32 3, i32 7, i32 [[TMP208]], i32 0, i32 1, i32 1, i32 4) +// CHECK-NEXT: [[TMP209:%.*]] = load i32, ptr 
[[DATA]], align 4 +// CHECK-NEXT: [[TMP210:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.fft.ams.s16.st.incp(i32 4, i32 4, i32 0, i32 5, i32 [[TMP209]], i32 [[TMP210]], i32 1, i32 1) +// CHECK-NEXT: [[TMP211:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.fft.bitrev(i32 [[TMP211]], i32 6) +// CHECK-NEXT: [[TMP212:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP213:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.fft.cmul.s16.ld.xp(i32 [[TMP212]], i32 7, i32 0, i32 [[TMP213]], i32 2, i32 1, i32 2) +// CHECK-NEXT: [[TMP214:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP215:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.fft.cmul.s16.st.xp(i32 [[TMP214]], i32 6, i32 0, i32 7, i32 [[TMP215]], i32 0, i32 1, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.fft.r2bf.s16(i32 2, i32 5, i32 0, i32 7, i32 5) +// CHECK-NEXT: [[TMP216:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.fft.r2bf.s16.st.incp(i32 1, i32 7, i32 [[TMP216]], i32 1, i32 6) +// CHECK-NEXT: [[TMP217:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.fft.vst.r32.decp(i32 2, i32 [[TMP217]], i32 1) +// CHECK-NEXT: [[TMP218:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ld.128.usar.ip(i32 [[TMP218]], i32 -464, i32 7) +// CHECK-NEXT: [[TMP219:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP220:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ld.128.usar.xp(i32 [[TMP219]], i32 [[TMP220]], i32 0) +// CHECK-NEXT: [[TMP221:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ld.xacc.ip(i32 [[TMP221]], i32 -224) +// CHECK-NEXT: [[TMP222:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.s16.128.ip(i32 [[TMP222]], i32 288) +// CHECK-NEXT: 
[[TMP223:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP224:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.s16.128.xp(i32 [[TMP223]], i32 [[TMP224]]) +// CHECK-NEXT: [[TMP225:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.s8.128.ip(i32 [[TMP225]], i32 -1408) +// CHECK-NEXT: [[TMP226:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP227:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.s8.128.xp(i32 [[TMP226]], i32 [[TMP227]]) +// CHECK-NEXT: [[TMP228:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.u16.128.ip(i32 [[TMP228]], i32 -1440) +// CHECK-NEXT: [[TMP229:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP230:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.u16.128.xp(i32 [[TMP229]], i32 [[TMP230]]) +// CHECK-NEXT: [[TMP231:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.u8.128.ip(i32 [[TMP231]], i32 -816) +// CHECK-NEXT: [[TMP232:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP233:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.u8.128.xp(i32 [[TMP232]], i32 [[TMP233]]) +// CHECK-NEXT: [[TMP234:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldbc.16.ip(i32 [[TMP234]], i32 380, i32 2) +// CHECK-NEXT: [[TMP235:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP236:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldbc.16.xp(i32 [[TMP235]], i32 [[TMP236]], i32 3) +// CHECK-NEXT: [[TMP237:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldbc.32.ip(i32 [[TMP237]], i32 -292, i32 7) +// CHECK-NEXT: [[TMP238:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP239:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void 
@llvm.riscv.esp.vldbc.32.xp(i32 [[TMP238]], i32 [[TMP239]], i32 1) +// CHECK-NEXT: [[TMP240:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldbc.8.ip(i32 [[TMP240]], i32 -416, i32 5) +// CHECK-NEXT: [[TMP241:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP242:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldbc.8.xp(i32 [[TMP241]], i32 [[TMP242]], i32 7) +// CHECK-NEXT: [[TMP243:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldext.s16.ip(i32 [[TMP243]], i32 -80, i32 0, i32 3) +// CHECK-NEXT: [[TMP244:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP245:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldext.s16.xp(i32 [[TMP244]], i32 [[TMP245]], i32 2, i32 5) +// CHECK-NEXT: [[TMP246:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldext.s8.ip(i32 [[TMP246]], i32 0, i32 2, i32 7) +// CHECK-NEXT: [[TMP247:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP248:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldext.s8.xp(i32 [[TMP247]], i32 [[TMP248]], i32 7, i32 5) +// CHECK-NEXT: [[TMP249:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldext.u16.ip(i32 [[TMP249]], i32 32, i32 0, i32 6) +// CHECK-NEXT: [[TMP250:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP251:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldext.u16.xp(i32 [[TMP250]], i32 [[TMP251]], i32 7, i32 6) +// CHECK-NEXT: [[TMP252:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldext.u8.ip(i32 [[TMP252]], i32 -16, i32 3, i32 1) +// CHECK-NEXT: [[TMP253:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP254:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldext.u8.xp(i32 [[TMP253]], i32 [[TMP254]], i32 5, i32 
4) +// CHECK-NEXT: [[TMP255:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldhbc.16.incp(i32 [[TMP255]], i32 2, i32 3) +// CHECK-NEXT: [[TMP256:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ld.qacc.h.h.128.ip(i32 [[TMP256]], i32 -240) +// CHECK-NEXT: [[TMP257:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ld.qacc.h.l.128.ip(i32 [[TMP257]], i32 -32) +// CHECK-NEXT: [[TMP258:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ld.qacc.l.h.128.ip(i32 [[TMP258]], i32 -64) +// CHECK-NEXT: [[TMP259:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ld.qacc.l.l.128.ip(i32 [[TMP259]], i32 -80) +// CHECK-NEXT: [[TMP260:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ld.ua.state.ip(i32 [[TMP260]], i32 1504) +// CHECK-NEXT: [[TMP261:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ldxq.32(i32 [[TMP261]], i32 6, i32 1, i32 7, i32 1) +// CHECK-NEXT: [[TMP262:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.st.qacc.h.h.128.ip(i32 [[TMP262]], i32 -480) +// CHECK-NEXT: [[TMP263:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.st.qacc.h.l.128.ip(i32 [[TMP263]], i32 -1712) +// CHECK-NEXT: [[TMP264:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.st.qacc.l.h.128.ip(i32 [[TMP264]], i32 960) +// CHECK-NEXT: [[TMP265:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.st.qacc.l.l.128.ip(i32 [[TMP265]], i32 1920) +// CHECK-NEXT: [[TMP266:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.st.ua.state.ip(i32 [[TMP266]], i32 -1360) +// CHECK-NEXT: [[TMP267:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.stxq.32(i32 [[TMP267]], i32 6, i32 2, i32 3, i32 0) +// CHECK-NEXT: 
[[TMP268:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vld.128.ip(i32 [[TMP268]], i32 -1136, i32 0) +// CHECK-NEXT: [[TMP269:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP270:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vld.128.xp(i32 [[TMP269]], i32 [[TMP270]], i32 5) +// CHECK-NEXT: [[TMP271:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vld.h.64.ip(i32 [[TMP271]], i32 1008, i32 4) +// CHECK-NEXT: [[TMP272:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP273:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vld.h.64.xp(i32 [[TMP272]], i32 [[TMP273]], i32 2) +// CHECK-NEXT: [[TMP274:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vld.l.64.ip(i32 [[TMP274]], i32 -304, i32 6) +// CHECK-NEXT: [[TMP275:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP276:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vld.l.64.xp(i32 [[TMP275]], i32 [[TMP276]], i32 6) +// CHECK-NEXT: [[TMP277:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vst.128.ip(i32 0, i32 [[TMP277]], i32 -1216) +// CHECK-NEXT: [[TMP278:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP279:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vst.128.xp(i32 [[TMP278]], i32 6, i32 [[TMP279]]) +// CHECK-NEXT: [[TMP280:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vst.h.64.ip(i32 1, i32 [[TMP280]], i32 -456) +// CHECK-NEXT: [[TMP281:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP282:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vst.h.64.xp(i32 [[TMP281]], i32 2, i32 [[TMP282]]) +// CHECK-NEXT: [[TMP283:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vst.l.64.ip(i32 6, i32 [[TMP283]], 
i32 664) +// CHECK-NEXT: [[TMP284:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP285:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vst.l.64.xp(i32 [[TMP284]], i32 4, i32 [[TMP285]]) +// CHECK-NEXT: call void @llvm.riscv.esp.slci.2q(i32 2, i32 0, i32 14) +// CHECK-NEXT: [[TMP286:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP287:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.slcxxp.2q(i32 [[TMP286]], i32 [[TMP287]], i32 0, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.src.q(i32 7, i32 3, i32 2) +// CHECK-NEXT: [[TMP288:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.src.q.ld.ip(i32 1, i32 [[TMP288]], i32 4, i32 1168, i32 4) +// CHECK-NEXT: [[TMP289:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP290:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.src.q.ld.xp(i32 [[TMP289]], i32 0, i32 [[TMP290]], i32 1, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.src.q.qup(i32 3, i32 3, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.srci.2q(i32 7, i32 4, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.s16.q.qacc(i32 2, i32 1, i32 5) +// CHECK-NEXT: [[TMP291:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.s16.qacc(i32 [[TMP291]], i32 0, i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.s8.q.qacc(i32 7, i32 0, i32 3) +// CHECK-NEXT: [[TMP292:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.s8.qacc(i32 [[TMP292]], i32 1, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.u16.q.qacc(i32 6, i32 1, i32 0) +// CHECK-NEXT: [[TMP293:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.u16.qacc(i32 [[TMP293]], i32 0, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.u8.q.qacc(i32 6, i32 0, i32 7) +// CHECK-NEXT: [[TMP294:%.*]] = load i32, ptr [[DATA]], align 4 +// 
CHECK-NEXT: call void @llvm.riscv.esp.srcmb.u8.qacc(i32 [[TMP294]], i32 1, i32 2) +// CHECK-NEXT: [[TMP295:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srcq.128.st.incp(i32 0, i32 5, i32 [[TMP295]]) +// CHECK-NEXT: [[TMP296:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP297:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srcxxp.2q(i32 [[TMP296]], i32 [[TMP297]], i32 7, i32 5) +// CHECK-NEXT: [[TMP298:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP299:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srs.s.xacc(i32 [[TMP298]], i32 [[TMP299]]) +// CHECK-NEXT: [[TMP300:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP301:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srs.u.xacc(i32 [[TMP300]], i32 [[TMP301]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vsl.32(i32 0, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vsld.16(i32 6, i32 4, i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vsld.32(i32 2, i32 7, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vsld.8(i32 1, i32 0, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.vsr.s32(i32 6, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vsr.u32(i32 3, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vsrd.16(i32 6, i32 2, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vsrd.32(i32 7, i32 5, i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vsrd.8(i32 2, i32 1, i32 4) +// CHECK-NEXT: [[TMP302:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.st.s.xacc.ip(i32 [[TMP302]], i32 912) +// CHECK-NEXT: [[TMP303:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.st.u.xacc.ip(i32 [[TMP303]], i32 -112) +// CHECK-NEXT: ret void +// +void test() { + uint32_t data = 10; + __builtin_riscv_esp_vcmulas_s16_qacc_h(4, 2); +__builtin_riscv_esp_vcmulas_s16_qacc_h_ld_ip(4, 0, data, -96, 3); 
+__builtin_riscv_esp_vcmulas_s16_qacc_h_ld_xp(data, 5, 5, data, 5); +__builtin_riscv_esp_vcmulas_s16_qacc_l(6, 1); +__builtin_riscv_esp_vcmulas_s16_qacc_l_ld_ip(2, 3, data, -48, 3); +__builtin_riscv_esp_vcmulas_s16_qacc_l_ld_xp(data, 7, 2, data, 1); +__builtin_riscv_esp_vcmulas_s8_qacc_h(4, 4); +__builtin_riscv_esp_vcmulas_s8_qacc_h_ld_ip(7, 4, data, -128, 4); +__builtin_riscv_esp_vcmulas_s8_qacc_h_ld_xp(data, 2, 3, data, 1); +__builtin_riscv_esp_vcmulas_s8_qacc_l(6, 4); +__builtin_riscv_esp_vcmulas_s8_qacc_l_ld_ip(5, 5, data, 16, 7); +__builtin_riscv_esp_vcmulas_s8_qacc_l_ld_xp(data, 4, 4, data, 2); +__builtin_riscv_esp_vmulas_s16_qacc(7, 6); +__builtin_riscv_esp_vmulas_s16_qacc_ld_ip(0, 4, data, 96, 4); +__builtin_riscv_esp_vmulas_s16_qacc_ld_xp(data, 4, 4, data, 7); +__builtin_riscv_esp_vmulas_s16_qacc_st_ip(2, 1, 7, data, -128); +__builtin_riscv_esp_vmulas_s16_qacc_st_xp(data, 1, 2, 6, data); +__builtin_riscv_esp_vmulas_s16_xacc(1, 3); +__builtin_riscv_esp_vmulas_s16_xacc_ld_ip(7, 3, data, -96, 5); +__builtin_riscv_esp_vmulas_s16_xacc_ld_xp(data, 3, 1, data, 1); +__builtin_riscv_esp_vmulas_s16_xacc_st_ip(2, 0, 0, data, 64); +__builtin_riscv_esp_vmulas_s16_xacc_st_xp(data, 6, 3, 6, data); +__builtin_riscv_esp_vmulas_s8_qacc(0, 0); +__builtin_riscv_esp_vmulas_s8_qacc_ld_ip(0, 3, data, 0, 7); +__builtin_riscv_esp_vmulas_s8_qacc_ld_xp(data, 4, 3, data, 4); +__builtin_riscv_esp_vmulas_s8_qacc_st_ip(3, 3, 5, data, -64); +__builtin_riscv_esp_vmulas_s8_qacc_st_xp(data, 4, 7, 0, data); +__builtin_riscv_esp_vmulas_s8_xacc(3, 3); +__builtin_riscv_esp_vmulas_s8_xacc_ld_ip(3, 2, data, 0, 5); +__builtin_riscv_esp_vmulas_s8_xacc_ld_xp(data, 6, 3, data, 0); +__builtin_riscv_esp_vmulas_s8_xacc_st_ip(1, 7, 7, data, -32); +__builtin_riscv_esp_vmulas_s8_xacc_st_xp(data, 6, 7, 6, data); +__builtin_riscv_esp_vmulas_u16_qacc(5, 4); +__builtin_riscv_esp_vmulas_u16_qacc_ld_ip(5, 2, data, 64, 6); +__builtin_riscv_esp_vmulas_u16_qacc_ld_xp(data, 5, 7, data, 7); 
+__builtin_riscv_esp_vmulas_u16_qacc_st_ip(1, 4, 3, data, -96); +__builtin_riscv_esp_vmulas_u16_qacc_st_xp(data, 5, 0, 2, data); +__builtin_riscv_esp_vmulas_u16_xacc(0, 7); +__builtin_riscv_esp_vmulas_u16_xacc_ld_ip(6, 6, data, -96, 4); +__builtin_riscv_esp_vmulas_u16_xacc_ld_xp(data, 6, 5, data, 6); +__builtin_riscv_esp_vmulas_u16_xacc_st_ip(3, 0, 4, data, 64); +__builtin_riscv_esp_vmulas_u16_xacc_st_xp(data, 1, 0, 4, data); +__builtin_riscv_esp_vmulas_u8_qacc(5, 4); +__builtin_riscv_esp_vmulas_u8_qacc_ld_ip(5, 3, data, 80, 5); +__builtin_riscv_esp_vmulas_u8_qacc_ld_xp(data, 4, 7, data, 4); +__builtin_riscv_esp_vmulas_u8_qacc_st_ip(3, 3, 5, data, -96); +__builtin_riscv_esp_vmulas_u8_qacc_st_xp(data, 6, 7, 3, data); +__builtin_riscv_esp_vmulas_u8_xacc(0, 1); +__builtin_riscv_esp_vmulas_u8_xacc_ld_ip(6, 0, data, -32, 7); +__builtin_riscv_esp_vmulas_u8_xacc_ld_xp(data, 3, 3, data, 5); +__builtin_riscv_esp_vmulas_u8_xacc_st_ip(7, 0, 4, data, 32); +__builtin_riscv_esp_vmulas_u8_xacc_st_xp(data, 1, 0, 0, data); +__builtin_riscv_esp_vmulas_s16_qacc_ldbc_incp(3, 6, data, 7); +__builtin_riscv_esp_vmulas_s8_qacc_ldbc_incp(5, 3, data, 6); +__builtin_riscv_esp_vmulas_u16_qacc_ldbc_incp(0, 3, data, 2); +__builtin_riscv_esp_vmulas_u8_qacc_ldbc_incp(4, 7, data, 3); +__builtin_riscv_esp_vsmulas_s16_qacc(7, 7, 4); +__builtin_riscv_esp_vsmulas_s16_qacc_ld_incp(7, 7, data, 4, 1); +__builtin_riscv_esp_vsmulas_s8_qacc(7, 0, 7); +__builtin_riscv_esp_vsmulas_s8_qacc_ld_incp(5, 6, data, 15, 2); +__builtin_riscv_esp_vsmulas_u16_qacc(7, 0, 10); +__builtin_riscv_esp_vsmulas_u16_qacc_ld_incp(7, 6, data, 1, 0); +__builtin_riscv_esp_vsmulas_u8_qacc(3, 6, 5); +__builtin_riscv_esp_vsmulas_u8_qacc_ld_incp(6, 1, data, 4, 0); +__builtin_riscv_esp_cmul_s16(2, 1, 3, 1); +__builtin_riscv_esp_cmul_s16_ld_incp(2, 7, data, 0, 5, 0); +__builtin_riscv_esp_cmul_s16_st_incp(7, 4, 6, data, 2, 5); +__builtin_riscv_esp_cmul_s8(5, 7, 2, 4); +__builtin_riscv_esp_cmul_s8_ld_incp(0, 6, data, 2, 7, 5); 
+__builtin_riscv_esp_cmul_s8_st_incp(1, 6, 5, data, 0, 2); +__builtin_riscv_esp_cmul_u16(7, 4, 0, 0); +__builtin_riscv_esp_cmul_u16_ld_incp(2, 0, data, 3, 1, 1); +__builtin_riscv_esp_cmul_u16_st_incp(4, 3, 4, data, 1, 2); +__builtin_riscv_esp_cmul_u8(3, 4, 1, 5); +__builtin_riscv_esp_cmul_u8_ld_incp(5, 0, data, 1, 5, 1); +__builtin_riscv_esp_cmul_u8_st_incp(2, 7, 4, data, 3, 1); +__builtin_riscv_esp_max_s16_a(2, data); +__builtin_riscv_esp_max_s32_a(0, data); +__builtin_riscv_esp_max_s8_a(7, data); +__builtin_riscv_esp_max_u16_a(4, data); +__builtin_riscv_esp_max_u32_a(4, data); +__builtin_riscv_esp_max_u8_a(3, data); +__builtin_riscv_esp_min_s16_a(0, data); +__builtin_riscv_esp_min_s32_a(7, data); +__builtin_riscv_esp_min_s8_a(4, data); +__builtin_riscv_esp_min_u16_a(7, data); +__builtin_riscv_esp_min_u32_a(6, data); +__builtin_riscv_esp_min_u8_a(1, data); +__builtin_riscv_esp_vabs_16(7, 0); +__builtin_riscv_esp_vabs_32(0, 3); +__builtin_riscv_esp_vabs_8(5, 2); +__builtin_riscv_esp_vadd_s16(0, 4, 0); +__builtin_riscv_esp_vadd_s16_ld_incp(4, 2, data, 0, 7); +__builtin_riscv_esp_vadd_s16_st_incp(5, 7, 0, data, 5); +__builtin_riscv_esp_vadd_s32(6, 5, 0); +__builtin_riscv_esp_vadd_s32_ld_incp(5, 6, data, 0, 2); +__builtin_riscv_esp_vadd_s32_st_incp(7, 7, 0, data, 1); +__builtin_riscv_esp_vadd_s8(6, 5, 5); +__builtin_riscv_esp_vadd_s8_ld_incp(2, 4, data, 6, 7); +__builtin_riscv_esp_vadd_s8_st_incp(4, 6, 4, data, 7); +__builtin_riscv_esp_vadd_u16(0, 6, 5); +__builtin_riscv_esp_vadd_u16_ld_incp(6, 7, data, 5, 1); +__builtin_riscv_esp_vadd_u16_st_incp(1, 3, 4, data, 5); +__builtin_riscv_esp_vadd_u32(7, 3, 0); +__builtin_riscv_esp_vadd_u32_ld_incp(0, 4, data, 5, 5); +__builtin_riscv_esp_vadd_u32_st_incp(1, 5, 6, data, 1); +__builtin_riscv_esp_vadd_u8(0, 1, 5); +__builtin_riscv_esp_vadd_u8_ld_incp(5, 1, data, 2, 6); +__builtin_riscv_esp_vadd_u8_st_incp(1, 7, 4, data, 2); +__builtin_riscv_esp_vclamp_s16(3, 12, 5); +__builtin_riscv_esp_vmax_s16(1, 2, 2); 
+__builtin_riscv_esp_vmax_s16_ld_incp(3, 0, data, 5, 1); +__builtin_riscv_esp_vmax_s16_st_incp(0, 4, 2, data, 3); +__builtin_riscv_esp_vmax_s32(0, 2, 4); +__builtin_riscv_esp_vmax_s32_ld_incp(3, 5, data, 3, 6); +__builtin_riscv_esp_vmax_s32_st_incp(6, 0, 7, data, 4); +__builtin_riscv_esp_vmax_s8(1, 0, 3); +__builtin_riscv_esp_vmax_s8_ld_incp(1, 6, data, 6, 6); +__builtin_riscv_esp_vmax_s8_st_incp(2, 7, 1, data, 0); +__builtin_riscv_esp_vmax_u16(6, 6, 3); +__builtin_riscv_esp_vmax_u16_ld_incp(5, 2, data, 2, 1); +__builtin_riscv_esp_vmax_u16_st_incp(3, 6, 2, data, 0); +__builtin_riscv_esp_vmax_u32(2, 3, 3); +__builtin_riscv_esp_vmax_u32_ld_incp(1, 4, data, 5, 5); +__builtin_riscv_esp_vmax_u32_st_incp(4, 2, 1, data, 4); +__builtin_riscv_esp_vmax_u8(7, 0, 4); +__builtin_riscv_esp_vmax_u8_ld_incp(1, 5, data, 4, 7); +__builtin_riscv_esp_vmax_u8_st_incp(1, 2, 5, data, 7); +__builtin_riscv_esp_vmin_s16(5, 1, 7); +__builtin_riscv_esp_vmin_s16_ld_incp(7, 6, data, 6, 4); +__builtin_riscv_esp_vmin_s16_st_incp(7, 0, 6, data, 2); +__builtin_riscv_esp_vmin_s32(7, 4, 7); +__builtin_riscv_esp_vmin_s32_ld_incp(0, 1, data, 5, 4); +__builtin_riscv_esp_vmin_s32_st_incp(1, 6, 7, data, 4); +__builtin_riscv_esp_vmin_s8(5, 6, 4); +__builtin_riscv_esp_vmin_s8_ld_incp(1, 6, data, 6, 5); +__builtin_riscv_esp_vmin_s8_st_incp(7, 7, 6, data, 7); +__builtin_riscv_esp_vmin_u16(7, 1, 1); +__builtin_riscv_esp_vmin_u16_ld_incp(6, 0, data, 3, 0); +__builtin_riscv_esp_vmin_u16_st_incp(0, 7, 5, data, 3); +__builtin_riscv_esp_vmin_u32(6, 5, 0); +__builtin_riscv_esp_vmin_u32_ld_incp(3, 7, data, 1, 4); +__builtin_riscv_esp_vmin_u32_st_incp(1, 0, 2, data, 3); +__builtin_riscv_esp_vmin_u8(2, 0, 7); +__builtin_riscv_esp_vmin_u8_ld_incp(4, 2, data, 4, 3); +__builtin_riscv_esp_vmin_u8_st_incp(1, 7, 4, data, 4); +__builtin_riscv_esp_vmul_s16(7, 5, 3); +__builtin_riscv_esp_vmul_s16_ld_incp(5, 4, data, 1, 6); +__builtin_riscv_esp_vmul_s16_s8xs8(7, 6, 4, 4); +__builtin_riscv_esp_vmul_s16_st_incp(0, 1, 5, data, 7); 
+__builtin_riscv_esp_vmul_s32_s16xs16(5, 3, 1, 2); +__builtin_riscv_esp_vmul_s8(1, 6, 0); +__builtin_riscv_esp_vmul_s8_ld_incp(2, 1, data, 6, 5); +__builtin_riscv_esp_vmul_s8_st_incp(5, 2, 1, data, 7); +__builtin_riscv_esp_vmul_u16(7, 3, 6); +__builtin_riscv_esp_vmul_u16_ld_incp(3, 3, data, 2, 0); +__builtin_riscv_esp_vmul_u16_st_incp(6, 5, 0, data, 1); +__builtin_riscv_esp_vmul_u8(2, 2, 7); +__builtin_riscv_esp_vmul_u8_ld_incp(1, 1, data, 6, 7); +__builtin_riscv_esp_vmul_u8_st_incp(5, 0, 6, data, 2); +__builtin_riscv_esp_vprelu_s16(data, 0, 7, 3); +__builtin_riscv_esp_vprelu_s8(data, 6, 6, 6); +__builtin_riscv_esp_vrelu_s16(data, data, 3); +__builtin_riscv_esp_vrelu_s8(data, data, 7); +__builtin_riscv_esp_vsadds_s16(data, 5, 4); +__builtin_riscv_esp_vsadds_s8(data, 6, 6); +__builtin_riscv_esp_vsadds_u16(data, 7, 2); +__builtin_riscv_esp_vsadds_u8(data, 2, 0); +__builtin_riscv_esp_vsat_s16(data, data, 7, 5); +__builtin_riscv_esp_vsat_s32(data, data, 2, 5); +__builtin_riscv_esp_vsat_s8(data, data, 2, 5); +__builtin_riscv_esp_vsat_u16(data, data, 0, 2); +__builtin_riscv_esp_vsat_u32(data, data, 4, 2); +__builtin_riscv_esp_vsat_u8(data, data, 0, 2); +__builtin_riscv_esp_vssubs_s16(data, 3, 6); +__builtin_riscv_esp_vssubs_s8(data, 5, 5); +__builtin_riscv_esp_vssubs_u16(data, 6, 3); +__builtin_riscv_esp_vssubs_u8(data, 0, 3); +__builtin_riscv_esp_vsub_s16(0, 5, 3); +__builtin_riscv_esp_vsub_s16_ld_incp(0, 1, data, 5, 3); +__builtin_riscv_esp_vsub_s16_st_incp(5, 7, 7, data, 4); +__builtin_riscv_esp_vsub_s32(3, 0, 3); +__builtin_riscv_esp_vsub_s32_ld_incp(1, 2, data, 0, 2); +__builtin_riscv_esp_vsub_s32_st_incp(4, 0, 0, data, 5); +__builtin_riscv_esp_vsub_s8(4, 1, 3); +__builtin_riscv_esp_vsub_s8_ld_incp(3, 7, data, 3, 5); +__builtin_riscv_esp_vsub_s8_st_incp(5, 7, 3, data, 3); +__builtin_riscv_esp_vsub_u16(4, 6, 5); +__builtin_riscv_esp_vsub_u16_ld_incp(4, 7, data, 0, 5); +__builtin_riscv_esp_vsub_u16_st_incp(2, 2, 7, data, 3); +__builtin_riscv_esp_vsub_u32(0, 1, 2); 
+__builtin_riscv_esp_vsub_u32_ld_incp(5, 6, data, 3, 5); +__builtin_riscv_esp_vsub_u32_st_incp(0, 1, 4, data, 2); +__builtin_riscv_esp_vsub_u8(4, 2, 7); +__builtin_riscv_esp_vsub_u8_ld_incp(2, 7, data, 3, 4); +__builtin_riscv_esp_vsub_u8_st_incp(6, 4, 7, data, 7); +__builtin_riscv_esp_addx2(data, data, data); +__builtin_riscv_esp_addx4(data, data, data); +__builtin_riscv_esp_sat(data, data, data); +__builtin_riscv_esp_subx2(data, data, data); +__builtin_riscv_esp_subx4(data, data, data); +__builtin_riscv_esp_andq(0, 1, 4); +__builtin_riscv_esp_notq(0, 1); +__builtin_riscv_esp_orq(0, 6, 3); +__builtin_riscv_esp_xorq(7, 4, 7); +__builtin_riscv_esp_vcmp_eq_s16(6, 6, 3); +__builtin_riscv_esp_vcmp_eq_s32(6, 2, 1); +__builtin_riscv_esp_vcmp_eq_s8(7, 6, 0); +__builtin_riscv_esp_vcmp_eq_u16(0, 2, 5); +__builtin_riscv_esp_vcmp_eq_u32(6, 4, 3); +__builtin_riscv_esp_vcmp_eq_u8(6, 4, 5); +__builtin_riscv_esp_vcmp_gt_s16(5, 3, 6); +__builtin_riscv_esp_vcmp_gt_s32(2, 4, 5); +__builtin_riscv_esp_vcmp_gt_s8(7, 7, 4); +__builtin_riscv_esp_vcmp_gt_u16(2, 7, 7); +__builtin_riscv_esp_vcmp_gt_u32(6, 4, 2); +__builtin_riscv_esp_vcmp_gt_u8(0, 4, 4); +__builtin_riscv_esp_vcmp_lt_s16(4, 6, 5); +__builtin_riscv_esp_vcmp_lt_s32(2, 4, 1); +__builtin_riscv_esp_vcmp_lt_s8(3, 0, 2); +__builtin_riscv_esp_vcmp_lt_u16(2, 4, 1); +__builtin_riscv_esp_vcmp_lt_u32(2, 0, 5); +__builtin_riscv_esp_vcmp_lt_u8(0, 2, 5); +__builtin_riscv_esp_mov_s16_qacc(4); +__builtin_riscv_esp_mov_s8_qacc(5); +__builtin_riscv_esp_mov_u16_qacc(5); +__builtin_riscv_esp_mov_u8_qacc(5); +__builtin_riscv_esp_movi_16_a(2, 8, data); +__builtin_riscv_esp_movi_16_q(data, 12, 1); +__builtin_riscv_esp_movi_32_a(4, 2, data); +__builtin_riscv_esp_movi_32_q(data, 1, 0); +__builtin_riscv_esp_movi_8_a(0, 13, data); +__builtin_riscv_esp_movi_8_q(data, 14, 3); +__builtin_riscv_esp_movx_r_cfg(data); +__builtin_riscv_esp_movx_r_fft_bit_width(data); +__builtin_riscv_esp_movx_r_perf(data, data); +__builtin_riscv_esp_movx_r_sar(data); 
+__builtin_riscv_esp_movx_r_sar_bytes(data); +__builtin_riscv_esp_movx_r_xacc_h(data); +__builtin_riscv_esp_movx_r_xacc_l(data); +__builtin_riscv_esp_movx_w_cfg(data); +__builtin_riscv_esp_movx_w_fft_bit_width(data); +__builtin_riscv_esp_movx_w_perf(data); +__builtin_riscv_esp_movx_w_sar(data); +__builtin_riscv_esp_movx_w_sar_bytes(data); +__builtin_riscv_esp_movx_w_xacc_h(data); +__builtin_riscv_esp_movx_w_xacc_l(data); +__builtin_riscv_esp_vext_s16(0, 4, 6); +__builtin_riscv_esp_vext_s8(0, 7, 1); +__builtin_riscv_esp_vext_u16(1, 0, 6); +__builtin_riscv_esp_vext_u8(4, 1, 6); +__builtin_riscv_esp_vunzip_16(3, 2); +__builtin_riscv_esp_vunzip_32(6, 1); +__builtin_riscv_esp_vunzip_8(3, 5); +__builtin_riscv_esp_vunzipt_16(1, 5, 4); +__builtin_riscv_esp_vunzipt_8(7, 5, 7); +__builtin_riscv_esp_vzip_16(2, 2); +__builtin_riscv_esp_vzip_32(0, 7); +__builtin_riscv_esp_vzip_8(6, 4); +__builtin_riscv_esp_vzipt_16(6, 3, 0); +__builtin_riscv_esp_vzipt_8(7, 0, 1); +__builtin_riscv_esp_zero_q(3); +__builtin_riscv_esp_zero_qacc(); +__builtin_riscv_esp_zero_xacc(); +__builtin_riscv_esp_fft_ams_s16_ld_incp(1, 1, 3, data, 0, 6, 0, 3); +__builtin_riscv_esp_fft_ams_s16_ld_incp_uaup(3, 0, 1, data, 0, 3, 3, 1); +__builtin_riscv_esp_fft_ams_s16_ld_r32_decp(2, 3, 7, data, 0, 1, 1, 4); +__builtin_riscv_esp_fft_ams_s16_st_incp(4, 4, 0, 5, data, data, 1, 1); +__builtin_riscv_esp_fft_bitrev(data, 6); +__builtin_riscv_esp_fft_cmul_s16_ld_xp(data, 7, 0, data, 2, 1, 2); +__builtin_riscv_esp_fft_cmul_s16_st_xp(data, 6, 0, 7, data, 0, 1, 0); +__builtin_riscv_esp_fft_r2bf_s16(2, 5, 0, 7, 5); +__builtin_riscv_esp_fft_r2bf_s16_st_incp(1, 7, data, 1, 6); +__builtin_riscv_esp_fft_vst_r32_decp(2, data, 1); +__builtin_riscv_esp_ld_128_usar_ip(data, -464, 7); +__builtin_riscv_esp_ld_128_usar_xp(data, data, 0); +__builtin_riscv_esp_ld_xacc_ip(data, -224); +__builtin_riscv_esp_ldqa_s16_128_ip(data, 288); +__builtin_riscv_esp_ldqa_s16_128_xp(data, data); +__builtin_riscv_esp_ldqa_s8_128_ip(data, -1408); 
+__builtin_riscv_esp_ldqa_s8_128_xp(data, data); +__builtin_riscv_esp_ldqa_u16_128_ip(data, -1440); +__builtin_riscv_esp_ldqa_u16_128_xp(data, data); +__builtin_riscv_esp_ldqa_u8_128_ip(data, -816); +__builtin_riscv_esp_ldqa_u8_128_xp(data, data); +__builtin_riscv_esp_vldbc_16_ip(data, 380, 2); +__builtin_riscv_esp_vldbc_16_xp(data, data, 3); +__builtin_riscv_esp_vldbc_32_ip(data, -292, 7); +__builtin_riscv_esp_vldbc_32_xp(data, data, 1); +__builtin_riscv_esp_vldbc_8_ip(data, -416, 5); +__builtin_riscv_esp_vldbc_8_xp(data, data, 7); +__builtin_riscv_esp_vldext_s16_ip(data, -80, 0, 3); +__builtin_riscv_esp_vldext_s16_xp(data, data, 2, 5); +__builtin_riscv_esp_vldext_s8_ip(data, 0, 2, 7); +__builtin_riscv_esp_vldext_s8_xp(data, data, 7, 5); +__builtin_riscv_esp_vldext_u16_ip(data, 32, 0, 6); +__builtin_riscv_esp_vldext_u16_xp(data, data, 7, 6); +__builtin_riscv_esp_vldext_u8_ip(data, -16, 3, 1); +__builtin_riscv_esp_vldext_u8_xp(data, data, 5, 4); +__builtin_riscv_esp_vldhbc_16_incp(data, 2, 3); +__builtin_riscv_esp_ld_qacc_h_h_128_ip(data, -240); +__builtin_riscv_esp_ld_qacc_h_l_128_ip(data, -32); +__builtin_riscv_esp_ld_qacc_l_h_128_ip(data, -64); +__builtin_riscv_esp_ld_qacc_l_l_128_ip(data, -80); +__builtin_riscv_esp_ld_ua_state_ip(data, 1504); +__builtin_riscv_esp_ldxq_32(data, 6, 1, 7, 1); +__builtin_riscv_esp_st_qacc_h_h_128_ip(data, -480); +__builtin_riscv_esp_st_qacc_h_l_128_ip(data, -1712); +__builtin_riscv_esp_st_qacc_l_h_128_ip(data, 960); +__builtin_riscv_esp_st_qacc_l_l_128_ip(data, 1920); +__builtin_riscv_esp_st_ua_state_ip(data, -1360); +__builtin_riscv_esp_stxq_32(data, 6, 2, 3, 0); +__builtin_riscv_esp_vld_128_ip(data, -1136, 0); +__builtin_riscv_esp_vld_128_xp(data, data, 5); +__builtin_riscv_esp_vld_h_64_ip(data, 1008, 4); +__builtin_riscv_esp_vld_h_64_xp(data, data, 2); +__builtin_riscv_esp_vld_l_64_ip(data, -304, 6); +__builtin_riscv_esp_vld_l_64_xp(data, data, 6); +__builtin_riscv_esp_vst_128_ip(0, data, -1216); 
+__builtin_riscv_esp_vst_128_xp(data, 6, data); +__builtin_riscv_esp_vst_h_64_ip(1, data, -456); +__builtin_riscv_esp_vst_h_64_xp(data, 2, data); +__builtin_riscv_esp_vst_l_64_ip(6, data, 664); +__builtin_riscv_esp_vst_l_64_xp(data, 4, data); +__builtin_riscv_esp_slci_2q(2, 0, 14); +__builtin_riscv_esp_slcxxp_2q(data, data, 0, 1); +__builtin_riscv_esp_src_q(7, 3, 2); +__builtin_riscv_esp_src_q_ld_ip(1, data, 4, 1168, 4); +__builtin_riscv_esp_src_q_ld_xp(data, 0, data, 1, 0); +__builtin_riscv_esp_src_q_qup(3, 3, 0); +__builtin_riscv_esp_srci_2q(7, 4, 1); +__builtin_riscv_esp_srcmb_s16_q_qacc(2, 1, 5); +__builtin_riscv_esp_srcmb_s16_qacc(data, 0, 7); +__builtin_riscv_esp_srcmb_s8_q_qacc(7, 0, 3); +__builtin_riscv_esp_srcmb_s8_qacc(data, 1, 3); +__builtin_riscv_esp_srcmb_u16_q_qacc(6, 1, 0); +__builtin_riscv_esp_srcmb_u16_qacc(data, 0, 0); +__builtin_riscv_esp_srcmb_u8_q_qacc(6, 0, 7); +__builtin_riscv_esp_srcmb_u8_qacc(data, 1, 2); +__builtin_riscv_esp_srcq_128_st_incp(0, 5, data); +__builtin_riscv_esp_srcxxp_2q(data, data, 7, 5); +__builtin_riscv_esp_srs_s_xacc(data, data); +__builtin_riscv_esp_srs_u_xacc(data, data); +__builtin_riscv_esp_vsl_32(0, 3); +__builtin_riscv_esp_vsld_16(6, 4, 4); +__builtin_riscv_esp_vsld_32(2, 7, 5); +__builtin_riscv_esp_vsld_8(1, 0, 0); +__builtin_riscv_esp_vsr_s32(6, 2); +__builtin_riscv_esp_vsr_u32(3, 2); +__builtin_riscv_esp_vsrd_16(6, 2, 1); +__builtin_riscv_esp_vsrd_32(7, 5, 4); +__builtin_riscv_esp_vsrd_8(2, 1, 4); +__builtin_riscv_esp_st_s_xacc_ip(data, 912); +__builtin_riscv_esp_st_u_xacc_ip(data, -112); +} diff --git a/clang/test/Driver/print-supported-extensions-riscv.c b/clang/test/Driver/print-supported-extensions-riscv.c index 91f12b8416b2a..84e040f08d6bc 100644 --- a/clang/test/Driver/print-supported-extensions-riscv.c +++ b/clang/test/Driver/print-supported-extensions-riscv.c @@ -145,6 +145,7 @@ // CHECK-NEXT: xcvmac 1.0 'XCVmac' (CORE-V Multiply-Accumulate) // CHECK-NEXT: xcvmem 1.0 'XCVmem' (CORE-V Post-incrementing 
Load & Store) // CHECK-NEXT: xcvsimd 1.0 'XCVsimd' (CORE-V SIMD ALU) +// CHECK-NEXT: xesppie 1.0 'Espressif ESP32P4' // CHECK-NEXT: xsfcease 1.0 'XSfcease' (SiFive sf.cease Instruction) // CHECK-NEXT: xsfvcp 1.0 'XSfvcp' (SiFive Custom Vector Coprocessor Interface Instructions) // CHECK-NEXT: xsfvfnrclipxfqf 1.0 'XSfvfnrclipxfqf' (SiFive FP32-to-int8 Ranged Clip Instructions) diff --git a/clang/test/Misc/target-invalid-cpu-note.c b/clang/test/Misc/target-invalid-cpu-note.c index 0b529f0861d46..9823839816023 100644 --- a/clang/test/Misc/target-invalid-cpu-note.c +++ b/clang/test/Misc/target-invalid-cpu-note.c @@ -81,7 +81,7 @@ // RUN: not %clang_cc1 -triple riscv32 -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix RISCV32 // RISCV32: error: unknown target CPU 'not-a-cpu' -// RISCV32-NEXT: note: valid target CPU values are: generic-rv32, rocket-rv32, sifive-e20, sifive-e21, sifive-e24, sifive-e31, sifive-e34, sifive-e76, syntacore-scr1-base, syntacore-scr1-max, syntacore-scr3-rv32{{$}} +// RISCV32-NEXT: note: valid target CPU values are: esp32p4, generic-rv32, rocket-rv32, sifive-e20, sifive-e21, sifive-e24, sifive-e31, sifive-e34, sifive-e76, syntacore-scr1-base, syntacore-scr1-max, syntacore-scr3-rv32{{$}} // RUN: not %clang_cc1 -triple riscv64 -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix RISCV64 // RISCV64: error: unknown target CPU 'not-a-cpu' @@ -89,7 +89,7 @@ // RUN: not %clang_cc1 -triple riscv32 -tune-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix TUNE-RISCV32 // TUNE-RISCV32: error: unknown target CPU 'not-a-cpu' -// TUNE-RISCV32-NEXT: note: valid target CPU values are: generic-rv32, rocket-rv32, sifive-e20, sifive-e21, sifive-e24, sifive-e31, sifive-e34, sifive-e76, syntacore-scr1-base, syntacore-scr1-max, syntacore-scr3-rv32, generic, rocket, sifive-7-series{{$}} +// TUNE-RISCV32-NEXT: note: valid target CPU values are: esp32p4, generic-rv32, rocket-rv32, sifive-e20, sifive-e21, 
sifive-e24, sifive-e31, sifive-e34, sifive-e76, syntacore-scr1-base, syntacore-scr1-max, syntacore-scr3-rv32, generic, rocket, sifive-7-series{{$}} // RUN: not %clang_cc1 -triple riscv64 -tune-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix TUNE-RISCV64 // TUNE-RISCV64: error: unknown target CPU 'not-a-cpu' diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td index 2da154c300344..d9e13ee4c63c4 100644 --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -1904,3 +1904,7 @@ let TargetPrefix = "riscv" in { include "llvm/IR/IntrinsicsRISCVXTHead.td" include "llvm/IR/IntrinsicsRISCVXsf.td" include "llvm/IR/IntrinsicsRISCVXCV.td" + +// Generated code +// -------------- +include "llvm/IR/IntrinsicsRISCVESP32P4.td" \ No newline at end of file diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVESP32P4.td b/llvm/include/llvm/IR/IntrinsicsRISCVESP32P4.td new file mode 100644 index 0000000000000..c1a11f90f12dd --- /dev/null +++ b/llvm/include/llvm/IR/IntrinsicsRISCVESP32P4.td @@ -0,0 +1,1065 @@ +let TargetPrefix = "riscv" in { +def int_riscv_esp_vcmulas_s16_qacc_h: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s16_qacc_h">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s16_qacc_h_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s16_qacc_h_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s16_qacc_h_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s16_qacc_h_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s16_qacc_l: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s16_qacc_l">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s16_qacc_l_ld_ip: 
ClangBuiltin<"__builtin_riscv_esp_vcmulas_s16_qacc_l_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s16_qacc_l_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s16_qacc_l_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s8_qacc_h: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s8_qacc_h">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s8_qacc_h_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s8_qacc_h_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s8_qacc_h_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s8_qacc_h_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s8_qacc_l: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s8_qacc_l">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s8_qacc_l_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s8_qacc_l_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s8_qacc_l_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s8_qacc_l_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_qacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_qacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_qacc_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, 
ImmArg>]>; + +def int_riscv_esp_vmulas_s16_qacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_qacc_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_qacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_qacc_st_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_qacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_qacc_st_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_xacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_xacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_xacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_xacc_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_xacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_xacc_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_xacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_xacc_st_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_xacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_xacc_st_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_qacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_qacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_qacc_ld_ip">, + Intrinsic<[], [llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_qacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_qacc_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_qacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_qacc_st_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_qacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_qacc_st_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_xacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_xacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_xacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_xacc_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_xacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_xacc_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_xacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_xacc_st_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_xacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_xacc_st_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_qacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_qacc_ld_ip: 
ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_qacc_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_qacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_qacc_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_qacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_qacc_st_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_qacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_qacc_st_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_xacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_xacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_xacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_xacc_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_xacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_xacc_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_xacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_xacc_st_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_xacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_xacc_st_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_qacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_qacc">, + Intrinsic<[], [llvm_i32_ty, 
llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_qacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_qacc_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_qacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_qacc_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_qacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_qacc_st_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_qacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_qacc_st_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_xacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_xacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_xacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_xacc_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_xacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_xacc_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_xacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_xacc_st_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_xacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_xacc_st_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_qacc_ldbc_incp: 
ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_qacc_ldbc_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_qacc_ldbc_incp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_qacc_ldbc_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_qacc_ldbc_incp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_qacc_ldbc_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_qacc_ldbc_incp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_qacc_ldbc_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsmulas_s16_qacc: ClangBuiltin<"__builtin_riscv_esp_vsmulas_s16_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsmulas_s16_qacc_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsmulas_s16_qacc_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsmulas_s8_qacc: ClangBuiltin<"__builtin_riscv_esp_vsmulas_s8_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsmulas_s8_qacc_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsmulas_s8_qacc_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsmulas_u16_qacc: ClangBuiltin<"__builtin_riscv_esp_vsmulas_u16_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsmulas_u16_qacc_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsmulas_u16_qacc_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, 
ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsmulas_u8_qacc: ClangBuiltin<"__builtin_riscv_esp_vsmulas_u8_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsmulas_u8_qacc_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsmulas_u8_qacc_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_s16: ClangBuiltin<"__builtin_riscv_esp_cmul_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_s8: ClangBuiltin<"__builtin_riscv_esp_cmul_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_s8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_s8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_s8_st_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_s8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_u16: ClangBuiltin<"__builtin_riscv_esp_cmul_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_u16_ld_incp: 
ClangBuiltin<"__builtin_riscv_esp_cmul_u16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_u16_st_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_u16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_u8: ClangBuiltin<"__builtin_riscv_esp_cmul_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_u8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_u8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_u8_st_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_u8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_max_s16_a: ClangBuiltin<"__builtin_riscv_esp_max_s16_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_max_s32_a: ClangBuiltin<"__builtin_riscv_esp_max_s32_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_max_s8_a: ClangBuiltin<"__builtin_riscv_esp_max_s8_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_max_u16_a: ClangBuiltin<"__builtin_riscv_esp_max_u16_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_max_u32_a: ClangBuiltin<"__builtin_riscv_esp_max_u32_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_max_u8_a: ClangBuiltin<"__builtin_riscv_esp_max_u8_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_min_s16_a: ClangBuiltin<"__builtin_riscv_esp_min_s16_a">, + Intrinsic<[], 
[llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_min_s32_a: ClangBuiltin<"__builtin_riscv_esp_min_s32_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_min_s8_a: ClangBuiltin<"__builtin_riscv_esp_min_s8_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_min_u16_a: ClangBuiltin<"__builtin_riscv_esp_min_u16_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_min_u32_a: ClangBuiltin<"__builtin_riscv_esp_min_u32_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_min_u8_a: ClangBuiltin<"__builtin_riscv_esp_min_u8_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vabs_16: ClangBuiltin<"__builtin_riscv_esp_vabs_16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vabs_32: ClangBuiltin<"__builtin_riscv_esp_vabs_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vabs_8: ClangBuiltin<"__builtin_riscv_esp_vabs_8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_s16: ClangBuiltin<"__builtin_riscv_esp_vadd_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_s32: ClangBuiltin<"__builtin_riscv_esp_vadd_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_s32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_s32_ld_incp">, + Intrinsic<[], 
[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_s32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_s32_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_s8: ClangBuiltin<"__builtin_riscv_esp_vadd_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_s8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_s8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_s8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_s8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_u16: ClangBuiltin<"__builtin_riscv_esp_vadd_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_u16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_u16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_u16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_u16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_u32: ClangBuiltin<"__builtin_riscv_esp_vadd_u32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_u32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_u32_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_u32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_u32_st_incp">, + Intrinsic<[], 
[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_u8: ClangBuiltin<"__builtin_riscv_esp_vadd_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_u8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_u8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_u8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_u8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vclamp_s16: ClangBuiltin<"__builtin_riscv_esp_vclamp_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_s16: ClangBuiltin<"__builtin_riscv_esp_vmax_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_s32: ClangBuiltin<"__builtin_riscv_esp_vmax_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_s32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_s32_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_s32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_s32_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, 
llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_s8: ClangBuiltin<"__builtin_riscv_esp_vmax_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_s8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_s8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_s8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_s8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_u16: ClangBuiltin<"__builtin_riscv_esp_vmax_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_u16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_u16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_u16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_u16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_u32: ClangBuiltin<"__builtin_riscv_esp_vmax_u32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_u32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_u32_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_u32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_u32_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_u8: ClangBuiltin<"__builtin_riscv_esp_vmax_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def 
int_riscv_esp_vmax_u8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_u8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_u8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_u8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_s16: ClangBuiltin<"__builtin_riscv_esp_vmin_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_s32: ClangBuiltin<"__builtin_riscv_esp_vmin_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_s32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_s32_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_s32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_s32_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_s8: ClangBuiltin<"__builtin_riscv_esp_vmin_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_s8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_s8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def 
int_riscv_esp_vmin_s8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_s8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_u16: ClangBuiltin<"__builtin_riscv_esp_vmin_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_u16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_u16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_u16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_u16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_u32: ClangBuiltin<"__builtin_riscv_esp_vmin_u32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_u32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_u32_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_u32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_u32_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_u8: ClangBuiltin<"__builtin_riscv_esp_vmin_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_u8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_u8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_u8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_u8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def 
int_riscv_esp_vmul_s16: ClangBuiltin<"__builtin_riscv_esp_vmul_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_s16_s8xs8: ClangBuiltin<"__builtin_riscv_esp_vmul_s16_s8xs8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_s32_s16xs16: ClangBuiltin<"__builtin_riscv_esp_vmul_s32_s16xs16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_s8: ClangBuiltin<"__builtin_riscv_esp_vmul_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_s8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_s8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_s8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_s8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_u16: ClangBuiltin<"__builtin_riscv_esp_vmul_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_u16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_u16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_u16_st_incp: 
ClangBuiltin<"__builtin_riscv_esp_vmul_u16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_u8: ClangBuiltin<"__builtin_riscv_esp_vmul_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_u8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_u8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_u8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_u8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vprelu_s16: ClangBuiltin<"__builtin_riscv_esp_vprelu_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vprelu_s8: ClangBuiltin<"__builtin_riscv_esp_vprelu_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vrelu_s16: ClangBuiltin<"__builtin_riscv_esp_vrelu_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vrelu_s8: ClangBuiltin<"__builtin_riscv_esp_vrelu_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vsadds_s16: ClangBuiltin<"__builtin_riscv_esp_vsadds_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsadds_s8: ClangBuiltin<"__builtin_riscv_esp_vsadds_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsadds_u16: ClangBuiltin<"__builtin_riscv_esp_vsadds_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsadds_u8: ClangBuiltin<"__builtin_riscv_esp_vsadds_u8">, + Intrinsic<[], 
[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsat_s16: ClangBuiltin<"__builtin_riscv_esp_vsat_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsat_s32: ClangBuiltin<"__builtin_riscv_esp_vsat_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsat_s8: ClangBuiltin<"__builtin_riscv_esp_vsat_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsat_u16: ClangBuiltin<"__builtin_riscv_esp_vsat_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsat_u32: ClangBuiltin<"__builtin_riscv_esp_vsat_u32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsat_u8: ClangBuiltin<"__builtin_riscv_esp_vsat_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vssubs_s16: ClangBuiltin<"__builtin_riscv_esp_vssubs_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vssubs_s8: ClangBuiltin<"__builtin_riscv_esp_vssubs_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vssubs_u16: ClangBuiltin<"__builtin_riscv_esp_vssubs_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vssubs_u8: ClangBuiltin<"__builtin_riscv_esp_vssubs_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_s16: ClangBuiltin<"__builtin_riscv_esp_vsub_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_s32: ClangBuiltin<"__builtin_riscv_esp_vsub_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_s32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_s32_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_s32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_s32_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_s8: ClangBuiltin<"__builtin_riscv_esp_vsub_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_s8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_s8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_s8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_s8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_u16: ClangBuiltin<"__builtin_riscv_esp_vsub_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_u16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_u16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_u16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_u16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_u32: ClangBuiltin<"__builtin_riscv_esp_vsub_u32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_u32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_u32_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_u32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_u32_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_u8: ClangBuiltin<"__builtin_riscv_esp_vsub_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_u8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_u8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_u8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_u8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_addx2: ClangBuiltin<"__builtin_riscv_esp_addx2">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_addx4: ClangBuiltin<"__builtin_riscv_esp_addx4">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_sat: ClangBuiltin<"__builtin_riscv_esp_sat">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_subx2: ClangBuiltin<"__builtin_riscv_esp_subx2">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_subx4: ClangBuiltin<"__builtin_riscv_esp_subx4">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_andq: ClangBuiltin<"__builtin_riscv_esp_andq">, + 
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_notq: ClangBuiltin<"__builtin_riscv_esp_notq">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_orq: ClangBuiltin<"__builtin_riscv_esp_orq">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_xorq: ClangBuiltin<"__builtin_riscv_esp_xorq">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_eq_s16: ClangBuiltin<"__builtin_riscv_esp_vcmp_eq_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_eq_s32: ClangBuiltin<"__builtin_riscv_esp_vcmp_eq_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_eq_s8: ClangBuiltin<"__builtin_riscv_esp_vcmp_eq_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_eq_u16: ClangBuiltin<"__builtin_riscv_esp_vcmp_eq_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_eq_u32: ClangBuiltin<"__builtin_riscv_esp_vcmp_eq_u32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_eq_u8: ClangBuiltin<"__builtin_riscv_esp_vcmp_eq_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_gt_s16: ClangBuiltin<"__builtin_riscv_esp_vcmp_gt_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_gt_s32: ClangBuiltin<"__builtin_riscv_esp_vcmp_gt_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_gt_s8: ClangBuiltin<"__builtin_riscv_esp_vcmp_gt_s8">, + Intrinsic<[], [llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_gt_u16: ClangBuiltin<"__builtin_riscv_esp_vcmp_gt_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_gt_u32: ClangBuiltin<"__builtin_riscv_esp_vcmp_gt_u32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_gt_u8: ClangBuiltin<"__builtin_riscv_esp_vcmp_gt_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_lt_s16: ClangBuiltin<"__builtin_riscv_esp_vcmp_lt_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_lt_s32: ClangBuiltin<"__builtin_riscv_esp_vcmp_lt_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_lt_s8: ClangBuiltin<"__builtin_riscv_esp_vcmp_lt_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_lt_u16: ClangBuiltin<"__builtin_riscv_esp_vcmp_lt_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_lt_u32: ClangBuiltin<"__builtin_riscv_esp_vcmp_lt_u32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_lt_u8: ClangBuiltin<"__builtin_riscv_esp_vcmp_lt_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_mov_s16_qacc: ClangBuiltin<"__builtin_riscv_esp_mov_s16_qacc">, + Intrinsic<[], [llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_mov_s8_qacc: ClangBuiltin<"__builtin_riscv_esp_mov_s8_qacc">, + Intrinsic<[], [llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_mov_u16_qacc: ClangBuiltin<"__builtin_riscv_esp_mov_u16_qacc">, + Intrinsic<[], [llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_mov_u8_qacc: 
ClangBuiltin<"__builtin_riscv_esp_mov_u8_qacc">, + Intrinsic<[], [llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_movi_16_a: ClangBuiltin<"__builtin_riscv_esp_movi_16_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_movi_16_q: ClangBuiltin<"__builtin_riscv_esp_movi_16_q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_movi_32_a: ClangBuiltin<"__builtin_riscv_esp_movi_32_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_movi_32_q: ClangBuiltin<"__builtin_riscv_esp_movi_32_q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_movi_8_a: ClangBuiltin<"__builtin_riscv_esp_movi_8_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_movi_8_q: ClangBuiltin<"__builtin_riscv_esp_movi_8_q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_movx_r_cfg: ClangBuiltin<"__builtin_riscv_esp_movx_r_cfg">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_r_fft_bit_width: ClangBuiltin<"__builtin_riscv_esp_movx_r_fft_bit_width">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_r_perf: ClangBuiltin<"__builtin_riscv_esp_movx_r_perf">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_movx_r_sar: ClangBuiltin<"__builtin_riscv_esp_movx_r_sar">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_r_sar_bytes: ClangBuiltin<"__builtin_riscv_esp_movx_r_sar_bytes">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_r_xacc_h: ClangBuiltin<"__builtin_riscv_esp_movx_r_xacc_h">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_r_xacc_l: ClangBuiltin<"__builtin_riscv_esp_movx_r_xacc_l">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_w_cfg: ClangBuiltin<"__builtin_riscv_esp_movx_w_cfg">, + 
Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_w_fft_bit_width: ClangBuiltin<"__builtin_riscv_esp_movx_w_fft_bit_width">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_w_perf: ClangBuiltin<"__builtin_riscv_esp_movx_w_perf">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_w_sar: ClangBuiltin<"__builtin_riscv_esp_movx_w_sar">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_w_sar_bytes: ClangBuiltin<"__builtin_riscv_esp_movx_w_sar_bytes">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_w_xacc_h: ClangBuiltin<"__builtin_riscv_esp_movx_w_xacc_h">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_w_xacc_l: ClangBuiltin<"__builtin_riscv_esp_movx_w_xacc_l">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_vext_s16: ClangBuiltin<"__builtin_riscv_esp_vext_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vext_s8: ClangBuiltin<"__builtin_riscv_esp_vext_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vext_u16: ClangBuiltin<"__builtin_riscv_esp_vext_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vext_u8: ClangBuiltin<"__builtin_riscv_esp_vext_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vunzip_16: ClangBuiltin<"__builtin_riscv_esp_vunzip_16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vunzip_32: ClangBuiltin<"__builtin_riscv_esp_vunzip_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vunzip_8: ClangBuiltin<"__builtin_riscv_esp_vunzip_8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vunzipt_16: ClangBuiltin<"__builtin_riscv_esp_vunzipt_16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, 
llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vunzipt_8: ClangBuiltin<"__builtin_riscv_esp_vunzipt_8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vzip_16: ClangBuiltin<"__builtin_riscv_esp_vzip_16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vzip_32: ClangBuiltin<"__builtin_riscv_esp_vzip_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vzip_8: ClangBuiltin<"__builtin_riscv_esp_vzip_8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vzipt_16: ClangBuiltin<"__builtin_riscv_esp_vzipt_16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vzipt_8: ClangBuiltin<"__builtin_riscv_esp_vzipt_8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_zero_q: ClangBuiltin<"__builtin_riscv_esp_zero_q">, + Intrinsic<[], [llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_zero_qacc: ClangBuiltin<"__builtin_riscv_esp_zero_qacc">, + Intrinsic<[], [], []>; + +def int_riscv_esp_zero_xacc: ClangBuiltin<"__builtin_riscv_esp_zero_xacc">, + Intrinsic<[], [], []>; + +def int_riscv_esp_fft_ams_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_fft_ams_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_fft_ams_s16_ld_incp_uaup: ClangBuiltin<"__builtin_riscv_esp_fft_ams_s16_ld_incp_uaup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_fft_ams_s16_ld_r32_decp: ClangBuiltin<"__builtin_riscv_esp_fft_ams_s16_ld_r32_decp">, + Intrinsic<[], [llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_fft_ams_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_fft_ams_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_fft_bitrev: ClangBuiltin<"__builtin_riscv_esp_fft_bitrev">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_fft_cmul_s16_ld_xp: ClangBuiltin<"__builtin_riscv_esp_fft_cmul_s16_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_fft_cmul_s16_st_xp: ClangBuiltin<"__builtin_riscv_esp_fft_cmul_s16_st_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_fft_r2bf_s16: ClangBuiltin<"__builtin_riscv_esp_fft_r2bf_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_fft_r2bf_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_fft_r2bf_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_fft_vst_r32_decp: ClangBuiltin<"__builtin_riscv_esp_fft_vst_r32_decp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_ld_128_usar_ip: ClangBuiltin<"__builtin_riscv_esp_ld_128_usar_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_ld_128_usar_xp: ClangBuiltin<"__builtin_riscv_esp_ld_128_usar_xp">, + Intrinsic<[], [llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ld_xacc_ip: ClangBuiltin<"__builtin_riscv_esp_ld_xacc_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ldqa_s16_128_ip: ClangBuiltin<"__builtin_riscv_esp_ldqa_s16_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ldqa_s16_128_xp: ClangBuiltin<"__builtin_riscv_esp_ldqa_s16_128_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_ldqa_s8_128_ip: ClangBuiltin<"__builtin_riscv_esp_ldqa_s8_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ldqa_s8_128_xp: ClangBuiltin<"__builtin_riscv_esp_ldqa_s8_128_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_ldqa_u16_128_ip: ClangBuiltin<"__builtin_riscv_esp_ldqa_u16_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ldqa_u16_128_xp: ClangBuiltin<"__builtin_riscv_esp_ldqa_u16_128_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_ldqa_u8_128_ip: ClangBuiltin<"__builtin_riscv_esp_ldqa_u8_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ldqa_u8_128_xp: ClangBuiltin<"__builtin_riscv_esp_ldqa_u8_128_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_vldbc_16_ip: ClangBuiltin<"__builtin_riscv_esp_vldbc_16_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldbc_16_xp: ClangBuiltin<"__builtin_riscv_esp_vldbc_16_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vldbc_32_ip: ClangBuiltin<"__builtin_riscv_esp_vldbc_32_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldbc_32_xp: ClangBuiltin<"__builtin_riscv_esp_vldbc_32_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vldbc_8_ip: 
ClangBuiltin<"__builtin_riscv_esp_vldbc_8_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldbc_8_xp: ClangBuiltin<"__builtin_riscv_esp_vldbc_8_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vldext_s16_ip: ClangBuiltin<"__builtin_riscv_esp_vldext_s16_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldext_s16_xp: ClangBuiltin<"__builtin_riscv_esp_vldext_s16_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldext_s8_ip: ClangBuiltin<"__builtin_riscv_esp_vldext_s8_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldext_s8_xp: ClangBuiltin<"__builtin_riscv_esp_vldext_s8_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldext_u16_ip: ClangBuiltin<"__builtin_riscv_esp_vldext_u16_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldext_u16_xp: ClangBuiltin<"__builtin_riscv_esp_vldext_u16_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldext_u8_ip: ClangBuiltin<"__builtin_riscv_esp_vldext_u8_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldext_u8_xp: ClangBuiltin<"__builtin_riscv_esp_vldext_u8_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldhbc_16_incp: ClangBuiltin<"__builtin_riscv_esp_vldhbc_16_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_ld_qacc_h_h_128_ip: ClangBuiltin<"__builtin_riscv_esp_ld_qacc_h_h_128_ip">, + 
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ld_qacc_h_l_128_ip: ClangBuiltin<"__builtin_riscv_esp_ld_qacc_h_l_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ld_qacc_l_h_128_ip: ClangBuiltin<"__builtin_riscv_esp_ld_qacc_l_h_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ld_qacc_l_l_128_ip: ClangBuiltin<"__builtin_riscv_esp_ld_qacc_l_l_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ld_ua_state_ip: ClangBuiltin<"__builtin_riscv_esp_ld_ua_state_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ldxq_32: ClangBuiltin<"__builtin_riscv_esp_ldxq_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_st_qacc_h_h_128_ip: ClangBuiltin<"__builtin_riscv_esp_st_qacc_h_h_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_st_qacc_h_l_128_ip: ClangBuiltin<"__builtin_riscv_esp_st_qacc_h_l_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_st_qacc_l_h_128_ip: ClangBuiltin<"__builtin_riscv_esp_st_qacc_l_h_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_st_qacc_l_l_128_ip: ClangBuiltin<"__builtin_riscv_esp_st_qacc_l_l_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_st_ua_state_ip: ClangBuiltin<"__builtin_riscv_esp_st_ua_state_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_stxq_32: ClangBuiltin<"__builtin_riscv_esp_stxq_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vld_128_ip: ClangBuiltin<"__builtin_riscv_esp_vld_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def 
int_riscv_esp_vld_128_xp: ClangBuiltin<"__builtin_riscv_esp_vld_128_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vld_h_64_ip: ClangBuiltin<"__builtin_riscv_esp_vld_h_64_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vld_h_64_xp: ClangBuiltin<"__builtin_riscv_esp_vld_h_64_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vld_l_64_ip: ClangBuiltin<"__builtin_riscv_esp_vld_l_64_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vld_l_64_xp: ClangBuiltin<"__builtin_riscv_esp_vld_l_64_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vst_128_ip: ClangBuiltin<"__builtin_riscv_esp_vst_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vst_128_xp: ClangBuiltin<"__builtin_riscv_esp_vst_128_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vst_h_64_ip: ClangBuiltin<"__builtin_riscv_esp_vst_h_64_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vst_h_64_xp: ClangBuiltin<"__builtin_riscv_esp_vst_h_64_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vst_l_64_ip: ClangBuiltin<"__builtin_riscv_esp_vst_l_64_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vst_l_64_xp: ClangBuiltin<"__builtin_riscv_esp_vst_l_64_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_slci_2q: ClangBuiltin<"__builtin_riscv_esp_slci_2q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_slcxxp_2q: ClangBuiltin<"__builtin_riscv_esp_slcxxp_2q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_src_q: ClangBuiltin<"__builtin_riscv_esp_src_q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_src_q_ld_ip: ClangBuiltin<"__builtin_riscv_esp_src_q_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_src_q_ld_xp: ClangBuiltin<"__builtin_riscv_esp_src_q_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_src_q_qup: ClangBuiltin<"__builtin_riscv_esp_src_q_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_srci_2q: ClangBuiltin<"__builtin_riscv_esp_srci_2q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_srcmb_s16_q_qacc: ClangBuiltin<"__builtin_riscv_esp_srcmb_s16_q_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_srcmb_s16_qacc: ClangBuiltin<"__builtin_riscv_esp_srcmb_s16_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_srcmb_s8_q_qacc: ClangBuiltin<"__builtin_riscv_esp_srcmb_s8_q_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_srcmb_s8_qacc: ClangBuiltin<"__builtin_riscv_esp_srcmb_s8_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_srcmb_u16_q_qacc: ClangBuiltin<"__builtin_riscv_esp_srcmb_u16_q_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_srcmb_u16_qacc: ClangBuiltin<"__builtin_riscv_esp_srcmb_u16_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def 
int_riscv_esp_srcmb_u8_q_qacc: ClangBuiltin<"__builtin_riscv_esp_srcmb_u8_q_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_srcmb_u8_qacc: ClangBuiltin<"__builtin_riscv_esp_srcmb_u8_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_srcq_128_st_incp: ClangBuiltin<"__builtin_riscv_esp_srcq_128_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_srcxxp_2q: ClangBuiltin<"__builtin_riscv_esp_srcxxp_2q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_srs_s_xacc: ClangBuiltin<"__builtin_riscv_esp_srs_s_xacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_srs_u_xacc: ClangBuiltin<"__builtin_riscv_esp_srs_u_xacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_vsl_32: ClangBuiltin<"__builtin_riscv_esp_vsl_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsld_16: ClangBuiltin<"__builtin_riscv_esp_vsld_16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsld_32: ClangBuiltin<"__builtin_riscv_esp_vsld_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsld_8: ClangBuiltin<"__builtin_riscv_esp_vsld_8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsr_s32: ClangBuiltin<"__builtin_riscv_esp_vsr_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsr_u32: ClangBuiltin<"__builtin_riscv_esp_vsr_u32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsrd_16: ClangBuiltin<"__builtin_riscv_esp_vsrd_16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, 
ImmArg>]>; + +def int_riscv_esp_vsrd_32: ClangBuiltin<"__builtin_riscv_esp_vsrd_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsrd_8: ClangBuiltin<"__builtin_riscv_esp_vsrd_8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_st_s_xacc_ip: ClangBuiltin<"__builtin_riscv_esp_st_s_xacc_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_st_u_xacc_ip: ClangBuiltin<"__builtin_riscv_esp_st_u_xacc_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + + +} diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp index a288e7d884d31..0bbd4e49045f5 100644 --- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp +++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp @@ -450,6 +450,14 @@ struct RISCVOperand final : public MCParsedAsmOperand { } } + static bool inRange(const MCExpr *Expr, int64_t MinValue, int64_t MaxValue) { + if (auto *CE = dyn_cast(Expr)) { + int64_t Value = CE->getValue(); + return Value >= MinValue && Value <= MaxValue; + } + return false; + } + bool isToken() const override { return Kind == KindTy::Token; } bool isReg() const override { return Kind == KindTy::Register; } bool isV0Reg() const { @@ -475,6 +483,43 @@ struct RISCVOperand final : public MCParsedAsmOperand { bool isRlist() const { return Kind == KindTy::Rlist; } bool isSpimm() const { return Kind == KindTy::Spimm; } + bool isImm(int64_t MinValue, int64_t MaxValue) const { + return Kind == KindTy::Immediate && inRange(getImm(), MinValue, MaxValue); + } + + bool isImm8() const { + // The addi instruction maybe expaned to addmi and addi. 
+ return isImm((-32768 - 128), (32512 + 127)); + } + + bool isSelect_2() const { return isImm(0, 1); } + + bool isSelect_4() const { return isImm(0, 3); } + + bool isSelect_8() const { return isImm(0, 7); } + + bool isSelect_16() const { return isImm(0, 16); } + + bool isOffset_16_16() const { + return isImm(-128, 112) && + ((cast(getImm())->getValue() & 0xf) == 0); + } + + bool isOffset_256_8() const { + return isImm(-1024, 1016) && + ((cast(getImm())->getValue() & 0x7) == 0); + } + + bool isOffset_256_16() const { + return isImm(-2048, 2032) && + ((cast(getImm())->getValue() & 0xf) == 0); + } + + bool isOffset_256_4() const { + return isImm(-512, 508) && + ((cast(getImm())->getValue() & 0x3) == 0); + } + bool isGPR() const { return Kind == KindTy::Register && RISCVMCRegisterClasses[RISCV::GPRRegClassID].contains(Reg.RegNum); @@ -871,6 +916,54 @@ struct RISCVOperand final : public MCParsedAsmOperand { VK == RISCVMCExpr::VK_RISCV_None; } + bool isUImm9() const { + if (!isImm()) + return false; + int64_t Imm; + RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None; + bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK); + return IsConstantImm && isUInt<9>(Imm) && + VK == RISCVMCExpr::VK_RISCV_None; + } + + bool isUImm10() const { + RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None; + int64_t Imm; + bool IsValid; + if (!isImm()) + return false; + bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK); + if (!IsConstantImm) + IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK); + else + IsValid = isUInt<10>(fixImmediateForRV32(Imm, isRV64Imm())); + return IsValid && VK == RISCVMCExpr::VK_RISCV_None; + } + + bool isUImm12() const { + if (!isImm()) + return false; + int64_t Imm; + RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None; + bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK); + return IsConstantImm && isUInt<12>(Imm) && + VK == RISCVMCExpr::VK_RISCV_None; + } + + bool isUImm13() const { + RISCVMCExpr::VariantKind VK = 
RISCVMCExpr::VK_RISCV_None; + int64_t Imm; + bool IsValid; + if (!isImm()) + return false; + bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK); + if (!IsConstantImm) + IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK); + else + IsValid = isUInt<13>(fixImmediateForRV32(Imm, isRV64Imm())); + return IsValid && VK == RISCVMCExpr::VK_RISCV_None; + } + bool isUImm10Lsb00NonZero() const { if (!isImm()) return false; @@ -1571,6 +1664,10 @@ bool RISCVAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, return generateImmOutOfRangeError( Operands, ErrorInfo, -(1 << 11), (1 << 11) - 32, "immediate must be a multiple of 32 bytes in the range"); + case Match_InvalidUImm12: + return generateImmOutOfRangeError( + Operands, ErrorInfo, 0, (1 << 12) - 1, + "immediate must be in the range"); case Match_InvalidSImm13Lsb0: return generateImmOutOfRangeError( Operands, ErrorInfo, -(1 << 12), (1 << 12) - 2, diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt index f28a7092e3cec..47e4fbb41c822 100644 --- a/llvm/lib/Target/RISCV/CMakeLists.txt +++ b/llvm/lib/Target/RISCV/CMakeLists.txt @@ -42,6 +42,7 @@ add_llvm_target(RISCVCodeGen RISCVInstrInfo.cpp RISCVISelDAGToDAG.cpp RISCVISelLowering.cpp + RISCVESP32P4ISelLowering.cpp RISCVMachineFunctionInfo.cpp RISCVMergeBaseOffset.cpp RISCVOptWInstrs.cpp diff --git a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp index 23897e2d98f63..409de51d05898 100644 --- a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp +++ b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp @@ -81,6 +81,20 @@ static DecodeStatus DecodeGPRRegisterClass(MCInst &Inst, uint32_t RegNo, return MCDisassembler::Success; } +static DecodeStatus DecodeGPRPIERegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const MCDisassembler *Decoder) { + auto bit4 = RegNo & 0x8; + RegNo |= (bit4 << 4); + RegNo |= (1 << 3); + if ((RegNo 
>= 8 && RegNo <= 15) || (RegNo >= 24 && RegNo <= 31)) { + MCRegister Reg = RISCV::X0 + RegNo; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; + } + return MCDisassembler::Fail; +} + static DecodeStatus DecodeGPRX1X5RegisterClass(MCInst &Inst, uint32_t RegNo, uint64_t Address, const MCDisassembler *Decoder) { @@ -262,6 +276,21 @@ static DecodeStatus DecodeVRM8RegisterClass(MCInst &Inst, uint32_t RegNo, return MCDisassembler::Success; } +static const unsigned QRDecoderTable[] = {RISCV::Q0, RISCV::Q1, RISCV::Q2, + RISCV::Q3, RISCV::Q4, RISCV::Q5, + RISCV::Q6, RISCV::Q7}; + +static DecodeStatus DecodeQRRegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo >= std::size(QRDecoderTable)) + return MCDisassembler::Fail; + + unsigned Reg = QRDecoderTable[RegNo]; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + static DecodeStatus decodeVMaskReg(MCInst &Inst, uint32_t RegNo, uint64_t Address, const MCDisassembler *Decoder) { @@ -381,6 +410,43 @@ static DecodeStatus decodeCSSPushPopchk(MCInst &Inst, uint32_t Insn, uint64_t Address, const MCDisassembler *Decoder); +static DecodeStatus decodeSelect_2Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder); + +static DecodeStatus decodeSelect_4Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder); + +static DecodeStatus decodeSelect_8Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder); + +static DecodeStatus decodeSelect_16Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder); + +static DecodeStatus decodeOffset_16_16Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder); + +static DecodeStatus decodeOffset_256_8Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder); + +static DecodeStatus decodeOffset_256_16Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder); + 
+static DecodeStatus decodeOffset_256_4Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder); + +static DecodeStatus decodeUImm13_Step4Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder); + +static DecodeStatus decodeUImm10_Step4Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder); + #include "RISCVGenDisassemblerTables.inc" static DecodeStatus decodeRVCInstrRdRs1ImmZero(MCInst &Inst, uint32_t Insn, @@ -500,6 +566,90 @@ static DecodeStatus decodeZcmpSpimm(MCInst &Inst, uint32_t Imm, return MCDisassembler::Success; } +static DecodeStatus decodeSelect_2Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeSelect_4Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeSelect_8Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeSelect_16Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeOffset_16_16Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isInt<8>(Imm) && "Invalid immediate"); + auto ImmSigned = SignExtend64<4>(Imm); + Inst.addOperand(MCOperand::createImm(ImmSigned * 16)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeOffset_256_8Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void 
*Decoder) { + assert(isInt<16>(Imm) && "Invalid immediate"); + auto ImmSigned = SignExtend64<4>(Imm); + Inst.addOperand(MCOperand::createImm(ImmSigned * 8)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeOffset_256_16Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isInt<16>(Imm) && "Invalid immediate"); + auto ImmSigned = SignExtend64<4>(Imm); + Inst.addOperand(MCOperand::createImm(ImmSigned * 16)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeOffset_256_4Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isInt<16>(Imm) && "Invalid immediate"); + auto ImmSigned = SignExtend64<4>(Imm); + Inst.addOperand(MCOperand::createImm(ImmSigned * 4)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeUImm13_Step4Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<13>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm((Imm * 2) * 2)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeUImm10_Step4Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<10>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm * 2)); + return MCDisassembler::Success; +} + // Add implied SP operand for C.*SP compressed instructions. The SP operand // isn't explicitly encoded in the instruction. 
void RISCVDisassembler::addSPOperands(MCInst &MI) const { @@ -597,6 +747,8 @@ DecodeStatus RISCVDisassembler::getInstruction32(MCInst &MI, uint64_t &Size, TRY_TO_DECODE_FEATURE( RISCV::FeatureVendorXSfvfnrclipxfqf, DecoderTableXSfvfnrclipxfqf32, "SiFive FP32-to-int8 Ranged Clip Instructions opcode table"); + TRY_TO_DECODE_FEATURE(RISCV::FeatureVendorESP32P4, DecoderTableESP32P432, + "ESP32P4 Instruction opcode table"); TRY_TO_DECODE_FEATURE(RISCV::FeatureVendorXSiFivecdiscarddlone, DecoderTableXSiFivecdiscarddlone32, "SiFive sf.cdiscard.d.l1 custom opcode table"); diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h index 626206962e752..61e0c10301572 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h @@ -52,7 +52,8 @@ enum { InstFormatCSB = 20, InstFormatCSH = 21, InstFormatOther = 22, - + InstFormatESP32P4 = 23, + InstFormatMask = 31, InstFormatShift = 0, @@ -280,7 +281,9 @@ enum OperandType : unsigned { OPERAND_UIMM8_GE32, OPERAND_UIMM9_LSB000, OPERAND_UIMM10_LSB00_NONZERO, + OPERAND_UIMM10_STEP4, OPERAND_UIMM12, + OPERAND_UIMM13_STEP4, OPERAND_UIMM16, OPERAND_UIMM32, OPERAND_ZERO, diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp index d18ded271a085..8724e471d2b34 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp @@ -313,3 +313,119 @@ const char *RISCVInstPrinter::getRegisterName(MCRegister Reg) { return getRegisterName(Reg, ArchRegNames ? 
RISCV::NoRegAltName : RISCV::ABIRegAltName); } + +void RISCVInstPrinter::printImm8_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert(isInt<8>(Value) && + "Invalid argument, value must be in ranges [-128,127]"); + O << Value; + } else { + printOperand(MI, OpNum, STI, O); + } +} + +void RISCVInstPrinter::printSelect_2_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 1) && + "Invalid argument, value must be in range [0,1]"); + O << Value; + } else + printOperand(MI, OpNum, STI, O); +} + +void RISCVInstPrinter::printSelect_4_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 3) && + "Invalid argument, value must be in range [0,3]"); + O << Value; + } else + printOperand(MI, OpNum, STI, O); +} + +void RISCVInstPrinter::printSelect_8_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 7) && + "Invalid argument, value must be in range [0,7]"); + O << Value; + } else + printOperand(MI, OpNum, STI, O); +} + +void RISCVInstPrinter::printSelect_16_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 15) && + "Invalid argument, value must be in range [0,15]"); + O << Value; + } else + printOperand(MI, OpNum, STI, O); +} + +void RISCVInstPrinter::printOffset_16_16_AsmOperand(const MCInst *MI, int OpNum, + const 
MCSubtargetInfo &STI, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -128 && Value <= 112 && (Value & 0xf) == 0) && + "Invalid argument, value must be in range [-128,112], first 4 bits " + "should be zero"); + O << Value; + } else { + printOperand(MI, OpNum, STI, O); + } +} + +void RISCVInstPrinter::printOffset_256_8_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -1024 && Value <= 1016 && (Value & 0x7) == 0) && + "Invalid argument, value must be in range [-1024,1016], first 3 " + "bits should be zero"); + O << Value; + } else + printOperand(MI, OpNum, STI, O); +} + +void RISCVInstPrinter::printOffset_256_16_AsmOperand(const MCInst *MI, + int OpNum, + const MCSubtargetInfo &STI, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -2048 && Value <= 2032 && (Value & 0xf) == 0) && + "Invalid argument, value must be in range [-2048,2032], first 4 " + "bits should be zero"); + O << Value; + } else { + printOperand(MI, OpNum, STI, O); + } +} + +void RISCVInstPrinter::printOffset_256_4_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -512 && Value <= 508 && (Value & 0x3) == 0) && + "Invalid argument, value must be in range [-512,508], first 2 bits " + "should be zero"); + O << Value; + } else + printOperand(MI, OpNum, STI, O); +} diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.h index 77cc7a67e8892..9191646a2692d 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.h +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.h @@ -72,6 +72,26 
@@ class RISCVInstPrinter : public MCInstPrinter { const MCSubtargetInfo &STI, raw_ostream &O); static const char *getRegisterName(MCRegister Reg); static const char *getRegisterName(MCRegister Reg, unsigned AltIdx); + + void printImm8_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, raw_ostream &O); + void printSelect_2_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, raw_ostream &O); + void printSelect_4_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, raw_ostream &O); + void printSelect_8_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, raw_ostream &O); + void printSelect_16_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, raw_ostream &O); + void printOffset_16_16_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, raw_ostream &O); + void printOffset_256_8_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, raw_ostream &O); + void printOffset_256_16_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, + raw_ostream &O); + void printOffset_256_4_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, raw_ostream &O); }; } // namespace llvm diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp index 0863345b0c6dc..05be3035bb3d0 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp @@ -100,6 +100,54 @@ class RISCVMCCodeEmitter : public MCCodeEmitter { unsigned getRegReg(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; + + uint32_t getImm8OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint8_t getSelect_2OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint8_t 
getSelect_4OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint8_t getSelect_8OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint8_t getSelect_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint8_t getSelect_256OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + int8_t getOffset_16_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + int16_t getOffset_256_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + int16_t getOffset_256_8OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + int16_t getOffset_256_4OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint16_t getUImm10_Step4Operand(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint16_t getUImm13_Step4Operand(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; }; } // end anonymous namespace @@ -497,7 +545,8 @@ unsigned RISCVMCCodeEmitter::getImmOpValue(const MCInst &MI, unsigned OpNo, // FIXME: Sub kind binary exprs have chance of underflow. 
if (MIFrm == RISCVII::InstFormatJ) { FixupKind = RISCV::fixup_riscv_jal; - } else if (MIFrm == RISCVII::InstFormatB) { + } else if (MIFrm == RISCVII::InstFormatB|| + MIFrm == RISCVII::InstFormatESP32P4) { FixupKind = RISCV::fixup_riscv_branch; } else if (MIFrm == RISCVII::InstFormatCJ) { FixupKind = RISCV::fixup_riscv_rvc_jump; @@ -567,4 +616,141 @@ unsigned RISCVMCCodeEmitter::getRegReg(const MCInst &MI, unsigned OpNo, return Op | Op1 << 5; } +uint32_t RISCVMCCodeEmitter::getImm8OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int32_t Res = MO.getImm(); + + assert(((Res >= -128) && (Res <= 127)) && "Unexpected operand value!"); + + return (Res & 0xff); +} + +uint8_t +RISCVMCCodeEmitter::getSelect_2OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + uint8_t Res = static_cast(MO.getImm()); + + assert(((Res >= 0) && (Res <= 1)) && "Unexpected operand value!"); + + return Res; +} + +uint8_t +RISCVMCCodeEmitter::getSelect_4OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + uint8_t Res = static_cast(MO.getImm()); + + assert(((Res >= 0) && (Res <= 3)) && "Unexpected operand value!"); + + return Res; +} + +uint8_t +RISCVMCCodeEmitter::getSelect_8OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + uint8_t Res = static_cast(MO.getImm()); + + assert(((Res >= 0) && (Res <= 7)) && "Unexpected operand value!"); + + return Res; +} + +uint8_t +RISCVMCCodeEmitter::getSelect_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + uint8_t Res = static_cast(MO.getImm()); + + 
assert(((Res >= 0) && (Res <= 15)) && "Unexpected operand value!"); + + return Res; +} + +int8_t +RISCVMCCodeEmitter::getOffset_16_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int8_t Res = static_cast(MO.getImm()); + + assert(((Res >= -128) && (Res <= 112) && ((Res & 0xf) == 0)) && + "Unexpected operand value!"); + + return Res / 16; +} + +int16_t +RISCVMCCodeEmitter::getOffset_256_8OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int16_t Res = static_cast(MO.getImm()); + + assert(((Res >= -1024) && (Res <= 1016) && ((Res & 0x7) == 0)) && + "Unexpected operand value!"); + + return Res / 8; +} + +int16_t +RISCVMCCodeEmitter::getOffset_256_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int16_t Res = static_cast(MO.getImm()); + + assert(((Res >= -2048) && (Res <= 2032) && ((Res & 0xf) == 0)) && + "Unexpected operand value!"); + + return Res / 16; +} + +int16_t +RISCVMCCodeEmitter::getOffset_256_4OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int16_t Res = static_cast(MO.getImm()); + + assert(((Res >= -512) && (Res <= 508) && ((Res & 0x3) == 0)) && + "Unexpected operand value!"); + + return Res / 4; +} + +uint16_t +RISCVMCCodeEmitter::getUImm10_Step4Operand(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + if (MO.isImm()) { + int16_t Res = static_cast(MO.getImm()); + assert((isUInt<10>(Res) && ((Res & 0x1) == 0)) && "Unexpected operand value!"); + return Res / 2; + } + return getImmOpValue(MI, OpNo, Fixups, STI); +} + +uint16_t 
+RISCVMCCodeEmitter::getUImm13_Step4Operand(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + if (MO.isImm()) { + int16_t Res = static_cast(MO.getImm()); + assert((isUInt<13>(Res) && ((Res & 0x1) == 0)) && "Unexpected operand value!"); + return Res / 2; + } + return getImmOpValue(MI, OpNo, Fixups, STI); +} + #include "RISCVGenMCCodeEmitter.inc" diff --git a/llvm/lib/Target/RISCV/RISCVESP32P4ISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVESP32P4ISelLowering.cpp new file mode 100644 index 0000000000000..0ec3aaf3ad1dc --- /dev/null +++ b/llvm/lib/Target/RISCV/RISCVESP32P4ISelLowering.cpp @@ -0,0 +1,8468 @@ +//==- RISCVESP32P4ISelLowering.cpp - ESP32 P4 DAG Lowering Implementation -===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the interfaces that Xtensa uses to lower LLVM code into a +// selection DAG. 
+// +//===----------------------------------------------------------------------===// + +#include "RISCVISelLowering.h" +#include "RISCVSubtarget.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" + +using namespace llvm; + +MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( + MachineInstr &MI, MachineBasicBlock *MBB, const TargetInstrInfo &TII, + MachineFunction *MF, MachineRegisterInfo &MRI, DebugLoc DL) const { + switch (MI.getOpcode()) { + default: + llvm_unreachable("Unexpected instr type to insert"); + case RISCV::ESP_VCMULAS_S16_QACC_H_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S16_QACC_H; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S16_QACC_H_LD_IP_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S16_QACC_H_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h_ld_ip " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h_ld_ip " + "first argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h_ld_ip " + "first argument, it must bi in range 
[0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S16_QACC_H_LD_XP_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S16_QACC_H_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h_ld_xp " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h_ld_xp " + "first argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h_ld_xp " + "first argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S16_QACC_L_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S16_QACC_L; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_l first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && 
"Unexpected value of esp_vcmulas_s16_qacc_l first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S16_QACC_L_LD_IP_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S16_QACC_L_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_l_ld_ip " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_l_ld_ip " + "first argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_l_ld_ip " + "first argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S16_QACC_L_LD_XP_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S16_QACC_L_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_l_ld_xp " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_l_ld_xp " + "first argument, it must bi 
in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_l_ld_xp " + "first argument, it must be in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S8_QACC_H_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S8_QACC_H; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_h first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_h first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S8_QACC_H_LD_IP_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S8_QACC_H_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_h_ld_ip first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_h_ld_ip first " + "argument, it must be in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && 
"Unexpected value of esp_vcmulas_s8_qacc_h_ld_ip first " + "argument, it must be in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S8_QACC_H_LD_XP_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S8_QACC_H_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_h_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_h_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_h_ld_xp first " + "argument, it must be in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S8_QACC_L_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S8_QACC_L; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_l first " + "argument, it must be in range [0,7]"); + MachineOperand 
&QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_l first " + "argument, it must be in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S8_QACC_L_LD_IP_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S8_QACC_L_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_l_ld_ip first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_l_ld_ip first " + "argument, it must be in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_l_ld_ip first " + "argument, it must be in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S8_QACC_L_LD_XP_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S8_QACC_L_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_l_ld_xp first " + "argument, it must be in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected 
value of esp_vcmulas_s8_qacc_l_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_l_ld_xp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_QACC_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_qacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_QACC_LD_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_QACC_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + 
unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_QACC_LD_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_QACC_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_QACC_ST_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_QACC_ST_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_st_ip first " + 
"argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_QACC_ST_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_QACC_ST_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + 
MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_XACC_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_XACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_xacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_xacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_XACC_LD_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_XACC_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_XACC_LD_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_XACC_LD_XP; + MachineBasicBlock *MBB = 
MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_XACC_ST_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_XACC_ST_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) 
+ .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_XACC_ST_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_XACC_ST_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S8_QACC_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_qacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + 
return MBB; + } + case RISCV::ESP_VMULAS_S8_QACC_LD_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_QACC_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S8_QACC_LD_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_QACC_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + 
const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S8_QACC_ST_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_QACC_ST_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S8_QACC_ST_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_QACC_ST_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 
&& "Unexpected value of esp_vmulas_s8_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S8_XACC_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_XACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_xacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_xacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S8_XACC_LD_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_XACC_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = 
QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S8_XACC_LD_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_XACC_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S8_XACC_ST_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_XACC_ST_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_st_ip first " + "argument, it must bi in range 
[0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S8_XACC_ST_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_XACC_ST_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + 
case RISCV::ESP_VMULAS_U16_QACC_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_qacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U16_QACC_LD_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_QACC_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U16_QACC_LD_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_QACC_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = 
MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U16_QACC_ST_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_QACC_ST_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + 
.addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U16_QACC_ST_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_QACC_ST_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U16_XACC_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_XACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_xacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_xacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case 
RISCV::ESP_VMULAS_U16_XACC_LD_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_XACC_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U16_XACC_LD_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_XACC_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + const 
TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U16_XACC_ST_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_XACC_ST_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U16_XACC_ST_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_XACC_ST_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 
8 && "Unexpected value of esp_vmulas_u16_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U8_QACC_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_qacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U8_QACC_LD_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_QACC_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = 
QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U8_QACC_LD_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_QACC_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U8_QACC_ST_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_QACC_ST_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_st_ip first " + "argument, it must bi in range 
[0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U8_QACC_ST_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_QACC_ST_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + 
case RISCV::ESP_VMULAS_U8_XACC_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_XACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_xacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_xacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U8_XACC_LD_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_XACC_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U8_XACC_LD_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_XACC_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + 
MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U8_XACC_ST_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_XACC_ST_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + 
.addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U8_XACC_ST_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_XACC_ST_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_QACC_LDBC_INCP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_QACC_LDBC_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected 
value of esp_vmulas_s16_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S8_QACC_LDBC_INCP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_QACC_LDBC_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U16_QACC_LDBC_INCP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_QACC_LDBC_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && 
"Unexpected value of esp_vmulas_u16_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U8_QACC_LDBC_INCP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_QACC_LDBC_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSMULAS_S16_QACC_P: { + unsigned Opc = RISCV::ESP_VSMULAS_S16_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = 
QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsmulas_s16_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsmulas_s16_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &SELECT_16 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSMULAS_S16_QACC_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VSMULAS_S16_QACC_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsmulas_s16_qacc_ld_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsmulas_s16_qacc_ld_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &SELECT_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsmulas_s16_qacc_ld_incp " + "first argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSMULAS_S8_QACC_P: { + unsigned Opc = RISCV::ESP_VSMULAS_S8_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsmulas_s8_qacc 
first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsmulas_s8_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &SELECT_16 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSMULAS_S8_QACC_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VSMULAS_S8_QACC_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsmulas_s8_qacc_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsmulas_s8_qacc_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &SELECT_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsmulas_s8_qacc_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSMULAS_U16_QACC_P: { + unsigned Opc = RISCV::ESP_VSMULAS_U16_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsmulas_u16_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = 
MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsmulas_u16_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &SELECT_16 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSMULAS_U16_QACC_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VSMULAS_U16_QACC_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsmulas_u16_qacc_ld_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsmulas_u16_qacc_ld_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &SELECT_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsmulas_u16_qacc_ld_incp " + "first argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSMULAS_U8_QACC_P: { + unsigned Opc = RISCV::ESP_VSMULAS_U8_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsmulas_u8_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && 
"Unexpected value of esp_vsmulas_u8_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &SELECT_16 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSMULAS_U8_QACC_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VSMULAS_U8_QACC_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsmulas_u8_qacc_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsmulas_u8_qacc_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &SELECT_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsmulas_u8_qacc_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_S16_P: { + unsigned Opc = RISCV::ESP_CMUL_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand 
&SELECT_4 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_S16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_CMUL_S16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &SELECT_4 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(5); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_cmul_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_S16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_CMUL_S16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned 
QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_cmul_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_4 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_s16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_S8_P: { + unsigned Opc = RISCV::ESP_CMUL_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_4 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + 
.addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_S8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_CMUL_S8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &SELECT_4 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(5); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_cmul_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_S8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_CMUL_S8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_s8_st_incp first " + "argument, it must bi in range [0,7]"); + 
MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_cmul_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_4 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_s8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_U16_P: { + unsigned Opc = RISCV::ESP_CMUL_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_4 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_u16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_U16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_CMUL_U16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 
8 && "Unexpected value of esp_cmul_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &SELECT_4 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(5); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_cmul_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_U16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_CMUL_U16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_cmul_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_4 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); + 
unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_u16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_U8_P: { + unsigned Opc = RISCV::ESP_CMUL_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_4 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_U8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_CMUL_U8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = 
MI.getOperand(2); + MachineOperand &SELECT_4 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(5); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_cmul_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_U8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_CMUL_U8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_cmul_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_4 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_u8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, 
RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MAX_S16_A_P: { + unsigned Opc = RISCV::ESP_MAX_S16_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_max_s16_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MAX_S32_A_P: { + unsigned Opc = RISCV::ESP_MAX_S32_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_max_s32_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MAX_S8_A_P: { + unsigned Opc = RISCV::ESP_MAX_S8_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_max_s8_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MAX_U16_A_P: { + unsigned Opc = RISCV::ESP_MAX_U16_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW 
= MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_max_u16_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MAX_U32_A_P: { + unsigned Opc = RISCV::ESP_MAX_U32_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_max_u32_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MAX_U8_A_P: { + unsigned Opc = RISCV::ESP_MAX_U8_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_max_u8_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MIN_S16_A_P: { + unsigned Opc = RISCV::ESP_MIN_S16_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_min_s16_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + 
MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MIN_S32_A_P: { + unsigned Opc = RISCV::ESP_MIN_S32_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_min_s32_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MIN_S8_A_P: { + unsigned Opc = RISCV::ESP_MIN_S8_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_min_s8_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MIN_U16_A_P: { + unsigned Opc = RISCV::ESP_MIN_U16_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_min_u16_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MIN_U32_A_P: { + unsigned Opc = RISCV::ESP_MIN_U32_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_min_u32_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = 
&RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MIN_U8_A_P: { + unsigned Opc = RISCV::ESP_MIN_U8_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_min_u8_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VABS_16_P: { + unsigned Opc = RISCV::ESP_VABS_16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vabs_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(1); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vabs_16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VABS_32_P: { + unsigned Opc = RISCV::ESP_VABS_32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vabs_32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(1); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vabs_32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return 
MBB; + } + case RISCV::ESP_VABS_8_P: { + unsigned Opc = RISCV::ESP_VABS_8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vabs_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(1); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vabs_8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_S16_P: { + unsigned Opc = RISCV::ESP_VADD_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_S16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_S16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = 
MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_S16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_S16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_s16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + 
.addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_S32_P: { + unsigned Opc = RISCV::ESP_VADD_S32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_s32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_s32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_s32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_S32_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_S32_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + 
.addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_S32_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_S32_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_s32_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_S8_P: { + unsigned Opc = RISCV::ESP_VADD_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_s8 first argument, it " + "must bi in 
range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_S8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_S8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_S8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_S8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_s8_st_incp first " + "argument, it must bi in range 
[0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_s8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_U16_P: { + unsigned Opc = RISCV::ESP_VADD_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_u16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_U16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_U16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + 
unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_U16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_U16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of 
esp_vadd_u16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_U32_P: { + unsigned Opc = RISCV::ESP_VADD_U32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_u32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_u32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_u32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_U32_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_U32_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_u32_ld_incp first " + "argument, it must 
bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_U32_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_U32_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_u32_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_U8_P: { + unsigned Opc = RISCV::ESP_VADD_U8; + MachineBasicBlock *MBB = 
MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_U8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_U8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + 
.addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_U8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_U8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_u8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCLAMP_S16_P: { + unsigned Opc = RISCV::ESP_VCLAMP_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vclamp_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_16 = MI.getOperand(1); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vclamp_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) 
+ .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_S16_P: { + unsigned Opc = RISCV::ESP_VMAX_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_S16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_S16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + 
QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_S16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_S16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_s16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_S32_P: { + unsigned Opc = RISCV::ESP_VMAX_S32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_s32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_s32 first argument, it " + "must bi in range [0,7]"); 
+ MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_s32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_S32_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_S32_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_S32_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_S32_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_s32_st_incp first " + "argument, it must bi in range [0,7]"); + 
MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_s32_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_S8_P: { + unsigned Opc = RISCV::ESP_VMAX_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_S8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_S8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = 
QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_S8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_S8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_s8_st_incp first " + 
"argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_U16_P: { + unsigned Opc = RISCV::ESP_VMAX_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_u16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_U16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_U16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + 
MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_U16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_U16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_u16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_U32_P: { + unsigned Opc = RISCV::ESP_VMAX_U32; + MachineBasicBlock *MBB = MI.getParent(); + 
MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_u32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_u32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_u32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_U32_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_U32_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + 
MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_U32_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_U32_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_u32_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_U8_P: { + unsigned Opc = RISCV::ESP_VMAX_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, 
MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_U8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_U8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_U8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_U8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand 
&QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_u8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_S16_P: { + unsigned Opc = RISCV::ESP_VMIN_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_S16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_S16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = 
QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_S16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_S16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_s16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, 
TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_S32_P: { + unsigned Opc = RISCV::ESP_VMIN_S32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_s32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_s32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_s32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_S32_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_S32_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_s32_ld_incp first " + "argument, it must bi in range 
[0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_S32_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_S32_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_s32_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_S8_P: { + unsigned Opc = RISCV::ESP_VMIN_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_s8 first argument, it " + "must bi in range [0,7]"); + 
MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_S8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_S8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_S8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_S8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX 
= MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_s8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_U16_P: { + unsigned Opc = RISCV::ESP_VMIN_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_u16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case 
RISCV::ESP_VMIN_U16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_U16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_U16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_U16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_u16_st_incp first " + "argument, it must bi in range [0,7]"); + 
MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_u16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_U32_P: { + unsigned Opc = RISCV::ESP_VMIN_U32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_u32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_u32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_u32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_U32_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_U32_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + 
MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_U32_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_U32_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_u32_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + 
.addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_U8_P: { + unsigned Opc = RISCV::ESP_VMIN_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_U8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_U8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, 
RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_U8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_U8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_u8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_S16_P: { + unsigned Opc = RISCV::ESP_VMUL_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_s16 first argument, it " + "must bi in range [0,7]"); + 
MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_S16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMUL_S16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmul_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_S16_S8XS8_P: { + unsigned Opc = RISCV::ESP_VMUL_S16_S8XS8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_s16_s8xs8 first " + "argument, it must bi in range [0,7]"); + 
MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_s16_s8xs8 first " + "argument, it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_s16_s8xs8 first " + "argument, it must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vmul_s16_s8xs8 first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_S16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMUL_S16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmul_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_s16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + 
.addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_S32_S16XS16_P: { + unsigned Opc = RISCV::ESP_VMUL_S32_S16XS16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_s32_s16xs16 first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_s32_s16xs16 first " + "argument, it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_s32_s16xs16 first " + "argument, it must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vmul_s32_s16xs16 first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_S8_P: { + unsigned Opc = RISCV::ESP_VMUL_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + 
+ MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_S8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMUL_S8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmul_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_S8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMUL_S8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmul_s8_st_incp first " + 
"argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_s8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_U16_P: { + unsigned Opc = RISCV::ESP_VMUL_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_u16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_U16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMUL_U16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + 
MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmul_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_U16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMUL_U16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmul_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_u16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 
+ QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_U8_P: { + unsigned Opc = RISCV::ESP_VMUL_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_U8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMUL_U8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmul_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + 
.addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_U8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMUL_U8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmul_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_u8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VPRELU_S16_P: { + unsigned Opc = RISCV::ESP_VPRELU_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vprelu_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of 
esp_vprelu_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vprelu_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VPRELU_S8_P: { + unsigned Opc = RISCV::ESP_VPRELU_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vprelu_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vprelu_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vprelu_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VRELU_S16_P: { + unsigned Opc = RISCV::ESP_VRELU_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vrelu_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VRELU_S8_P: { + unsigned Opc 
= RISCV::ESP_VRELU_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vrelu_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSADDS_S16_P: { + unsigned Opc = RISCV::ESP_VSADDS_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsadds_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsadds_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSADDS_S8_P: { + unsigned Opc = RISCV::ESP_VSADDS_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsadds_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsadds_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSADDS_U16_P: { + unsigned Opc = 
RISCV::ESP_VSADDS_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsadds_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsadds_u16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSADDS_U8_P: { + unsigned Opc = RISCV::ESP_VSADDS_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsadds_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsadds_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSAT_S16_P: { + unsigned Opc = RISCV::ESP_VSAT_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsat_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vsat_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + 
.addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSAT_S32_P: { + unsigned Opc = RISCV::ESP_VSAT_S32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsat_s32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vsat_s32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSAT_S8_P: { + unsigned Opc = RISCV::ESP_VSAT_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsat_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vsat_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSAT_U16_P: { + unsigned Opc = RISCV::ESP_VSAT_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsat_u16 first argument, it " + "must bi 
in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vsat_u16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSAT_U32_P: { + unsigned Opc = RISCV::ESP_VSAT_U32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsat_u32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vsat_u32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSAT_U8_P: { + unsigned Opc = RISCV::ESP_VSAT_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsat_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vsat_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSSUBS_S16_P: { + unsigned Opc = RISCV::ESP_VSSUBS_S16; + 
MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vssubs_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vssubs_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSSUBS_S8_P: { + unsigned Opc = RISCV::ESP_VSSUBS_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vssubs_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vssubs_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSSUBS_U16_P: { + unsigned Opc = RISCV::ESP_VSSUBS_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vssubs_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vssubs_u16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QXVal); + + 
MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSSUBS_U8_P: { + unsigned Opc = RISCV::ESP_VSSUBS_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vssubs_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vssubs_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_S16_P: { + unsigned Opc = RISCV::ESP_VSUB_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_S16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_S16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && 
"Unexpected value of esp_vsub_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_S16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_S16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_s16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, 
RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_S32_P: { + unsigned Opc = RISCV::ESP_VSUB_S32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_s32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_s32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_s32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_S32_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_S32_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = 
&RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_S32_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_S32_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_s32_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_S8_P: { + unsigned Opc = RISCV::ESP_VSUB_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned 
QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_S8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_S8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_S8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_S8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = 
QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_s8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_U16_P: { + unsigned Opc = RISCV::ESP_VSUB_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_u16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_U16_LD_INCP_P: { + unsigned Opc = 
RISCV::ESP_VSUB_U16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_U16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_U16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + 
MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_u16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_U32_P: { + unsigned Opc = RISCV::ESP_VSUB_U32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_u32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_u32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_u32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_U32_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_U32_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned 
QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_U32_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_U32_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_u32_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + 
return MBB; + } + case RISCV::ESP_VSUB_U8_P: { + unsigned Opc = RISCV::ESP_VSUB_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_U8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_U8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) 
+ .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_U8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_U8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_u8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ADDX2_P: { + unsigned Opc = RISCV::ESP_ADDX2; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ADDX4_P: { + unsigned Opc = RISCV::ESP_ADDX4; + MachineBasicBlock *MBB = 
MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SAT_P: { + unsigned Opc = RISCV::ESP_SAT; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS0 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &RSD = MI.getOperand(2); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS0.getReg()) + .addReg(RS1.getReg()) + .addReg(RSD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SUBX2_P: { + unsigned Opc = RISCV::ESP_SUBX2; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SUBX4_P: { + unsigned Opc = RISCV::ESP_SUBX4; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ANDQ_P: { + unsigned Opc = RISCV::ESP_ANDQ; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal 
< 8 && "Unexpected value of esp_andq first argument, it must " + "bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_andq first argument, it must " + "bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_andq first argument, it must " + "bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_NOTQ_P: { + unsigned Opc = RISCV::ESP_NOTQ; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_notq first argument, it must " + "bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(1); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_notq first argument, it must " + "bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ORQ_P: { + unsigned Opc = RISCV::ESP_ORQ; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_orq first argument, it must " + "bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_orq first argument, it must " + "bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_orq first argument, it must " + "bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + 
QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_XORQ_P: { + unsigned Opc = RISCV::ESP_XORQ; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_xorq first argument, it must " + "bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_xorq first argument, it must " + "bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_xorq first argument, it must " + "bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_EQ_S16_P: { + unsigned Opc = RISCV::ESP_VCMP_EQ_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_eq_s16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_eq_s16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_eq_s16 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_EQ_S32_P: { + unsigned Opc = RISCV::ESP_VCMP_EQ_S32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_eq_s32 first argument, " + "it 
must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_eq_s32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_eq_s32 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_EQ_S8_P: { + unsigned Opc = RISCV::ESP_VCMP_EQ_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_eq_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_eq_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_eq_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_EQ_U16_P: { + unsigned Opc = RISCV::ESP_VCMP_EQ_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_eq_u16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_eq_u16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 
8 && "Unexpected value of esp_vcmp_eq_u16 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_EQ_U32_P: { + unsigned Opc = RISCV::ESP_VCMP_EQ_U32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_eq_u32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_eq_u32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_eq_u32 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_EQ_U8_P: { + unsigned Opc = RISCV::ESP_VCMP_EQ_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_eq_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_eq_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_eq_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case 
RISCV::ESP_VCMP_GT_S16_P: { + unsigned Opc = RISCV::ESP_VCMP_GT_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_gt_s16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_gt_s16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_gt_s16 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_GT_S32_P: { + unsigned Opc = RISCV::ESP_VCMP_GT_S32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_gt_s32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_gt_s32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_gt_s32 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_GT_S8_P: { + unsigned Opc = RISCV::ESP_VCMP_GT_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_gt_s8 first argument, it " + "must bi in range [0,7]"); + 
MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_gt_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_gt_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_GT_U16_P: { + unsigned Opc = RISCV::ESP_VCMP_GT_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_gt_u16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_gt_u16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_gt_u16 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_GT_U32_P: { + unsigned Opc = RISCV::ESP_VCMP_GT_U32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_gt_u32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_gt_u32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of 
esp_vcmp_gt_u32 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_GT_U8_P: { + unsigned Opc = RISCV::ESP_VCMP_GT_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_gt_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_gt_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_gt_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_LT_S16_P: { + unsigned Opc = RISCV::ESP_VCMP_LT_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_lt_s16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_lt_s16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_lt_s16 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_LT_S32_P: { + 
unsigned Opc = RISCV::ESP_VCMP_LT_S32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_lt_s32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_lt_s32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_lt_s32 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_LT_S8_P: { + unsigned Opc = RISCV::ESP_VCMP_LT_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_lt_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_lt_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_lt_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_LT_U16_P: { + unsigned Opc = RISCV::ESP_VCMP_LT_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_lt_u16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = 
MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_lt_u16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_lt_u16 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_LT_U32_P: { + unsigned Opc = RISCV::ESP_VCMP_LT_U32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_lt_u32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_lt_u32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_lt_u32 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_LT_U8_P: { + unsigned Opc = RISCV::ESP_VCMP_LT_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_lt_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_lt_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_lt_u8 first 
argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOV_S16_QACC_P: { + unsigned Opc = RISCV::ESP_MOV_S16_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_mov_s16_qacc first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RISCV::Q0 + QUVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOV_S8_QACC_P: { + unsigned Opc = RISCV::ESP_MOV_S8_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_mov_s8_qacc first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RISCV::Q0 + QUVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOV_U16_QACC_P: { + unsigned Opc = RISCV::ESP_MOV_U16_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_mov_u16_qacc first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RISCV::Q0 + QUVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOV_U8_QACC_P: { + unsigned Opc = RISCV::ESP_MOV_U8_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_mov_u8_qacc first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RISCV::Q0 + QUVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVI_16_A_P: { + unsigned Opc = RISCV::ESP_MOVI_16_A; + MachineBasicBlock *MBB = MI.getParent(); + 
MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_movi_16_a first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVI_16_Q_P: { + unsigned Opc = RISCV::ESP_MOVI_16_Q; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &SELECT_16 = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_movi_16_q first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RS1.getReg()) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVI_32_A_P: { + unsigned Opc = RISCV::ESP_MOVI_32_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_movi_32_a first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_4 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVI_32_Q_P: { + unsigned Opc = RISCV::ESP_MOVI_32_Q; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &SELECT_4 = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && 
"Unexpected value of esp_movi_32_q first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVI_8_A_P: { + unsigned Opc = RISCV::ESP_MOVI_8_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_movi_8_a first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVI_8_Q_P: { + unsigned Opc = RISCV::ESP_MOVI_8_Q; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &SELECT_16 = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_movi_8_q first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RS1.getReg()) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_R_CFG_P: { + unsigned Opc = RISCV::ESP_MOVX_R_CFG; + MachineBasicBlock *MBB = MI.getParent(); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(R1, RegState::Define); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_R_FFT_BIT_WIDTH_P: { + unsigned Opc = RISCV::ESP_MOVX_R_FFT_BIT_WIDTH; + MachineBasicBlock *MBB = MI.getParent(); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = 
MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(R1, RegState::Define); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_R_PERF_P: { + unsigned Opc = RISCV::ESP_MOVX_R_PERF; + MachineBasicBlock *MBB = MI.getParent(); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + MachineOperand &RS1 = MI.getOperand(1); + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_R_SAR_P: { + unsigned Opc = RISCV::ESP_MOVX_R_SAR; + MachineBasicBlock *MBB = MI.getParent(); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(R1, RegState::Define); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_R_SAR_BYTES_P: { + unsigned Opc = RISCV::ESP_MOVX_R_SAR_BYTES; + MachineBasicBlock *MBB = MI.getParent(); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(R1, RegState::Define); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_R_XACC_H_P: { + unsigned Opc = RISCV::ESP_MOVX_R_XACC_H; + MachineBasicBlock *MBB = MI.getParent(); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(R1, RegState::Define); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_R_XACC_L_P: { + unsigned Opc = RISCV::ESP_MOVX_R_XACC_L; + MachineBasicBlock *MBB = MI.getParent(); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(R1, RegState::Define); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_W_CFG_P: { + unsigned Opc = RISCV::ESP_MOVX_W_CFG; + 
MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_W_FFT_BIT_WIDTH_P: { + unsigned Opc = RISCV::ESP_MOVX_W_FFT_BIT_WIDTH; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_W_PERF_P: { + unsigned Opc = RISCV::ESP_MOVX_W_PERF; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_W_SAR_P: { + unsigned Opc = RISCV::ESP_MOVX_W_SAR; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_W_SAR_BYTES_P: { + unsigned Opc = RISCV::ESP_MOVX_W_SAR_BYTES; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_W_XACC_H_P: { + unsigned Opc = RISCV::ESP_MOVX_W_XACC_H; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_W_XACC_L_P: { + unsigned Opc = RISCV::ESP_MOVX_W_XACC_L; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VEXT_S16_P: { + unsigned Opc = RISCV::ESP_VEXT_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + 
assert(QWVal < 8 && "Unexpected value of esp_vext_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(1); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vext_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vext_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VEXT_S8_P: { + unsigned Opc = RISCV::ESP_VEXT_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vext_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(1); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vext_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vext_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VEXT_U16_P: { + unsigned Opc = RISCV::ESP_VEXT_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vext_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(1); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vext_u16 first argument, it " + "must bi in range [0,7]"); + 
MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vext_u16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VEXT_U8_P: { + unsigned Opc = RISCV::ESP_VEXT_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vext_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(1); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vext_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vext_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VUNZIP_16_P: { + unsigned Opc = RISCV::ESP_VUNZIP_16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vunzip_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vunzip_16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VUNZIP_32_P: { + unsigned Opc = 
RISCV::ESP_VUNZIP_32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vunzip_32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vunzip_32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VUNZIP_8_P: { + unsigned Opc = RISCV::ESP_VUNZIP_8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vunzip_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vunzip_8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VUNZIPT_16_P: { + unsigned Opc = RISCV::ESP_VUNZIPT_16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vunzipt_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vunzipt_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(2); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vunzipt_16 first argument, it " + 
"must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VUNZIPT_8_P: { + unsigned Opc = RISCV::ESP_VUNZIPT_8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vunzipt_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vunzipt_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(2); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vunzipt_8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VZIP_16_P: { + unsigned Opc = RISCV::ESP_VZIP_16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vzip_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vzip_16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return 
MBB; + } + case RISCV::ESP_VZIP_32_P: { + unsigned Opc = RISCV::ESP_VZIP_32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vzip_32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vzip_32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VZIP_8_P: { + unsigned Opc = RISCV::ESP_VZIP_8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vzip_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vzip_8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VZIPT_16_P: { + unsigned Opc = RISCV::ESP_VZIPT_16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vzipt_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vzipt_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(2); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of 
esp_vzipt_16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VZIPT_8_P: { + unsigned Opc = RISCV::ESP_VZIPT_8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vzipt_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vzipt_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(2); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vzipt_8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ZERO_Q_P: { + unsigned Opc = RISCV::ESP_ZERO_Q; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QZ = MI.getOperand(0); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_zero_q first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ZERO_QACC_P: { + unsigned Opc = RISCV::ESP_ZERO_QACC; + MachineBasicBlock *MBB = MI.getParent(); + BuildMI(*MBB, MI, DL, TII.get(Opc)); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ZERO_XACC_P: { + unsigned Opc = 
RISCV::ESP_ZERO_XACC; + MachineBasicBlock *MBB = MI.getParent(); + BuildMI(*MBB, MI, DL, TII.get(Opc)); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_FFT_AMS_S16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_FFT_AMS_S16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(2); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_2 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(6); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(7); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal) + .addReg(RS1.getReg()) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + 
return MBB; + } + case RISCV::ESP_FFT_AMS_S16_LD_INCP_UAUP_P: { + unsigned Opc = RISCV::ESP_FFT_AMS_S16_LD_INCP_UAUP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp_uaup " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp_uaup " + "first argument, it must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(2); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp_uaup " + "first argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_2 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp_uaup " + "first argument, it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(6); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp_uaup " + "first argument, it must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(7); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp_uaup " + "first argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal) + .addReg(RS1.getReg()) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_FFT_AMS_S16_LD_R32_DECP_P: { + unsigned Opc = 
RISCV::ESP_FFT_AMS_S16_LD_R32_DECP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_r32_decp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_r32_decp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(2); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_r32_decp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_2 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_r32_decp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(6); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_r32_decp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(7); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_r32_decp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal) + .addReg(RS1.getReg()) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_FFT_AMS_S16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_FFT_AMS_S16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = 
MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_fft_ams_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_fft_ams_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(2); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_fft_ams_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_fft_ams_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &RS2 = MI.getOperand(5); + MachineOperand &SELECT_2 = MI.getOperand(6); + MachineOperand &QZ = MI.getOperand(7); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_fft_ams_s16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + unsigned R2 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(R2, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_FFT_BITREV_P: { + unsigned Opc = RISCV::ESP_FFT_BITREV; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QV = MI.getOperand(1); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_fft_bitrev first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + 
unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QVVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_FFT_CMUL_S16_LD_XP_P: { + unsigned Opc = RISCV::ESP_FFT_CMUL_S16_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_fft_cmul_s16_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_fft_cmul_s16_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_8 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_fft_cmul_s16_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(6); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_fft_cmul_s16_ld_xp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(SELECT_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_FFT_CMUL_S16_ST_XP_P: { + unsigned Opc = RISCV::ESP_FFT_CMUL_S16_ST_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && 
"Unexpected value of esp_fft_cmul_s16_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_fft_cmul_s16_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_fft_cmul_s16_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &SELECT_4 = MI.getOperand(5); + MachineOperand &UPD_4 = MI.getOperand(6); + MachineOperand &SELECT_8 = MI.getOperand(7); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()) + .addImm(UPD_4.getImm()) + .addImm(SELECT_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_FFT_R2BF_S16_P: { + unsigned Opc = RISCV::ESP_FFT_R2BF_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_fft_r2bf_s16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_fft_r2bf_s16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &SELECT_2 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_fft_r2bf_s16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_fft_r2bf_s16 first argument, " + "it must bi in range [0,7]"); + 
BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_FFT_R2BF_S16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_FFT_R2BF_S16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_fft_r2bf_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_fft_r2bf_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &SELECT_4 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_fft_r2bf_s16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_FFT_VST_R32_DECP_P: { + unsigned Opc = RISCV::ESP_FFT_VST_R32_DECP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_fft_vst_r32_decp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &SELECT_2 = MI.getOperand(2); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + 
.addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LD_128_USAR_IP_P: { + unsigned Opc = RISCV::ESP_LD_128_USAR_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_ld_128_usar_ip first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LD_128_USAR_XP_P: { + unsigned Opc = RISCV::ESP_LD_128_USAR_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_ld_128_usar_xp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LD_XACC_IP_P: { + unsigned Opc = RISCV::ESP_LD_XACC_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_8 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + 
.addReg(RS1.getReg()) + .addImm(OFFSET_256_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LDQA_S16_128_IP_P: { + unsigned Opc = RISCV::ESP_LDQA_S16_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LDQA_S16_128_XP_P: { + unsigned Opc = RISCV::ESP_LDQA_S16_128_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LDQA_S8_128_IP_P: { + unsigned Opc = RISCV::ESP_LDQA_S8_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LDQA_S8_128_XP_P: { + unsigned Opc = RISCV::ESP_LDQA_S8_128_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + 
.addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LDQA_U16_128_IP_P: { + unsigned Opc = RISCV::ESP_LDQA_U16_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LDQA_U16_128_XP_P: { + unsigned Opc = RISCV::ESP_LDQA_U16_128_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LDQA_U8_128_IP_P: { + unsigned Opc = RISCV::ESP_LDQA_U8_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LDQA_U8_128_XP_P: { + unsigned Opc = RISCV::ESP_LDQA_U8_128_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + 
MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDBC_16_IP_P: { + unsigned Opc = RISCV::ESP_VLDBC_16_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_4 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldbc_16_ip first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDBC_16_XP_P: { + unsigned Opc = RISCV::ESP_VLDBC_16_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldbc_16_xp first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDBC_32_IP_P: { + unsigned Opc = RISCV::ESP_VLDBC_32_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_4 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldbc_32_ip first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + 
.addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDBC_32_XP_P: { + unsigned Opc = RISCV::ESP_VLDBC_32_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldbc_32_xp first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDBC_8_IP_P: { + unsigned Opc = RISCV::ESP_VLDBC_8_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_4 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldbc_8_ip first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDBC_8_XP_P: { + unsigned Opc = RISCV::ESP_VLDBC_8_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldbc_8_xp first argument, it " + "must bi in range [0,7]"); + const 
TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDEXT_S16_IP_P: { + unsigned Opc = RISCV::ESP_VLDEXT_S16_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_16_16 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldext_s16_ip first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vldext_s16_ip first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDEXT_S16_XP_P: { + unsigned Opc = RISCV::ESP_VLDEXT_S16_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldext_s16_xp first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vldext_s16_xp first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 
+ QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDEXT_S8_IP_P: { + unsigned Opc = RISCV::ESP_VLDEXT_S8_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_16_16 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldext_s8_ip first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vldext_s8_ip first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDEXT_S8_XP_P: { + unsigned Opc = RISCV::ESP_VLDEXT_S8_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldext_s8_xp first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vldext_s8_xp first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + 
.addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDEXT_U16_IP_P: { + unsigned Opc = RISCV::ESP_VLDEXT_U16_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_16_16 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldext_u16_ip first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vldext_u16_ip first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDEXT_U16_XP_P: { + unsigned Opc = RISCV::ESP_VLDEXT_U16_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldext_u16_xp first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vldext_u16_xp first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case 
RISCV::ESP_VLDEXT_U8_IP_P: { + unsigned Opc = RISCV::ESP_VLDEXT_U8_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_16_16 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldext_u8_ip first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vldext_u8_ip first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDEXT_U8_XP_P: { + unsigned Opc = RISCV::ESP_VLDEXT_U8_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldext_u8_xp first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vldext_u8_xp first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDHBC_16_INCP_P: { + unsigned Opc = RISCV::ESP_VLDHBC_16_INCP; + MachineBasicBlock *MBB = 
MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QU = MI.getOperand(1); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldhbc_16_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vldhbc_16_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LD_QACC_H_H_128_IP_P: { + unsigned Opc = RISCV::ESP_LD_QACC_H_H_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LD_QACC_H_L_128_IP_P: { + unsigned Opc = RISCV::ESP_LD_QACC_H_L_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LD_QACC_L_H_128_IP_P: { + unsigned Opc = RISCV::ESP_LD_QACC_L_H_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = 
MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LD_QACC_L_L_128_IP_P: { + unsigned Opc = RISCV::ESP_LD_QACC_L_L_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LD_UA_STATE_IP_P: { + unsigned Opc = RISCV::ESP_LD_UA_STATE_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LDXQ_32_P: { + unsigned Opc = RISCV::ESP_LDXQ_32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_ldxq_32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_4 = MI.getOperand(2); + MachineOperand &SELECT_8 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_ldxq_32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + 
.addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QWVal) + .addImm(SELECT_4.getImm()) + .addImm(SELECT_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ST_QACC_H_H_128_IP_P: { + unsigned Opc = RISCV::ESP_ST_QACC_H_H_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ST_QACC_H_L_128_IP_P: { + unsigned Opc = RISCV::ESP_ST_QACC_H_L_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ST_QACC_L_H_128_IP_P: { + unsigned Opc = RISCV::ESP_ST_QACC_L_H_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ST_QACC_L_L_128_IP_P: { + unsigned Opc = RISCV::ESP_ST_QACC_L_L_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = 
MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ST_UA_STATE_IP_P: { + unsigned Opc = RISCV::ESP_ST_UA_STATE_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_STXQ_32_P: { + unsigned Opc = RISCV::ESP_STXQ_32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_stxq_32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_stxq_32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_4 = MI.getOperand(3); + MachineOperand &SELECT_8 = MI.getOperand(4); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QWVal) + .addReg(RISCV::Q0 + QUVal) + .addImm(SELECT_4.getImm()) + .addImm(SELECT_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLD_128_IP_P: { + unsigned Opc = RISCV::ESP_VLD_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vld_128_ip first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = 
MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLD_128_XP_P: { + unsigned Opc = RISCV::ESP_VLD_128_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vld_128_xp first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLD_H_64_IP_P: { + unsigned Opc = RISCV::ESP_VLD_H_64_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_8 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vld_h_64_ip first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLD_H_64_XP_P: { + unsigned Opc = RISCV::ESP_VLD_H_64_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of 
esp_vld_h_64_xp first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLD_L_64_IP_P: { + unsigned Opc = RISCV::ESP_VLD_L_64_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_8 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vld_l_64_ip first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLD_L_64_XP_P: { + unsigned Opc = RISCV::ESP_VLD_L_64_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vld_l_64_xp first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VST_128_IP_P: { + unsigned Opc = RISCV::ESP_VST_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + 
assert(QUVal < 8 && "Unexpected value of esp_vst_128_ip first argument, it " + "must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_16 = MI.getOperand(2); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VST_128_XP_P: { + unsigned Opc = RISCV::ESP_VST_128_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QU = MI.getOperand(1); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vst_128_xp first argument, it " + "must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VST_H_64_IP_P: { + unsigned Opc = RISCV::ESP_VST_H_64_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vst_h_64_ip first argument, " + "it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_8 = MI.getOperand(2); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VST_H_64_XP_P: { + unsigned Opc = RISCV::ESP_VST_H_64_XP; + MachineBasicBlock *MBB = 
MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QU = MI.getOperand(1); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vst_h_64_xp first argument, " + "it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VST_L_64_IP_P: { + unsigned Opc = RISCV::ESP_VST_L_64_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vst_l_64_ip first argument, " + "it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_8 = MI.getOperand(2); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VST_L_64_XP_P: { + unsigned Opc = RISCV::ESP_VST_L_64_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QU = MI.getOperand(1); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vst_l_64_xp first argument, " + "it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SLCI_2Q_P: { + 
unsigned Opc = RISCV::ESP_SLCI_2Q; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_slci_2q first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_slci_2q first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_16 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SLCXXP_2Q_P: { + unsigned Opc = RISCV::ESP_SLCXXP_2Q; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_slcxxp_2q first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(3); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_slcxxp_2q first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRC_Q_P: { + unsigned Opc = RISCV::ESP_SRC_Q; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_src_q first argument, it must " + "bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_src_q 
first argument, it must " + "bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_src_q first argument, it must " + "bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRC_Q_LD_IP_P: { + unsigned Opc = RISCV::ESP_SRC_Q_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_src_q_ld_ip first argument, " + "it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QW = MI.getOperand(2); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_src_q_ld_ip first argument, " + "it must bi in range [0,7]"); + MachineOperand &OFFSET_256_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_src_q_ld_ip first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QWVal) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRC_Q_LD_XP_P: { + unsigned Opc = RISCV::ESP_SRC_Q_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_src_q_ld_xp first argument, " + "it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + 
MachineOperand &QW = MI.getOperand(3); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_src_q_ld_xp first argument, " + "it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_src_q_ld_xp first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRC_Q_QUP_P: { + unsigned Opc = RISCV::ESP_SRC_Q_QUP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_src_q_qup first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_src_q_qup first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_src_q_qup first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCI_2Q_P: { + unsigned Opc = RISCV::ESP_SRCI_2Q; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_srci_2q first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = 
MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_srci_2q first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_16 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCMB_S16_Q_QACC_P: { + unsigned Opc = RISCV::ESP_SRCMB_S16_Q_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_srcmb_s16_q_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &SELECT_2 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_srcmb_s16_q_qacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCMB_S16_QACC_P: { + unsigned Opc = RISCV::ESP_SRCMB_S16_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &SELECT_2 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_srcmb_s16_qacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RS1.getReg()) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCMB_S8_Q_QACC_P: { + unsigned Opc = RISCV::ESP_SRCMB_S8_Q_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + 
assert(QWVal < 8 && "Unexpected value of esp_srcmb_s8_q_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &SELECT_2 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_srcmb_s8_q_qacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCMB_S8_QACC_P: { + unsigned Opc = RISCV::ESP_SRCMB_S8_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &SELECT_2 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_srcmb_s8_qacc first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RS1.getReg()) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCMB_U16_Q_QACC_P: { + unsigned Opc = RISCV::ESP_SRCMB_U16_Q_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_srcmb_u16_q_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &SELECT_2 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_srcmb_u16_q_qacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCMB_U16_QACC_P: { + unsigned Opc = RISCV::ESP_SRCMB_U16_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = 
MI.getOperand(0); + MachineOperand &SELECT_2 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_srcmb_u16_qacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RS1.getReg()) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCMB_U8_Q_QACC_P: { + unsigned Opc = RISCV::ESP_SRCMB_U8_Q_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_srcmb_u8_q_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &SELECT_2 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_srcmb_u8_q_qacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCMB_U8_QACC_P: { + unsigned Opc = RISCV::ESP_SRCMB_U8_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &SELECT_2 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_srcmb_u8_qacc first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RS1.getReg()) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCQ_128_ST_INCP_P: { + unsigned Opc = RISCV::ESP_SRCQ_128_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of 
esp_srcq_128_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_srcq_128_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCXXP_2Q_P: { + unsigned Opc = RISCV::ESP_SRCXXP_2Q; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_srcxxp_2q first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(3); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_srcxxp_2q first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRS_S_XACC_P: { + unsigned Opc = RISCV::ESP_SRS_S_XACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRS_U_XACC_P: { + unsigned Opc = RISCV::ESP_SRS_U_XACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = 
MI.getOperand(0); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSL_32_P: { + unsigned Opc = RISCV::ESP_VSL_32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsl_32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(1); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsl_32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSLD_16_P: { + unsigned Opc = RISCV::ESP_VSLD_16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsld_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vsld_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsld_16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSLD_32_P: { + unsigned Opc = RISCV::ESP_VSLD_32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsld_32 first argument, it " + "must bi in range 
[0,7]"); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vsld_32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsld_32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSLD_8_P: { + unsigned Opc = RISCV::ESP_VSLD_8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsld_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vsld_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsld_8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSR_S32_P: { + unsigned Opc = RISCV::ESP_VSR_S32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsr_s32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(1); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsr_s32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return 
MBB; + } + case RISCV::ESP_VSR_U32_P: { + unsigned Opc = RISCV::ESP_VSR_U32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsr_u32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(1); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsr_u32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSRD_16_P: { + unsigned Opc = RISCV::ESP_VSRD_16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsrd_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vsrd_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsrd_16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSRD_32_P: { + unsigned Opc = RISCV::ESP_VSRD_32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsrd_32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vsrd_32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = 
QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsrd_32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSRD_8_P: { + unsigned Opc = RISCV::ESP_VSRD_8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsrd_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vsrd_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsrd_8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ST_S_XACC_IP_P: { + unsigned Opc = RISCV::ESP_ST_S_XACC_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_8 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ST_U_XACC_IP_P: { + unsigned Opc = RISCV::ESP_ST_U_XACC_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_8 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, 
RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + } +} diff --git a/llvm/lib/Target/RISCV/RISCVESP32P4Operands.td b/llvm/lib/Target/RISCV/RISCVESP32P4Operands.td new file mode 100644 index 0000000000000..0d77783aff0e2 --- /dev/null +++ b/llvm/lib/Target/RISCV/RISCVESP32P4Operands.td @@ -0,0 +1,134 @@ +//===- RISCVESP32P4Operands.td - ESP32P4 instruction operands -*- tblgen-*--===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// Immediate operands with a shared generic render method. +class P4ImmAsmOperand : AsmOperandClass { + let Name = name; + let RenderMethod = "addImmOperands"; + let DiagnosticType = !strconcat("Invalid", name); +} + +class Immediate + : Operand, ImmLeaf { + let PrintMethod = "print"#asmop; + let ParserMatchClass = !cast(asmop); +} +/// --------------------------- /// + +// imm8 predicate - Immediate in the range [-128,127] +def Imm8_AsmOperand : P4ImmAsmOperand<"Imm8">; +def imm8 : Immediate= -128 && Imm <= 127; }], "Imm8_AsmOperand"> { + let EncoderMethod = "getImm8OpValue"; + let DecoderMethod = "decodeImm8Operand"; +} + +// offset_16_16 predicate - 4-bit signed immediate in the range [-128,112] with an interval +// of 16. +def Offset_16_16_AsmOperand: P4ImmAsmOperand<"Offset_16_16">; +def offset_16_16: Immediate= -128 && Imm <= 112) && ((Imm & 0xf) == 0); }], "Offset_16_16_AsmOperand"> { + let EncoderMethod = "getOffset_16_16OpValue"; + let DecoderMethod = "decodeOffset_16_16Operand"; +} + +// offset_256_8 predicate - 4-bit signed immediate in the range [-1024,1016] with an interval +// of 8. 
+def Offset_256_8_AsmOperand: P4ImmAsmOperand<"Offset_256_8">; +def offset_256_8: Immediate= -1024 && Imm <= 1016) && ((Imm & 0x7) == 0); }], "Offset_256_8_AsmOperand"> { + let EncoderMethod = "getOffset_256_8OpValue"; + let DecoderMethod = "decodeOffset_256_8Operand"; +} + +// offset_256_16 predicate - 8-bit signed immediate in the range [-2048,2032] with an interval +// of 16. +def Offset_256_16_AsmOperand: P4ImmAsmOperand<"Offset_256_16">; +def offset_256_16: Immediate= -2048 && Imm <= 2032) && ((Imm & 0xf) == 0); }], "Offset_256_16_AsmOperand"> { + let EncoderMethod = "getOffset_256_16OpValue"; + let DecoderMethod = "decodeOffset_256_16Operand"; +} + +// offset_256_4 predicate - 4-bit signed immediate in the range [-512,508] with an interval +// of 4. +def Offset_256_4_AsmOperand: P4ImmAsmOperand<"Offset_256_4">; +def offset_256_4: Immediate= -512 && Imm <= 508) && ((Imm & 0x3) == 0); }], "Offset_256_4_AsmOperand"> { + let EncoderMethod = "getOffset_256_4OpValue"; + let DecoderMethod = "decodeOffset_256_4Operand"; +} + +// select_2 predicate - Immediate in the range [0,1] +def Select_2_AsmOperand: P4ImmAsmOperand<"Select_2">; +def select_2: Immediate= 0 && Imm <= 1; }], "Select_2_AsmOperand"> { + let EncoderMethod = "getSelect_2OpValue"; + let DecoderMethod = "decodeSelect_2Operand"; +} + +// select_4 predicate - Immediate in the range [0,3] +def Select_4_AsmOperand: P4ImmAsmOperand<"Select_4">; +def select_4: Immediate= 0 && Imm <= 3; }], "Select_4_AsmOperand"> { + let EncoderMethod = "getSelect_4OpValue"; + let DecoderMethod = "decodeSelect_4Operand"; +} + +// select_8 predicate - Immediate in the range [0,7] +def Select_8_AsmOperand: P4ImmAsmOperand<"Select_8">; +def select_8: Immediate= 0 && Imm <= 7; }], "Select_8_AsmOperand"> { + let EncoderMethod = "getSelect_8OpValue"; + let DecoderMethod = "decodeSelect_8Operand"; +} + +// select_16 predicate - Immediate in the range [0,15] +def Select_16_AsmOperand: P4ImmAsmOperand<"Select_16">; +def select_16: 
Immediate= 0 && Imm <= 15; }], "Select_16_AsmOperand"> { + let EncoderMethod = "getSelect_16OpValue"; + let DecoderMethod = "decodeSelect_16Operand"; +} + +def uimm10_step4 : Operand { + // let ParserMatchClass = Simm21Lsb0JALAsmOperand; + let ParserMatchClass = UImmAsmOperand<10>; + let PrintMethod = "printBranchOperand"; + let EncoderMethod = "getUImm10_Step4Operand"; + let DecoderMethod = "decodeUImm10_Step4Operand"; + let MCOperandPredicate = [{ + int64_t Imm; + if (MCOp.evaluateAsConstantImm(Imm)) + return isUInt<10>(); + return MCOp.isBareSymbolRef(); + }]; + let OperandType = "OPERAND_PCREL"; +} + +def uimm13_step4 : Operand { + let ParserMatchClass = UImmAsmOperand<13>; + let PrintMethod = "printBranchOperand"; + let EncoderMethod = "getUImm13_Step4Operand"; + let DecoderMethod = "decodeUImm13_Step4Operand"; + let MCOperandPredicate = [{ + int64_t Imm; + if (MCOp.evaluateAsConstantImm(Imm)) + return isUInt<10>(); + return MCOp.isBareSymbolRef(); + }]; + let OperandType = "OPERAND_PCREL"; +} + +// A 12-bit unsigned immediate. 
+def uimm12 : RISCVOp, + ImmLeaf(Imm);}]> { + let ParserMatchClass = UImmAsmOperand<12>; + let EncoderMethod = "getImmOpValue"; + let DecoderMethod = "decodeUImmOperand<12>"; + let OperandType = "OPERAND_UIMM12"; + let MCOperandPredicate = [{ + uint64_t Imm; + if (!MCOp.evaluateAsConstantImm(Imm)) + return false; + return isUInt<12>(Imm); + }]; +} diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td index 3c868dbbf8b3a..d7d44adb9709a 100644 --- a/llvm/lib/Target/RISCV/RISCVFeatures.td +++ b/llvm/lib/Target/RISCV/RISCVFeatures.td @@ -1270,6 +1270,13 @@ def HasVendorXwchc AssemblerPredicate<(all_of FeatureVendorXwchc), "'Xwchc' (WCH/QingKe additional compressed opcodes)">; + +def FeatureVendorESP32P4 + : RISCVExtension<"xesppie", 1, 0, + "'Espressif ESP32P4'">; +// def FeatureVendorESP32P4 : SubtargetFeature<"xesppie", "HasVendorESP32P4", "true", "'Espressif ESP32P4'">; +def HasVendorESP32P4 : Predicate<"Subtarget->hasVendorESP32P4()">, AssemblerPredicate<(all_of FeatureVendorESP32P4), "'Espressif ESP32P4'">; + //===----------------------------------------------------------------------===// // LLVM specific features and extensions //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 823fb428472ef..600b20416c918 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -119,6 +119,13 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, // Set up the register classes. 
addRegisterClass(XLenVT, &RISCV::GPRRegClass); + + if (Subtarget.hasVendorESP32P4()) { + static const MVT::SimpleValueType QRVec[] = {MVT::v16i8, MVT::v4i32}; + for (auto st : QRVec) + addRegisterClass(st, &RISCV::QRRegClass); + } + if (Subtarget.is64Bit() && RV64LegalI32) addRegisterClass(MVT::i32, &RISCV::GPRRegClass); @@ -18746,9 +18753,13 @@ static MachineBasicBlock *emitFROUND(MachineInstr &MI, MachineBasicBlock *MBB, MachineBasicBlock * RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const { + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + MachineFunction *MF = BB->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + DebugLoc DL = MI.getDebugLoc(); switch (MI.getOpcode()) { default: - llvm_unreachable("Unexpected instr type to insert"); + return emitDSPInstrWithCustomInserter(MI, BB, TII, MF, MRI, DL); case RISCV::ReadCounterWide: assert(!Subtarget.is64Bit() && "ReadCounterWide is only to be used on riscv32"); diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h index 0b0ad9229f0b3..0d549861d2135 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -891,6 +891,10 @@ class RISCVTargetLowering : public TargetLowering { MachineBasicBlock::instr_iterator &MBBI, const TargetInstrInfo *TII) const override; + MachineBasicBlock *emitDSPInstrWithCustomInserter( + MachineInstr &MI, MachineBasicBlock *MBB, const TargetInstrInfo &TII, + MachineFunction *MF, MachineRegisterInfo &MRI, DebugLoc DL) const; + /// RISCVCCAssignFn - This target-specific function extends the default /// CCValAssign with additional information used to lower RISC-V calling /// conventions. 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormatsESP32P4.td b/llvm/lib/Target/RISCV/RISCVInstrFormatsESP32P4.td new file mode 100644 index 0000000000000..6adaa56cc656d --- /dev/null +++ b/llvm/lib/Target/RISCV/RISCVInstrFormatsESP32P4.td @@ -0,0 +1,42 @@ +//====-- RISCVInstrFormatsESP32P4.td - ESP32P4 Instr Formats -*- tablegen -*-=// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file describes the RISC-V ESP32-P4 extension instruction formats. +// +//===----------------------------------------------------------------------===// + +class Esp32P4Inst pattern, + InstrItinClass itin = NoItinerary> + : Instruction, Requires<[HasVendorESP32P4]> { + let Namespace = "RISCV"; + let DecoderNamespace = "ESP32P4"; + field bits<32> Inst; + field bits<32> SoftFail = 0; + + let TSFlags{4-0} = 23; + let Size = 4; + + let OutOperandList = outs; + let InOperandList = ins; + + let AsmString = asmstr; + let Pattern = pattern; + + let Itinerary = itin; +} + + +// Pseudo instructions +class PseudoESP32P4 pattern> + : Esp32P4Inst { + let isPseudo = 1; + let isCodeGenOnly = 1; + let mayLoad = 1; + let mayStore = 1; + let hasSideEffects = 1; +} \ No newline at end of file diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp index 6c0cbeadebf43..541f1c03601fe 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -2411,6 +2411,12 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI, case RISCVOp::OPERAND_UIMM10_LSB00_NONZERO: Ok = isShiftedUInt<8, 2>(Imm) && (Imm != 0); break; + case RISCVOp::OPERAND_UIMM10_STEP4: + Ok = isUInt<10>(Imm) && (Imm != 0); + break; + case RISCVOp::OPERAND_UIMM13_STEP4: + Ok = isUInt<13>(Imm) && (Imm 
!= 0); + break; case RISCVOp::OPERAND_ZERO: Ok = Imm == 0; break; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td index 04054d2c3feee..9ffa314a430ba 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td @@ -2081,6 +2081,7 @@ include "RISCVInstrInfoXSf.td" include "RISCVInstrInfoSFB.td" include "RISCVInstrInfoXCV.td" include "RISCVInstrInfoXwch.td" +include "RISCVInstrInfoESP32P4.td" //===----------------------------------------------------------------------===// // Global ISel diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoESP32P4.td b/llvm/lib/Target/RISCV/RISCVInstrInfoESP32P4.td new file mode 100644 index 0000000000000..bd97a927ea98f --- /dev/null +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoESP32P4.td @@ -0,0 +1,15603 @@ +//===- RISCVInstrInfoP4.td - RISCV Target Description -*- tablegen -*------===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file describes the RISCV ESP32P4 DSP instructions in TableGen format. 
+// +// These definitions are generated +// This file is generated +// +//===----------------------------------------------------------------------===// + +include "RISCVESP32P4Operands.td" +include "RISCVInstrFormatsESP32P4.td" +include "RISCVInstrInfoP4HWLP.td" + +// This file is generated + +def ESP_VCMULAS_S16_QACC_H: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vcmulas.s16.qacc.h\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16} = 1; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S16_QACC_H_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vcmulas_s16_qacc_h_p $qx, $qy", + [(int_riscv_esp_vcmulas_s16_qacc_h timm:$qx, timm:$qy)]>; + +def ESP_VCMULAS_S16_QACC_H_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vcmulas.s16.qacc.h.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22-19} = off1616{3-0}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let 
Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S16_QACC_H_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vcmulas_s16_qacc_h_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vcmulas_s16_qacc_h_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VCMULAS_S16_QACC_H_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vcmulas.s16.qacc.h.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S16_QACC_H_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vcmulas_s16_qacc_h_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vcmulas_s16_qacc_h_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VCMULAS_S16_QACC_L: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vcmulas.s16.qacc.l\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let 
Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 1; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S16_QACC_L_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vcmulas_s16_qacc_l_p $qx, $qy", + [(int_riscv_esp_vcmulas_s16_qacc_l timm:$qx, timm:$qy)]>; + +def ESP_VCMULAS_S16_QACC_L_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vcmulas.s16.qacc.l.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22-19} = off1616{3-0}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S16_QACC_L_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vcmulas_s16_qacc_l_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vcmulas_s16_qacc_l_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VCMULAS_S16_QACC_L_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins 
GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vcmulas.s16.qacc.l.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S16_QACC_L_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vcmulas_s16_qacc_l_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vcmulas_s16_qacc_l_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VCMULAS_S8_QACC_H: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vcmulas.s8.qacc.h\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S8_QACC_H_P : PseudoESP32P4<(outs), (ins imm8:$qx, 
imm8:$qy), + "!esp_vcmulas_s8_qacc_h_p $qx, $qy", + [(int_riscv_esp_vcmulas_s8_qacc_h timm:$qx, timm:$qy)]>; + +def ESP_VCMULAS_S8_QACC_H_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vcmulas.s8.qacc.h.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-19} = off1616{3-0}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S8_QACC_H_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vcmulas_s8_qacc_h_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vcmulas_s8_qacc_h_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VCMULAS_S8_QACC_H_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vcmulas.s8.qacc.h.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + 
let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S8_QACC_H_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vcmulas_s8_qacc_h_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vcmulas_s8_qacc_h_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VCMULAS_S8_QACC_L: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vcmulas.s8.qacc.l\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S8_QACC_L_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vcmulas_s8_qacc_l_p $qx, $qy", + [(int_riscv_esp_vcmulas_s8_qacc_l timm:$qx, timm:$qy)]>; + +def ESP_VCMULAS_S8_QACC_L_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vcmulas.s8.qacc.l.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 
0; + let Inst{23} = 1; + let Inst{22-19} = off1616{3-0}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S8_QACC_L_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vcmulas_s8_qacc_l_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vcmulas_s8_qacc_l_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VCMULAS_S8_QACC_L_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vcmulas.s8.qacc.l.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S8_QACC_L_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vcmulas_s8_qacc_l_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vcmulas_s8_qacc_l_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_S16_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vmulas.s16.qacc\t $qx, 
$qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vmulas_s16_qacc_p $qx, $qy", + [(int_riscv_esp_vmulas_s16_qacc timm:$qx, timm:$qy)]>; + +def ESP_VMULAS_S16_QACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.s16.qacc.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_QACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vmulas_s16_qacc_ld_ip_p $qu, $rs1, $off1616, 
$qx, $qy", + [(int_riscv_esp_vmulas_s16_qacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VMULAS_S16_QACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.s16.qacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_QACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_s16_qacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_s16_qacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_S16_QACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.s16.qacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<4> off1616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let 
Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_QACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "!esp_vmulas_s16_qacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_s16_qacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + +def ESP_VMULAS_S16_QACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmulas.s16.qacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_QACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), + "!esp_vmulas_s16_qacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_s16_qacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_VMULAS_S16_XACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vmulas.s16.xacc\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let 
Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_XACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vmulas_s16_xacc_p $qx, $qy", + [(int_riscv_esp_vmulas_s16_xacc timm:$qx, timm:$qy)]>; + +def ESP_VMULAS_S16_XACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.s16.xacc.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_XACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vmulas_s16_xacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_s16_xacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VMULAS_S16_XACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins 
GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.s16.xacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_XACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_s16_xacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_s16_xacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_S16_XACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.s16.xacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<4> off1616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; 
+} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_XACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "!esp_vmulas_s16_xacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_s16_xacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + +def ESP_VMULAS_S16_XACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmulas.s16.xacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_XACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), + "!esp_vmulas_s16_xacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_s16_xacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_VMULAS_S8_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vmulas.s8.qacc\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 0; + 
let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vmulas_s8_qacc_p $qx, $qy", + [(int_riscv_esp_vmulas_s8_qacc timm:$qx, timm:$qy)]>; + +def ESP_VMULAS_S8_QACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.s8.qacc.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_QACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vmulas_s8_qacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_s8_qacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VMULAS_S8_QACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.s8.qacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let 
mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_QACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_s8_qacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_s8_qacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_S8_QACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.s8.qacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<4> off1616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_QACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "!esp_vmulas_s8_qacc_st_ip_p $qu, $rs1, 
$off1616, $qx, $qy", + [(int_riscv_esp_vmulas_s8_qacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + +def ESP_VMULAS_S8_QACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmulas.s8.qacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_QACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), + "!esp_vmulas_s8_qacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_s8_qacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_VMULAS_S8_XACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vmulas.s8.xacc\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + 
let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_XACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vmulas_s8_xacc_p $qx, $qy", + [(int_riscv_esp_vmulas_s8_xacc timm:$qx, timm:$qy)]>; + +def ESP_VMULAS_S8_XACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.s8.xacc.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_XACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vmulas_s8_xacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_s8_xacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VMULAS_S8_XACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.s8.xacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 
rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_XACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_s8_xacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_s8_xacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_S8_XACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.s8.xacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<4> off1616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_XACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "!esp_vmulas_s8_xacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_s8_xacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + +def ESP_VMULAS_S8_XACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, 
QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmulas.s8.xacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_XACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), + "!esp_vmulas_s8_xacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_s8_xacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_VMULAS_U16_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vmulas.u16.qacc\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + 
"!esp_vmulas_u16_qacc_p $qx, $qy", + [(int_riscv_esp_vmulas_u16_qacc timm:$qx, timm:$qy)]>; + +def ESP_VMULAS_U16_QACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.u16.qacc.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_QACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vmulas_u16_qacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_u16_qacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VMULAS_U16_QACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.u16.qacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + 
let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_QACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_u16_qacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_u16_qacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_U16_QACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.u16.qacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<4> off1616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_QACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "!esp_vmulas_u16_qacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_u16_qacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + +def ESP_VMULAS_U16_QACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmulas.u16.qacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<5> 
rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_QACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), + "!esp_vmulas_u16_qacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_u16_qacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_VMULAS_U16_XACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vmulas.u16.xacc\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_XACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vmulas_u16_xacc_p $qx, $qy", + [(int_riscv_esp_vmulas_u16_xacc timm:$qx, timm:$qy)]>; + +def ESP_VMULAS_U16_XACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, 
GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.u16.xacc.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_XACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vmulas_u16_xacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_u16_xacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VMULAS_U16_XACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.u16.xacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; 
+} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_XACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_u16_xacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_u16_xacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_U16_XACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.u16.xacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<4> off1616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_XACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "!esp_vmulas_u16_xacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_u16_xacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + +def ESP_VMULAS_U16_XACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmulas.u16.xacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; 
+ let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_XACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), + "!esp_vmulas_u16_xacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_u16_xacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_VMULAS_U8_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vmulas.u8.qacc\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U8_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vmulas_u8_qacc_p $qx, $qy", + [(int_riscv_esp_vmulas_u8_qacc timm:$qx, timm:$qy)]>; + +def ESP_VMULAS_U8_QACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.u8.qacc.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let 
mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U8_QACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vmulas_u8_qacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_u8_qacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VMULAS_U8_QACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.u8.qacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U8_QACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_u8_qacc_ld_xp_p $qu, $rs1, $rs2, $qx, 
$qy", + [(int_riscv_esp_vmulas_u8_qacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_U8_QACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.u8.qacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<4> off1616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U8_QACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "!esp_vmulas_u8_qacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_u8_qacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + +def ESP_VMULAS_U8_QACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmulas.u8.qacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = 
qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U8_QACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), + "!esp_vmulas_u8_qacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_u8_qacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_VMULAS_U8_XACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vmulas.u8.xacc\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U8_XACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vmulas_u8_xacc_p $qx, $qy", + [(int_riscv_esp_vmulas_u8_xacc timm:$qx, timm:$qy)]>; + +def ESP_VMULAS_U8_XACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.u8.xacc.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} 
= 0; + let Inst{22} = 0; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U8_XACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vmulas_u8_xacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_u8_xacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VMULAS_U8_XACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.u8.xacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U8_XACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_u8_xacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_u8_xacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_U8_XACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, 
offset_16_16:$off1616), + "esp.vmulas.u8.xacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<4> off1616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U8_XACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "!esp_vmulas_u8_xacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_u8_xacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + +def ESP_VMULAS_U8_XACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmulas.u8.xacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let 
usesCustomInserter = 1 in +def ESP_VMULAS_U8_XACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), + "!esp_vmulas_u8_xacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_u8_xacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_VMULAS_S16_QACC_LDBC_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.s16.qacc.ldbc.incp\t $qu, $rs1, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_QACC_LDBC_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_s16_qacc_ldbc_incp_p $qu, $rs1, $qx, $qy", + [(int_riscv_esp_vmulas_s16_qacc_ldbc_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_S8_QACC_LDBC_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.s8.qacc.ldbc.incp\t $qu, $rs1, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + 
let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_QACC_LDBC_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_s8_qacc_ldbc_incp_p $qu, $rs1, $qx, $qy", + [(int_riscv_esp_vmulas_s8_qacc_ldbc_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_U16_QACC_LDBC_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.u16.qacc.ldbc.incp\t $qu, $rs1, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_QACC_LDBC_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_u16_qacc_ldbc_incp_p $qu, $rs1, $qx, $qy", + [(int_riscv_esp_vmulas_u16_qacc_ldbc_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_U8_QACC_LDBC_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.u8.qacc.ldbc.incp\t $qu, $rs1, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> 
rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U8_QACC_LDBC_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_u8_qacc_ldbc_incp_p $qu, $rs1, $qx, $qy", + [(int_riscv_esp_vmulas_u8_qacc_ldbc_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VSMULAS_S16_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy, select_16:$sel16), + "esp.vsmulas.s16.qacc\t $qx, $qy, $sel16", []> +{ + bits<3> qx; + bits<3> qy; + bits<4> sel16; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18-15} = sel16{3-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSMULAS_S16_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, select_16:$sel16), + "!esp_vsmulas_s16_qacc_p $qx, $qy, $sel16", + [(int_riscv_esp_vsmulas_s16_qacc timm:$qx, timm:$qy, timm:$sel16)]>; + +def 
ESP_VSMULAS_S16_QACC_LD_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, select_16:$sel16), + "esp.vsmulas.s16.qacc.ld.incp\t $qu, $rs1, $qx, $qy, $sel16", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> sel16; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22-19} = sel16{3-0}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSMULAS_S16_QACC_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_16:$sel16, imm8:$qu), + "!esp_vsmulas_s16_qacc_ld_incp_p $qu, $rs1, $qx, $qy, $sel16", + [(int_riscv_esp_vsmulas_s16_qacc_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel16, timm:$qu)]>; + +def ESP_VSMULAS_S8_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy, select_16:$sel16), + "esp.vsmulas.s8.qacc\t $qx, $qy, $sel16", []> +{ + bits<3> qx; + bits<3> qy; + bits<4> sel16; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18-15} = sel16{3-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def 
ESP_VSMULAS_S8_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, select_16:$sel16), + "!esp_vsmulas_s8_qacc_p $qx, $qy, $sel16", + [(int_riscv_esp_vsmulas_s8_qacc timm:$qx, timm:$qy, timm:$sel16)]>; + +def ESP_VSMULAS_S8_QACC_LD_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, select_16:$sel16), + "esp.vsmulas.s8.qacc.ld.incp\t $qu, $rs1, $qx, $qy, $sel16", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> sel16; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22-19} = sel16{3-0}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSMULAS_S8_QACC_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_16:$sel16, imm8:$qu), + "!esp_vsmulas_s8_qacc_ld_incp_p $qu, $rs1, $qx, $qy, $sel16", + [(int_riscv_esp_vsmulas_s8_qacc_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel16, timm:$qu)]>; + +def ESP_VSMULAS_U16_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy, select_16:$sel16), + "esp.vsmulas.u16.qacc\t $qx, $qy, $sel16", []> +{ + bits<3> qx; + bits<3> qy; + bits<4> sel16; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18-15} = sel16{3-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let 
Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSMULAS_U16_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, select_16:$sel16), + "!esp_vsmulas_u16_qacc_p $qx, $qy, $sel16", + [(int_riscv_esp_vsmulas_u16_qacc timm:$qx, timm:$qy, timm:$sel16)]>; + +def ESP_VSMULAS_U16_QACC_LD_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, select_16:$sel16), + "esp.vsmulas.u16.qacc.ld.incp\t $qu, $rs1, $qx, $qy, $sel16", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> sel16; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-19} = sel16{3-0}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSMULAS_U16_QACC_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_16:$sel16, imm8:$qu), + "!esp_vsmulas_u16_qacc_ld_incp_p $qu, $rs1, $qx, $qy, $sel16", + [(int_riscv_esp_vsmulas_u16_qacc_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel16, timm:$qu)]>; + +def ESP_VSMULAS_U8_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy, select_16:$sel16), + "esp.vsmulas.u8.qacc\t $qx, $qy, $sel16", []> +{ + bits<3> qx; + bits<3> qy; + bits<4> sel16; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let 
Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18-15} = sel16{3-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSMULAS_U8_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, select_16:$sel16), + "!esp_vsmulas_u8_qacc_p $qx, $qy, $sel16", + [(int_riscv_esp_vsmulas_u8_qacc timm:$qx, timm:$qy, timm:$sel16)]>; + +def ESP_VSMULAS_U8_QACC_LD_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, select_16:$sel16), + "esp.vsmulas.u8.qacc.ld.incp\t $qu, $rs1, $qx, $qy, $sel16", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> sel16; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-19} = sel16{3-0}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSMULAS_U8_QACC_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_16:$sel16, imm8:$qu), + "!esp_vsmulas_u8_qacc_ld_incp_p $qu, $rs1, $qx, $qy, $sel16", + [(int_riscv_esp_vsmulas_u8_qacc_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel16, timm:$qu)]>; + +def ESP_CMUL_S16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy, select_4:$sel4), + "esp.cmul.s16\t $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<2> sel4; + bits<3> qz; 
+ let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16-15} = sel4{1-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, select_4:$sel4, imm8:$qz), + "!esp_cmul_s16_p $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_s16 timm:$qx, timm:$qy, timm:$sel4, timm:$qz)]>; + +def ESP_CMUL_S16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, select_4:$sel4), + "esp.cmul.s16.ld.incp\t $qu, $rs1, $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<2> sel4; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21-20} = sel4{1-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_S16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_4:$sel4, imm8:$qz, imm8:$qu), + "!esp_cmul_s16_ld_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_s16_ld_incp timm:$qx, timm:$qy, 
GPRPIE:$rs1, timm:$sel4, timm:$qz, timm:$qu)]>; + +def ESP_CMUL_S16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, select_4:$sel4), + "esp.cmul.s16.st.incp\t $qu, $rs1, $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<2> sel4; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21-20} = sel4{1-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, select_4:$sel4, imm8:$qz), + "!esp_cmul_s16_st_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$sel4, timm:$qz)]>; + +def ESP_CMUL_S8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy, select_4:$sel4), + "esp.cmul.s8\t $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<2> sel4; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16-15} = sel4{1-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; 
+ let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_S8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, select_4:$sel4, imm8:$qz), + "!esp_cmul_s8_p $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_s8 timm:$qx, timm:$qy, timm:$sel4, timm:$qz)]>; + +def ESP_CMUL_S8_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, select_4:$sel4), + "esp.cmul.s8.ld.incp\t $qu, $rs1, $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<2> sel4; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21-20} = sel4{1-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_S8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_4:$sel4, imm8:$qz, imm8:$qu), + "!esp_cmul_s8_ld_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel4, timm:$qz, timm:$qu)]>; + +def ESP_CMUL_S8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, select_4:$sel4), + "esp.cmul.s8.st.incp\t $qu, $rs1, $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<2> sel4; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 0; 
+ let Inst{22} = 1; + let Inst{21-20} = sel4{1-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_S8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, select_4:$sel4, imm8:$qz), + "!esp_cmul_s8_st_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$sel4, timm:$qz)]>; + +def ESP_CMUL_U16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy, select_4:$sel4), + "esp.cmul.u16\t $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<2> sel4; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16-15} = sel4{1-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_U16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, select_4:$sel4, imm8:$qz), + "!esp_cmul_u16_p $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_u16 timm:$qx, timm:$qy, timm:$sel4, timm:$qz)]>; + +def ESP_CMUL_U16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, select_4:$sel4), + "esp.cmul.u16.ld.incp\t $qu, $rs1, $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<2> sel4; + bits<3> qz; + bits<3> qu; + 
bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21-20} = sel4{1-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_U16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_4:$sel4, imm8:$qz, imm8:$qu), + "!esp_cmul_u16_ld_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel4, timm:$qz, timm:$qu)]>; + +def ESP_CMUL_U16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, select_4:$sel4), + "esp.cmul.u16.st.incp\t $qu, $rs1, $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<2> sel4; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21-20} = sel4{1-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_U16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, select_4:$sel4, imm8:$qz), + "!esp_cmul_u16_st_incp_p $qu, $rs1, 
$qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$sel4, timm:$qz)]>; + +def ESP_CMUL_U8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy, select_4:$sel4), + "esp.cmul.u8\t $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<2> sel4; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16-15} = sel4{1-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_U8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, select_4:$sel4, imm8:$qz), + "!esp_cmul_u8_p $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_u8 timm:$qx, timm:$qy, timm:$sel4, timm:$qz)]>; + +def ESP_CMUL_U8_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, select_4:$sel4), + "esp.cmul.u8.ld.incp\t $qu, $rs1, $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<2> sel4; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21-20} = sel4{1-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let 
Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_U8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_4:$sel4, imm8:$qz, imm8:$qu), + "!esp_cmul_u8_ld_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel4, timm:$qz, timm:$qu)]>; + +def ESP_CMUL_U8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, select_4:$sel4), + "esp.cmul.u8.st.incp\t $qu, $rs1, $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<2> sel4; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21-20} = sel4{1-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_U8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, select_4:$sel4, imm8:$qz), + "!esp_cmul_u8_st_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$sel4, timm:$qz)]>; + +def ESP_MAX_S16_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.max.s16.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 
qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MAX_S16_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_max_s16_a_p $qw, $rd", + [(int_riscv_esp_max_s16_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MAX_S32_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.max.s32.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MAX_S32_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_max_s32_a_p $qw, $rd", + [(int_riscv_esp_max_s32_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MAX_S8_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.max.s8.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 
0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MAX_S8_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_max_s8_a_p $qw, $rd", + [(int_riscv_esp_max_s8_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MAX_U16_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.max.u16.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MAX_U16_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_max_u16_a_p $qw, $rd", + [(int_riscv_esp_max_u16_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MAX_U32_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.max.u32.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + 
let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MAX_U32_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_max_u32_a_p $qw, $rd", + [(int_riscv_esp_max_u32_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MAX_U8_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.max.u8.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MAX_U8_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_max_u8_a_p $qw, $rd", + [(int_riscv_esp_max_u8_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MIN_S16_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.min.s16.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let 
Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MIN_S16_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_min_s16_a_p $qw, $rd", + [(int_riscv_esp_min_s16_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MIN_S32_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.min.s32.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MIN_S32_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_min_s32_a_p $qw, $rd", + [(int_riscv_esp_min_s32_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MIN_S8_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.min.s8.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let 
Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MIN_S8_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_min_s8_a_p $qw, $rd", + [(int_riscv_esp_min_s8_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MIN_U16_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.min.u16.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MIN_U16_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_min_u16_a_p $qw, $rd", + [(int_riscv_esp_min_u16_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MIN_U32_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.min.u32.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let 
Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MIN_U32_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_min_u32_a_p $qw, $rd", + [(int_riscv_esp_min_u32_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MIN_U8_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.min.u8.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MIN_U8_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_min_u8_a_p $qw, $rd", + [(int_riscv_esp_min_u8_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_VABS_16: Esp32P4Inst<(outs QR:$qv), (ins QR:$qy), + "esp.vabs.16\t $qv, $qy", []> +{ + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 
0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VABS_16_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qv), + "!esp_vabs_16_p $qv, $qy", + [(int_riscv_esp_vabs_16 timm:$qy, timm:$qv)]>; + +def ESP_VABS_32: Esp32P4Inst<(outs QR:$qv), (ins QR:$qy), + "esp.vabs.32\t $qv, $qy", []> +{ + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VABS_32_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qv), + "!esp_vabs_32_p $qv, $qy", + [(int_riscv_esp_vabs_32 timm:$qy, timm:$qv)]>; + +def ESP_VABS_8: Esp32P4Inst<(outs QR:$qv), (ins QR:$qy), + "esp.vabs.8\t $qv, $qy", []> +{ + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; 
+ let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VABS_8_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qv), + "!esp_vabs_8_p $qv, $qy", + [(int_riscv_esp_vabs_8 timm:$qy, timm:$qv)]>; + +def ESP_VADD_S16: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vadd.s16\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vadd_s16_p $qv, $qx, $qy", + [(int_riscv_esp_vadd_s16 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VADD_S16_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vadd.s16.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let 
Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_S16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vadd_s16_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VADD_S16_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vadd.s16.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vadd_s16_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VADD_S32: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vadd.s32\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let 
Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_S32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vadd_s32_p $qv, $qx, $qy", + [(int_riscv_esp_vadd_s32 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VADD_S32_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vadd.s32.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_S32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vadd_s32_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_s32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VADD_S32_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vadd.s32.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let 
Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_S32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vadd_s32_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_s32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VADD_S8: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vadd.s8\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_S8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vadd_s8_p $qv, $qx, $qy", + [(int_riscv_esp_vadd_s8 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VADD_S8_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vadd.s8.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + 
let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_S8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vadd_s8_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VADD_S8_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vadd.s8.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_S8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vadd_s8_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VADD_U16: 
Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vadd.u16\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_U16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vadd_u16_p $qv, $qx, $qy", + [(int_riscv_esp_vadd_u16 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VADD_U16_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vadd.u16.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_U16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vadd_u16_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + 
[(int_riscv_esp_vadd_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VADD_U16_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vadd.u16.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_U16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vadd_u16_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VADD_U32: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vadd.u32\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let 
usesCustomInserter = 1 in +def ESP_VADD_U32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vadd_u32_p $qv, $qx, $qy", + [(int_riscv_esp_vadd_u32 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VADD_U32_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vadd.u32.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_U32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vadd_u32_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_u32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VADD_U32_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vadd.u32.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + 
let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_U32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vadd_u32_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_u32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VADD_U8: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vadd.u8\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_U8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vadd_u8_p $qv, $qx, $qy", + [(int_riscv_esp_vadd_u8 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VADD_U8_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vadd.u8.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + 
let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_U8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vadd_u8_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VADD_U8_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vadd.u8.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_U8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vadd_u8_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VCLAMP_S16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, select_16:$sel16), + "esp.vclamp.s16\t $qz, $qx, $sel16", []> +{ + bits<3> qx; + bits<4> sel16; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28} = 0; + 
let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = sel16{3-2}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19-18} = sel16{1-0}; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCLAMP_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, select_16:$sel16, imm8:$qz), + "!esp_vclamp_s16_p $qz, $qx, $sel16", + [(int_riscv_esp_vclamp_s16 timm:$qx, timm:$sel16, timm:$qz)]>; + +def ESP_VMAX_S16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmax.s16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmax_s16_p $qz, $qx, $qy", + [(int_riscv_esp_vmax_s16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMAX_S16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmax.s16.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 
1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_S16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmax_s16_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMAX_S16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmax.s16.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmax_s16_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def 
ESP_VMAX_S32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmax.s32\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_S32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmax_s32_p $qz, $qx, $qy", + [(int_riscv_esp_vmax_s32 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMAX_S32_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmax.s32.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_S32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmax_s32_ld_incp_p $qu, $rs1, $qz, 
$qx, $qy", + [(int_riscv_esp_vmax_s32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMAX_S32_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmax.s32.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_S32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmax_s32_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_s32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMAX_S8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmax.s8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} 
+ +let usesCustomInserter = 1 in +def ESP_VMAX_S8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmax_s8_p $qz, $qx, $qy", + [(int_riscv_esp_vmax_s8 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMAX_S8_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmax.s8.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_S8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmax_s8_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMAX_S8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmax.s8.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + 
let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_S8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmax_s8_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMAX_U16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmax.u16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_U16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmax_u16_p $qz, $qx, $qy", + [(int_riscv_esp_vmax_u16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMAX_U16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmax.u16.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let 
Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_U16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmax_u16_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMAX_U16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmax.u16.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_U16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmax_u16_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMAX_U32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmax.u32\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = 
qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_U32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmax_u32_p $qz, $qx, $qy", + [(int_riscv_esp_vmax_u32 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMAX_U32_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmax.u32.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_U32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmax_u32_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_u32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMAX_U32_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmax.u32.st.incp\t $qu, $rs1, $qz, $qx, 
$qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_U32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmax_u32_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_u32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMAX_U8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmax.u8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_U8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmax_u8_p $qz, $qx, $qy", + [(int_riscv_esp_vmax_u8 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMAX_U8_LD_INCP: Esp32P4Inst<(outs QR:$qz, 
QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmax.u8.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_U8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmax_u8_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMAX_U8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmax.u8.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_U8_ST_INCP_P : 
PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmax_u8_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMIN_S16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmin.s16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmin_s16_p $qz, $qx, $qy", + [(int_riscv_esp_vmin_s16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMIN_S16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmin.s16.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 
1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_S16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmin_s16_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmin_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMIN_S16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmin.s16.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmin_s16_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmin_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMIN_S32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmin.s32\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let 
Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_S32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmin_s32_p $qz, $qx, $qy", + [(int_riscv_esp_vmin_s32 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMIN_S32_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmin.s32.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_S32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmin_s32_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmin_s32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMIN_S32_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmin.s32.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = 
qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_S32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmin_s32_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmin_s32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMIN_S8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmin.s8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_S8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmin_s8_p $qz, $qx, $qy", + [(int_riscv_esp_vmin_s8 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMIN_S8_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmin.s8.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let 
hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_S8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmin_s8_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmin_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMIN_S8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmin.s8.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_S8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmin_s8_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmin_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMIN_U16: 
Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmin.u16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_U16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmin_u16_p $qz, $qx, $qy", + [(int_riscv_esp_vmin_u16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMIN_U16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmin.u16.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_U16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmin_u16_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + 
[(int_riscv_esp_vmin_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMIN_U16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmin.u16.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_U16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmin_u16_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmin_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMIN_U32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmin.u32\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let 
usesCustomInserter = 1 in +def ESP_VMIN_U32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmin_u32_p $qz, $qx, $qy", + [(int_riscv_esp_vmin_u32 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMIN_U32_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmin.u32.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_U32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmin_u32_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmin_u32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMIN_U32_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmin.u32.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + 
let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_U32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmin_u32_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmin_u32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMIN_U8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmin.u8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_U8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmin_u8_p $qz, $qx, $qy", + [(int_riscv_esp_vmin_u8 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMIN_U8_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmin.u8.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} 
= 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_U8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmin_u8_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmin_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMIN_U8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmin.u8.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_U8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmin_u8_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmin_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMUL_S16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmul.s16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let 
Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMUL_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmul_s16_p $qz, $qx, $qy", + [(int_riscv_esp_vmul_s16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMUL_S16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmul.s16.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMUL_S16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmul_s16_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmul_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMUL_S16_S8XS8: Esp32P4Inst<(outs QR:$qz, QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vmul.s16.s8xs8\t $qz, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> 
qz; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMUL_S16_S8XS8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz, imm8:$qv), + "!esp_vmul_s16_s8xs8_p $qz, $qv, $qx, $qy", + [(int_riscv_esp_vmul_s16_s8xs8 timm:$qx, timm:$qy, timm:$qz, timm:$qv)]>; + +def ESP_VMUL_S16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmul.s16.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMUL_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmul_s16_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmul_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def 
ESP_VMUL_S32_S16XS16: Esp32P4Inst<(outs QR:$qz, QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vmul.s32.s16xs16\t $qz, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMUL_S32_S16XS16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz, imm8:$qv), + "!esp_vmul_s32_s16xs16_p $qz, $qv, $qx, $qy", + [(int_riscv_esp_vmul_s32_s16xs16 timm:$qx, timm:$qy, timm:$qz, timm:$qv)]>; + +def ESP_VMUL_S8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmul.s8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMUL_S8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmul_s8_p $qz, $qx, $qy", + [(int_riscv_esp_vmul_s8 timm:$qx, timm:$qy, timm:$qz)]>; + +def 
ESP_VMUL_S8_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmul.s8.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMUL_S8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmul_s8_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmul_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMUL_S8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmul.s8.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let 
usesCustomInserter = 1 in +def ESP_VMUL_S8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmul_s8_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmul_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMUL_U16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmul.u16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMUL_U16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmul_u16_p $qz, $qx, $qy", + [(int_riscv_esp_vmul_u16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMUL_U16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmul.u16.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; 
+ let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMUL_U16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmul_u16_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmul_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMUL_U16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmul.u16.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMUL_U16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmul_u16_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmul_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMUL_U8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmul.u8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 
0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMUL_U8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmul_u8_p $qz, $qx, $qy", + [(int_riscv_esp_vmul_u8 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMUL_U8_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmul.u8.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMUL_U8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmul_u8_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmul_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMUL_U8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmul.u8.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let 
Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMUL_U8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmul_u8_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmul_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VPRELU_S16: Esp32P4Inst<(outs QR:$qz), (ins GPRPIE:$rs1, QR:$qx, QR:$qy), + "esp.vprelu.s16\t $qz, $qy, $qx, $rs1", []> +{ + bits<5> rs1; + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VPRELU_S16_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vprelu_s16_p $qz, $qy, $qx, $rs1", + [(int_riscv_esp_vprelu_s16 GPRPIE:$rs1, timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VPRELU_S8: Esp32P4Inst<(outs QR:$qz), (ins GPRPIE:$rs1, QR:$qx, QR:$qy), + "esp.vprelu.s8\t $qz, $qy, $qx, $rs1", []> +{ + bits<5> rs1; + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let 
mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VPRELU_S8_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vprelu_s8_p $qz, $qy, $qx, $rs1", + [(int_riscv_esp_vprelu_s8 GPRPIE:$rs1, timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VRELU_S16: Esp32P4Inst<(outs QR:$qyr), (ins GPRPIE:$rs1, GPRPIE:$rs2, QR:$qy), + "esp.vrelu.s16\t $qy, $rs2, $rs1", []> +{ + bits<5> rs1; + bits<5> rs2; + bits<3> qy; + bits<3> qyr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qyr = $qy"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VRELU_S16_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, GPRPIE:$rs2, imm8:$qy), + "!esp_vrelu_s16_p $qy, $rs2, $rs1", + [(int_riscv_esp_vrelu_s16 GPRPIE:$rs1, GPRPIE:$rs2, timm:$qy)]>; + +def ESP_VRELU_S8: Esp32P4Inst<(outs QR:$qyr), (ins GPRPIE:$rs1, GPRPIE:$rs2, QR:$qy), + "esp.vrelu.s8\t $qy, $rs2, $rs1", 
[]> +{ + bits<5> rs1; + bits<5> rs2; + bits<3> qy; + bits<3> qyr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qyr = $qy"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VRELU_S8_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, GPRPIE:$rs2, imm8:$qy), + "!esp_vrelu_s8_p $qy, $rs2, $rs1", + [(int_riscv_esp_vrelu_s8 GPRPIE:$rs1, GPRPIE:$rs2, timm:$qy)]>; + +def ESP_VSADDS_S16: Esp32P4Inst<(outs QR:$qv), (ins GPRPIE:$rs1, QR:$qx), + "esp.vsadds.s16\t $qv, $qx, $rs1", []> +{ + bits<5> rs1; + bits<3> qx; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSADDS_S16_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qx, imm8:$qv), + "!esp_vsadds_s16_p $qv, $qx, $rs1", + [(int_riscv_esp_vsadds_s16 GPRPIE:$rs1, timm:$qx, timm:$qv)]>; + +def ESP_VSADDS_S8: Esp32P4Inst<(outs QR:$qv), (ins 
GPRPIE:$rs1, QR:$qx), + "esp.vsadds.s8\t $qv, $qx, $rs1", []> +{ + bits<5> rs1; + bits<3> qx; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSADDS_S8_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qx, imm8:$qv), + "!esp_vsadds_s8_p $qv, $qx, $rs1", + [(int_riscv_esp_vsadds_s8 GPRPIE:$rs1, timm:$qx, timm:$qv)]>; + +def ESP_VSADDS_U16: Esp32P4Inst<(outs QR:$qv), (ins GPRPIE:$rs1, QR:$qx), + "esp.vsadds.u16\t $qv, $qx, $rs1", []> +{ + bits<5> rs1; + bits<3> qx; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSADDS_U16_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qx, imm8:$qv), + "!esp_vsadds_u16_p $qv, $qx, $rs1", + [(int_riscv_esp_vsadds_u16 GPRPIE:$rs1, timm:$qx, timm:$qv)]>; + +def ESP_VSADDS_U8: Esp32P4Inst<(outs QR:$qv), (ins 
GPRPIE:$rs1, QR:$qx), + "esp.vsadds.u8\t $qv, $qx, $rs1", []> +{ + bits<5> rs1; + bits<3> qx; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSADDS_U8_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qx, imm8:$qv), + "!esp_vsadds_u8_p $qv, $qx, $rs1", + [(int_riscv_esp_vsadds_u8 GPRPIE:$rs1, timm:$qx, timm:$qv)]>; + +def ESP_VSAT_S16: Esp32P4Inst<(outs QR:$qz), (ins GPRPIE:$rs1, GPRPIE:$rs2, QR:$qx), + "esp.vsat.s16\t $qz, $qx, $rs1, $rs2", []> +{ + bits<5> rs1; + bits<5> rs2; + bits<3> qx; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSAT_S16_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, GPRPIE:$rs2, imm8:$qx, imm8:$qz), + "!esp_vsat_s16_p $qz, $qx, $rs1, $rs2", + [(int_riscv_esp_vsat_s16 GPRPIE:$rs1, GPRPIE:$rs2, timm:$qx, timm:$qz)]>; + +def ESP_VSAT_S32: 
Esp32P4Inst<(outs QR:$qz), (ins GPRPIE:$rs1, GPRPIE:$rs2, QR:$qx), + "esp.vsat.s32\t $qz, $qx, $rs1, $rs2", []> +{ + bits<5> rs1; + bits<5> rs2; + bits<3> qx; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSAT_S32_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, GPRPIE:$rs2, imm8:$qx, imm8:$qz), + "!esp_vsat_s32_p $qz, $qx, $rs1, $rs2", + [(int_riscv_esp_vsat_s32 GPRPIE:$rs1, GPRPIE:$rs2, timm:$qx, timm:$qz)]>; + +def ESP_VSAT_S8: Esp32P4Inst<(outs QR:$qz), (ins GPRPIE:$rs1, GPRPIE:$rs2, QR:$qx), + "esp.vsat.s8\t $qz, $qx, $rs1, $rs2", []> +{ + bits<5> rs1; + bits<5> rs2; + bits<3> qx; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSAT_S8_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, GPRPIE:$rs2, imm8:$qx, imm8:$qz), + "!esp_vsat_s8_p $qz, $qx, $rs1, $rs2", + [(int_riscv_esp_vsat_s8 GPRPIE:$rs1, 
GPRPIE:$rs2, timm:$qx, timm:$qz)]>; + +def ESP_VSAT_U16: Esp32P4Inst<(outs QR:$qz), (ins GPRPIE:$rs1, GPRPIE:$rs2, QR:$qx), + "esp.vsat.u16\t $qz, $qx, $rs1, $rs2", []> +{ + bits<5> rs1; + bits<5> rs2; + bits<3> qx; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSAT_U16_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, GPRPIE:$rs2, imm8:$qx, imm8:$qz), + "!esp_vsat_u16_p $qz, $qx, $rs1, $rs2", + [(int_riscv_esp_vsat_u16 GPRPIE:$rs1, GPRPIE:$rs2, timm:$qx, timm:$qz)]>; + +def ESP_VSAT_U32: Esp32P4Inst<(outs QR:$qz), (ins GPRPIE:$rs1, GPRPIE:$rs2, QR:$qx), + "esp.vsat.u32\t $qz, $qx, $rs1, $rs2", []> +{ + bits<5> rs1; + bits<5> rs2; + bits<3> qx; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSAT_U32_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, GPRPIE:$rs2, imm8:$qx, imm8:$qz), + "!esp_vsat_u32_p 
$qz, $qx, $rs1, $rs2", + [(int_riscv_esp_vsat_u32 GPRPIE:$rs1, GPRPIE:$rs2, timm:$qx, timm:$qz)]>; + +def ESP_VSAT_U8: Esp32P4Inst<(outs QR:$qz), (ins GPRPIE:$rs1, GPRPIE:$rs2, QR:$qx), + "esp.vsat.u8\t $qz, $qx, $rs1, $rs2", []> +{ + bits<5> rs1; + bits<5> rs2; + bits<3> qx; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSAT_U8_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, GPRPIE:$rs2, imm8:$qx, imm8:$qz), + "!esp_vsat_u8_p $qz, $qx, $rs1, $rs2", + [(int_riscv_esp_vsat_u8 GPRPIE:$rs1, GPRPIE:$rs2, timm:$qx, timm:$qz)]>; + +def ESP_VSSUBS_S16: Esp32P4Inst<(outs QR:$qv), (ins GPRPIE:$rs1, QR:$qx), + "esp.vssubs.s16\t $qv, $qx, $rs1", []> +{ + bits<5> rs1; + bits<3> qx; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 1; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSSUBS_S16_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, 
imm8:$qx, imm8:$qv), + "!esp_vssubs_s16_p $qv, $qx, $rs1", + [(int_riscv_esp_vssubs_s16 GPRPIE:$rs1, timm:$qx, timm:$qv)]>; + +def ESP_VSSUBS_S8: Esp32P4Inst<(outs QR:$qv), (ins GPRPIE:$rs1, QR:$qx), + "esp.vssubs.s8\t $qv, $qx, $rs1", []> +{ + bits<5> rs1; + bits<3> qx; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 1; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSSUBS_S8_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qx, imm8:$qv), + "!esp_vssubs_s8_p $qv, $qx, $rs1", + [(int_riscv_esp_vssubs_s8 GPRPIE:$rs1, timm:$qx, timm:$qv)]>; + +def ESP_VSSUBS_U16: Esp32P4Inst<(outs QR:$qv), (ins GPRPIE:$rs1, QR:$qx), + "esp.vssubs.u16\t $qv, $qx, $rs1", []> +{ + bits<5> rs1; + bits<3> qx; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSSUBS_U16_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, 
imm8:$qx, imm8:$qv), + "!esp_vssubs_u16_p $qv, $qx, $rs1", + [(int_riscv_esp_vssubs_u16 GPRPIE:$rs1, timm:$qx, timm:$qv)]>; + +def ESP_VSSUBS_U8: Esp32P4Inst<(outs QR:$qv), (ins GPRPIE:$rs1, QR:$qx), + "esp.vssubs.u8\t $qv, $qx, $rs1", []> +{ + bits<5> rs1; + bits<3> qx; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSSUBS_U8_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qx, imm8:$qv), + "!esp_vssubs_u8_p $qv, $qx, $rs1", + [(int_riscv_esp_vssubs_u8 GPRPIE:$rs1, timm:$qx, timm:$qv)]>; + +def ESP_VSUB_S16: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vsub.s16\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + 
"!esp_vsub_s16_p $qv, $qx, $qy", + [(int_riscv_esp_vsub_s16 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VSUB_S16_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vsub.s16.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_S16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vsub_s16_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VSUB_S16_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vsub.s16.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 
1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vsub_s16_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VSUB_S32: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vsub.s32\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_S32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vsub_s32_p $qv, $qx, $qy", + [(int_riscv_esp_vsub_s32 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VSUB_S32_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vsub.s32.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + 
let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_S32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vsub_s32_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_s32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VSUB_S32_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vsub.s32.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_S32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vsub_s32_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_s32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VSUB_S8: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vsub.s8\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 
0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_S8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vsub_s8_p $qv, $qx, $qy", + [(int_riscv_esp_vsub_s8 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VSUB_S8_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vsub.s8.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_S8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vsub_s8_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VSUB_S8_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vsub.s8.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let 
mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_S8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vsub_s8_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VSUB_U16: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vsub.u16\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_U16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vsub_u16_p $qv, $qx, $qy", + [(int_riscv_esp_vsub_u16 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VSUB_U16_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vsub.u16.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> 
qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_U16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vsub_u16_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VSUB_U16_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vsub.u16.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_U16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vsub_u16_st_incp_p $qu, $rs1, $qv, $qx, 
$qy", + [(int_riscv_esp_vsub_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VSUB_U32: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vsub.u32\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_U32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vsub_u32_p $qv, $qx, $qy", + [(int_riscv_esp_vsub_u32 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VSUB_U32_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vsub.u32.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_U32_LD_INCP_P : PseudoESP32P4<(outs), 
(ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vsub_u32_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_u32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VSUB_U32_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vsub.u32.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_U32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vsub_u32_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_u32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VSUB_U8: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vsub.u8\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 
0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_U8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vsub_u8_p $qv, $qx, $qy", + [(int_riscv_esp_vsub_u8 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VSUB_U8_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vsub.u8.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_U8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vsub_u8_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VSUB_U8_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vsub.u8.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let 
Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_U8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vsub_u8_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_ADDX2: Esp32P4Inst<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2), + "esp.addx2\t $rd, $rs1, $rs2", []> +{ + bits<5> rs1; + bits<5> rs2; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24-20} = rs2{4-0}; + let Inst{19-15} = rs1{4-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11-7} = rd{4-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 0; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ADDX2_P : PseudoESP32P4<(outs), (ins GPR:$rs1, GPR:$rs2, GPR:$rd), + "!esp_addx2_p $rd, $rs1, $rs2", + [(int_riscv_esp_addx2 GPR:$rs1, GPR:$rs2, GPR:$rd)]>; + +def ESP_ADDX4: Esp32P4Inst<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2), + "esp.addx4\t $rd, $rs1, $rs2", []> +{ + bits<5> rs1; + bits<5> rs2; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24-20} = rs2{4-0}; + let Inst{19-15} = rs1{4-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11-7} = rd{4-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} 
= 0; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ADDX4_P : PseudoESP32P4<(outs), (ins GPR:$rs1, GPR:$rs2, GPR:$rd), + "!esp_addx4_p $rd, $rs1, $rs2", + [(int_riscv_esp_addx4 GPR:$rs1, GPR:$rs2, GPR:$rd)]>; + +def ESP_SAT: Esp32P4Inst<(outs GPR:$rsdr), (ins GPR:$rs0, GPR:$rs1, GPR:$rsd), + "esp.sat\t $rsd, $rs0, $rs1", []> +{ + bits<5> rs0; + bits<5> rs1; + bits<5> rsd; + bits<5> rsdr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rsdr = $rsd"; + + let Inst{31} = 0; + let Inst{30} = 1; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24-20} = rsd{4-0}; + let Inst{19-15} = rs1{4-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11-7} = rs0{4-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 0; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SAT_P : PseudoESP32P4<(outs), (ins GPR:$rs0, GPR:$rs1, GPR:$rsd), + "!esp_sat_p $rsd, $rs0, $rs1", + [(int_riscv_esp_sat GPR:$rs0, GPR:$rs1, GPR:$rsd)]>; + +def ESP_SUBX2: Esp32P4Inst<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2), + "esp.subx2\t $rd, $rs1, $rs2", []> +{ + bits<5> rs1; + bits<5> rs2; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 1; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24-20} = rs2{4-0}; + let Inst{19-15} = rs1{4-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11-7} = rd{4-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 0; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SUBX2_P : PseudoESP32P4<(outs), (ins GPR:$rs1, GPR:$rs2, GPR:$rd), + "!esp_subx2_p $rd, $rs1, $rs2", + [(int_riscv_esp_subx2 GPR:$rs1, 
GPR:$rs2, GPR:$rd)]>; + +def ESP_SUBX4: Esp32P4Inst<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2), + "esp.subx4\t $rd, $rs1, $rs2", []> +{ + bits<5> rs1; + bits<5> rs2; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 1; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24-20} = rs2{4-0}; + let Inst{19-15} = rs1{4-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11-7} = rd{4-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 0; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SUBX4_P : PseudoESP32P4<(outs), (ins GPR:$rs1, GPR:$rs2, GPR:$rd), + "!esp_subx4_p $rd, $rs1, $rs2", + [(int_riscv_esp_subx4 GPR:$rs1, GPR:$rs2, GPR:$rd)]>; + +def ESP_ANDQ: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.andq\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ANDQ_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_andq_p $qz, $qx, $qy", + [(int_riscv_esp_andq timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_NOTQ: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx), + "esp.notq\t $qz, $qx", []> +{ + bits<3> qx; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; 
+ + let Inst{31-29} = qx{2-0}; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_NOTQ_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qz), + "!esp_notq_p $qz, $qx", + [(int_riscv_esp_notq timm:$qx, timm:$qz)]>; + +def ESP_ORQ: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.orq\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ORQ_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_orq_p $qz, $qx, $qy", + [(int_riscv_esp_orq timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_XORQ: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.xorq\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; 
+ let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_XORQ_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_xorq_p $qz, $qx, $qy", + [(int_riscv_esp_xorq timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_EQ_S16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.eq.s16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_EQ_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_eq_s16_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_eq_s16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_EQ_S32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.eq.s32\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let 
Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_EQ_S32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_eq_s32_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_eq_s32 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_EQ_S8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.eq.s8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_EQ_S8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_eq_s8_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_eq_s8 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_EQ_U16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.eq.u16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + 
let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_EQ_U16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_eq_u16_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_eq_u16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_EQ_U32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.eq.u32\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_EQ_U32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_eq_u32_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_eq_u32 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_EQ_U8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.eq.u8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} 
= 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_EQ_U8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_eq_u8_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_eq_u8 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_GT_S16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.gt.s16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_GT_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_gt_s16_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_gt_s16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_GT_S32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.gt.s32\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let 
Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_GT_S32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_gt_s32_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_gt_s32 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_GT_S8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.gt.s8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_GT_S8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_gt_s8_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_gt_s8 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_GT_U16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.gt.u16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 0; + 
let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_GT_U16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_gt_u16_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_gt_u16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_GT_U32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.gt.u32\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_GT_U32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_gt_u32_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_gt_u32 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_GT_U8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.gt.u8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} 
= 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_GT_U8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_gt_u8_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_gt_u8 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_LT_S16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.lt.s16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 1; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_LT_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_lt_s16_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_lt_s16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_LT_S32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.lt.s32\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let 
Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_LT_S32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_lt_s32_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_lt_s32 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_LT_S8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.lt.s8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_LT_S8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_lt_s8_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_lt_s8 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_LT_U16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.lt.u16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + 
let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_LT_U16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_lt_u16_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_lt_u16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_LT_U32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.lt.u32\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_LT_U32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_lt_u32_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_lt_u32 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_LT_U8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.lt.u8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let 
Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_LT_U8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_lt_u8_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_lt_u8 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_MOV_S16_QACC: Esp32P4Inst<(outs), (ins QR:$qu), + "esp.mov.s16.qacc\t $qu", []> +{ + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOV_S16_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qu), + "!esp_mov_s16_qacc_p $qu", + [(int_riscv_esp_mov_s16_qacc timm:$qu)]>; + +def ESP_MOV_S8_QACC: Esp32P4Inst<(outs), (ins QR:$qu), + "esp.mov.s8.qacc\t $qu", []> +{ + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let 
Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOV_S8_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qu), + "!esp_mov_s8_qacc_p $qu", + [(int_riscv_esp_mov_s8_qacc timm:$qu)]>; + +def ESP_MOV_U16_QACC: Esp32P4Inst<(outs), (ins QR:$qu), + "esp.mov.u16.qacc\t $qu", []> +{ + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOV_U16_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qu), + "!esp_mov_u16_qacc_p $qu", + [(int_riscv_esp_mov_u16_qacc timm:$qu)]>; + +def ESP_MOV_U8_QACC: Esp32P4Inst<(outs), (ins QR:$qu), + "esp.mov.u8.qacc\t $qu", []> +{ + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def 
ESP_MOV_U8_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qu), + "!esp_mov_u8_qacc_p $qu", + [(int_riscv_esp_mov_u8_qacc timm:$qu)]>; + +def ESP_MOVI_16_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qy, select_16:$sel16), + "esp.movi.16.a\t $qy, $rd, $sel16", []> +{ + bits<3> qy; + bits<4> sel16; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18-15} = sel16{3-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVI_16_A_P : PseudoESP32P4<(outs), (ins imm8:$qy, select_16:$sel16, GPRPIE:$rd), + "!esp_movi_16_a_p $qy, $rd, $sel16", + [(int_riscv_esp_movi_16_a timm:$qy, timm:$sel16, GPRPIE:$rd)]>; + +def ESP_MOVI_16_Q: Esp32P4Inst<(outs QR:$qy), (ins GPRPIE:$rs1, select_16:$sel16), + "esp.movi.16.q\t $qy, $rs1, $sel16", []> +{ + bits<5> rs1; + bits<4> sel16; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10-7} = sel16{3-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVI_16_Q_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, 
select_16:$sel16, imm8:$qy), + "!esp_movi_16_q_p $qy, $rs1, $sel16", + [(int_riscv_esp_movi_16_q GPRPIE:$rs1, timm:$sel16, timm:$qy)]>; + +def ESP_MOVI_32_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qy, select_4:$sel4), + "esp.movi.32.a\t $qy, $rd, $sel4", []> +{ + bits<3> qy; + bits<2> sel4; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = sel4{1}; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = sel4{0}; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVI_32_A_P : PseudoESP32P4<(outs), (ins imm8:$qy, select_4:$sel4, GPRPIE:$rd), + "!esp_movi_32_a_p $qy, $rd, $sel4", + [(int_riscv_esp_movi_32_a timm:$qy, timm:$sel4, GPRPIE:$rd)]>; + +def ESP_MOVI_32_Q: Esp32P4Inst<(outs QR:$qy), (ins GPRPIE:$rs1, select_4:$sel4), + "esp.movi.32.q\t $qy, $rs1, $sel4", []> +{ + bits<5> rs1; + bits<2> sel4; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10-9} = sel4{1-0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + 
+let usesCustomInserter = 1 in +def ESP_MOVI_32_Q_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, select_4:$sel4, imm8:$qy), + "!esp_movi_32_q_p $qy, $rs1, $sel4", + [(int_riscv_esp_movi_32_q GPRPIE:$rs1, timm:$sel4, timm:$qy)]>; + +def ESP_MOVI_8_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qy, select_16:$sel16), + "esp.movi.8.a\t $qy, $rd, $sel16", []> +{ + bits<3> qy; + bits<4> sel16; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18-15} = sel16{3-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVI_8_A_P : PseudoESP32P4<(outs), (ins imm8:$qy, select_16:$sel16, GPRPIE:$rd), + "!esp_movi_8_a_p $qy, $rd, $sel16", + [(int_riscv_esp_movi_8_a timm:$qy, timm:$sel16, GPRPIE:$rd)]>; + +def ESP_MOVI_8_Q: Esp32P4Inst<(outs QR:$qy), (ins GPRPIE:$rs1, select_16:$sel16), + "esp.movi.8.q\t $qy, $rs1, $sel16", []> +{ + bits<5> rs1; + bits<4> sel16; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10-7} = sel16{3-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + 
+let usesCustomInserter = 1 in +def ESP_MOVI_8_Q_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, select_16:$sel16, imm8:$qy), + "!esp_movi_8_q_p $qy, $rs1, $sel16", + [(int_riscv_esp_movi_8_q GPRPIE:$rs1, timm:$sel16, timm:$qy)]>; + +def ESP_MOVX_R_CFG: Esp32P4Inst<(outs GPRPIE:$rd), (ins), + "esp.movx.r.cfg\t $rd", []> +{ + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_R_CFG_P : PseudoESP32P4<(outs), (ins GPRPIE:$rd), + "!esp_movx_r_cfg_p $rd", + [(int_riscv_esp_movx_r_cfg GPRPIE:$rd)]>; + +def ESP_MOVX_R_FFT_BIT_WIDTH: Esp32P4Inst<(outs GPRPIE:$rd), (ins), + "esp.movx.r.fft.bit.width\t $rd", []> +{ + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let 
usesCustomInserter = 1 in +def ESP_MOVX_R_FFT_BIT_WIDTH_P : PseudoESP32P4<(outs), (ins GPRPIE:$rd), + "!esp_movx_r_fft_bit_width_p $rd", + [(int_riscv_esp_movx_r_fft_bit_width GPRPIE:$rd)]>; + +def ESP_MOVX_R_PERF: Esp32P4Inst<(outs GPRPIE:$rd), (ins GPRPIE:$rs1), + "esp.movx.r.perf\t $rd, $rs1", []> +{ + bits<5> rs1; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_R_PERF_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, GPRPIE:$rd), + "!esp_movx_r_perf_p $rd, $rs1", + [(int_riscv_esp_movx_r_perf GPRPIE:$rs1, GPRPIE:$rd)]>; + +def ESP_MOVX_R_SAR: Esp32P4Inst<(outs GPRPIE:$rd), (ins), + "esp.movx.r.sar\t $rd", []> +{ + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 
in +def ESP_MOVX_R_SAR_P : PseudoESP32P4<(outs), (ins GPRPIE:$rd), + "!esp_movx_r_sar_p $rd", + [(int_riscv_esp_movx_r_sar GPRPIE:$rd)]>; + +def ESP_MOVX_R_SAR_BYTES: Esp32P4Inst<(outs GPRPIE:$rd), (ins), + "esp.movx.r.sar.bytes\t $rd", []> +{ + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_R_SAR_BYTES_P : PseudoESP32P4<(outs), (ins GPRPIE:$rd), + "!esp_movx_r_sar_bytes_p $rd", + [(int_riscv_esp_movx_r_sar_bytes GPRPIE:$rd)]>; + +def ESP_MOVX_R_XACC_H: Esp32P4Inst<(outs GPRPIE:$rd), (ins), + "esp.movx.r.xacc.h\t $rd", []> +{ + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_R_XACC_H_P : PseudoESP32P4<(outs), (ins 
GPRPIE:$rd), + "!esp_movx_r_xacc_h_p $rd", + [(int_riscv_esp_movx_r_xacc_h GPRPIE:$rd)]>; + +def ESP_MOVX_R_XACC_L: Esp32P4Inst<(outs GPRPIE:$rd), (ins), + "esp.movx.r.xacc.l\t $rd", []> +{ + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_R_XACC_L_P : PseudoESP32P4<(outs), (ins GPRPIE:$rd), + "!esp_movx_r_xacc_l_p $rd", + [(int_riscv_esp_movx_r_xacc_l GPRPIE:$rd)]>; + +def ESP_MOVX_W_CFG: Esp32P4Inst<(outs), (ins GPRPIE:$rs1), + "esp.movx.w.cfg\t $rs1", []> +{ + bits<5> rs1; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_W_CFG_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1), + "!esp_movx_w_cfg_p $rs1", + [(int_riscv_esp_movx_w_cfg 
GPRPIE:$rs1)]>; + +def ESP_MOVX_W_FFT_BIT_WIDTH: Esp32P4Inst<(outs), (ins GPRPIE:$rs1), + "esp.movx.w.fft.bit.width\t $rs1", []> +{ + bits<5> rs1; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_W_FFT_BIT_WIDTH_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1), + "!esp_movx_w_fft_bit_width_p $rs1", + [(int_riscv_esp_movx_w_fft_bit_width GPRPIE:$rs1)]>; + +def ESP_MOVX_W_PERF: Esp32P4Inst<(outs), (ins GPRPIE:$rs1), + "esp.movx.w.perf\t $rs1", []> +{ + bits<5> rs1; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_W_PERF_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1), + "!esp_movx_w_perf_p $rs1", + [(int_riscv_esp_movx_w_perf GPRPIE:$rs1)]>; + +def 
ESP_MOVX_W_SAR: Esp32P4Inst<(outs), (ins GPRPIE:$rs1), + "esp.movx.w.sar\t $rs1", []> +{ + bits<5> rs1; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_W_SAR_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1), + "!esp_movx_w_sar_p $rs1", + [(int_riscv_esp_movx_w_sar GPRPIE:$rs1)]>; + +def ESP_MOVX_W_SAR_BYTES: Esp32P4Inst<(outs), (ins GPRPIE:$rs1), + "esp.movx.w.sar.bytes\t $rs1", []> +{ + bits<5> rs1; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_W_SAR_BYTES_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1), + "!esp_movx_w_sar_bytes_p $rs1", + [(int_riscv_esp_movx_w_sar_bytes GPRPIE:$rs1)]>; + +def ESP_MOVX_W_XACC_H: Esp32P4Inst<(outs), (ins 
GPRPIE:$rs1), + "esp.movx.w.xacc.h\t $rs1", []> +{ + bits<5> rs1; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_W_XACC_H_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1), + "!esp_movx_w_xacc_h_p $rs1", + [(int_riscv_esp_movx_w_xacc_h GPRPIE:$rs1)]>; + +def ESP_MOVX_W_XACC_L: Esp32P4Inst<(outs), (ins GPRPIE:$rs1), + "esp.movx.w.xacc.l\t $rs1", []> +{ + bits<5> rs1; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_W_XACC_L_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1), + "!esp_movx_w_xacc_l_p $rs1", + [(int_riscv_esp_movx_w_xacc_l GPRPIE:$rs1)]>; + +def ESP_VEXT_S16: Esp32P4Inst<(outs QR:$qz, QR:$qv), (ins QR:$qw), + "esp.vext.s16\t $qz, $qv, $qw", 
[]> +{ + bits<3> qw; + bits<3> qz; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VEXT_S16_P : PseudoESP32P4<(outs), (ins imm8:$qw, imm8:$qz, imm8:$qv), + "!esp_vext_s16_p $qz, $qv, $qw", + [(int_riscv_esp_vext_s16 timm:$qw, timm:$qz, timm:$qv)]>; + +def ESP_VEXT_S8: Esp32P4Inst<(outs QR:$qz, QR:$qv), (ins QR:$qw), + "esp.vext.s8\t $qz, $qv, $qw", []> +{ + bits<3> qw; + bits<3> qz; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VEXT_S8_P : PseudoESP32P4<(outs), (ins imm8:$qw, imm8:$qz, imm8:$qv), + "!esp_vext_s8_p $qz, $qv, $qw", + [(int_riscv_esp_vext_s8 timm:$qw, timm:$qz, timm:$qv)]>; + +def ESP_VEXT_U16: Esp32P4Inst<(outs QR:$qz, QR:$qv), (ins QR:$qw), + "esp.vext.u16\t $qz, $qv, $qw", []> +{ + bits<3> 
qw; + bits<3> qz; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VEXT_U16_P : PseudoESP32P4<(outs), (ins imm8:$qw, imm8:$qz, imm8:$qv), + "!esp_vext_u16_p $qz, $qv, $qw", + [(int_riscv_esp_vext_u16 timm:$qw, timm:$qz, timm:$qv)]>; + +def ESP_VEXT_U8: Esp32P4Inst<(outs QR:$qz, QR:$qv), (ins QR:$qw), + "esp.vext.u8\t $qz, $qv, $qw", []> +{ + bits<3> qw; + bits<3> qz; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VEXT_U8_P : PseudoESP32P4<(outs), (ins imm8:$qw, imm8:$qz, imm8:$qv), + "!esp_vext_u8_p $qz, $qv, $qw", + [(int_riscv_esp_vext_u8 timm:$qw, timm:$qz, timm:$qv)]>; + +def ESP_VUNZIP_16: Esp32P4Inst<(outs QR:$qxr, QR:$qyr), (ins QR:$qx, QR:$qy), + "esp.vunzip.16\t $qx, $qy", []> +{ + bits<3> qx; + 
bits<3> qy; + bits<3> qxr; + bits<3> qyr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qxr = $qx, $qyr = $qy"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VUNZIP_16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vunzip_16_p $qx, $qy", + [(int_riscv_esp_vunzip_16 timm:$qx, timm:$qy)]>; + +def ESP_VUNZIP_32: Esp32P4Inst<(outs QR:$qxr, QR:$qyr), (ins QR:$qx, QR:$qy), + "esp.vunzip.32\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qxr; + bits<3> qyr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qxr = $qx, $qyr = $qy"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VUNZIP_32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vunzip_32_p $qx, $qy", + [(int_riscv_esp_vunzip_32 timm:$qx, timm:$qy)]>; + +def ESP_VUNZIP_8: 
Esp32P4Inst<(outs QR:$qxr, QR:$qyr), (ins QR:$qx, QR:$qy), + "esp.vunzip.8\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qxr; + bits<3> qyr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qxr = $qx, $qyr = $qy"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VUNZIP_8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vunzip_8_p $qx, $qy", + [(int_riscv_esp_vunzip_8 timm:$qx, timm:$qy)]>; + +def ESP_VUNZIPT_16: Esp32P4Inst<(outs QR:$qxr, QR:$qyr, QR:$qwr), (ins QR:$qx, QR:$qy, QR:$qw), + "esp.vunzipt.16\t $qx, $qy, $qw", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qw; + bits<3> qxr; + bits<3> qyr; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qxr = $qx, $qyr = $qy, $qwr = $qw"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VUNZIPT_16_P : 
PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qw), + "!esp_vunzipt_16_p $qx, $qy, $qw", + [(int_riscv_esp_vunzipt_16 timm:$qx, timm:$qy, timm:$qw)]>; + +def ESP_VUNZIPT_8: Esp32P4Inst<(outs QR:$qxr, QR:$qyr, QR:$qwr), (ins QR:$qx, QR:$qy, QR:$qw), + "esp.vunzipt.8\t $qx, $qy, $qw", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qw; + bits<3> qxr; + bits<3> qyr; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qxr = $qx, $qyr = $qy, $qwr = $qw"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VUNZIPT_8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qw), + "!esp_vunzipt_8_p $qx, $qy, $qw", + [(int_riscv_esp_vunzipt_8 timm:$qx, timm:$qy, timm:$qw)]>; + +def ESP_VZIP_16: Esp32P4Inst<(outs QR:$qxr, QR:$qyr), (ins QR:$qx, QR:$qy), + "esp.vzip.16\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qxr; + bits<3> qyr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qxr = $qx, $qyr = $qy"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let 
Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VZIP_16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vzip_16_p $qx, $qy", + [(int_riscv_esp_vzip_16 timm:$qx, timm:$qy)]>; + +def ESP_VZIP_32: Esp32P4Inst<(outs QR:$qxr, QR:$qyr), (ins QR:$qx, QR:$qy), + "esp.vzip.32\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qxr; + bits<3> qyr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qxr = $qx, $qyr = $qy"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VZIP_32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vzip_32_p $qx, $qy", + [(int_riscv_esp_vzip_32 timm:$qx, timm:$qy)]>; + +def ESP_VZIP_8: Esp32P4Inst<(outs QR:$qxr, QR:$qyr), (ins QR:$qx, QR:$qy), + "esp.vzip.8\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qxr; + bits<3> qyr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qxr = $qx, $qyr = $qy"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let 
Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VZIP_8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vzip_8_p $qx, $qy", + [(int_riscv_esp_vzip_8 timm:$qx, timm:$qy)]>; + +def ESP_VZIPT_16: Esp32P4Inst<(outs QR:$qxr, QR:$qyr, QR:$qwr), (ins QR:$qx, QR:$qy, QR:$qw), + "esp.vzipt.16\t $qx, $qy, $qw", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qw; + bits<3> qxr; + bits<3> qyr; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qxr = $qx, $qyr = $qy, $qwr = $qw"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VZIPT_16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qw), + "!esp_vzipt_16_p $qx, $qy, $qw", + [(int_riscv_esp_vzipt_16 timm:$qx, timm:$qy, timm:$qw)]>; + +def ESP_VZIPT_8: Esp32P4Inst<(outs QR:$qxr, QR:$qyr, QR:$qwr), (ins QR:$qx, QR:$qy, QR:$qw), + "esp.vzipt.8\t $qx, $qy, $qw", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qw; + bits<3> qxr; + bits<3> qyr; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qxr = $qx, $qyr = $qy, $qwr = $qw"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} 
= 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VZIPT_8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qw), + "!esp_vzipt_8_p $qx, $qy, $qw", + [(int_riscv_esp_vzipt_8 timm:$qx, timm:$qy, timm:$qw)]>; + +def ESP_ZERO_Q: Esp32P4Inst<(outs QR:$qz), (ins), + "esp.zero.q\t $qz", []> +{ + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ZERO_Q_P : PseudoESP32P4<(outs), (ins imm8:$qz), + "!esp_zero_q_p $qz", + [(int_riscv_esp_zero_q timm:$qz)]>; + +def ESP_ZERO_QACC: Esp32P4Inst<(outs), (ins), + "esp.zero.qacc\t", []> +{ + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let 
Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ZERO_QACC_P : PseudoESP32P4<(outs), (ins), + "!esp_zero_qacc_p", + [(int_riscv_esp_zero_qacc)]>; + +def ESP_ZERO_XACC: Esp32P4Inst<(outs), (ins), + "esp.zero.xacc\t", []> +{ + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ZERO_XACC_P : PseudoESP32P4<(outs), (ins), + "!esp_zero_xacc_p", + [(int_riscv_esp_zero_xacc)]>; + +def ESP_FFT_AMS_S16_LD_INCP: Esp32P4Inst<(outs QR:$qu, QR:$qz, QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qw, GPRPIE:$rs1, select_2:$sel2), + "esp.fft.ams.s16.ld.incp\t $qu, $rs1, $qz, $qv, $qx, $qw, $qy, $sel2", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qw; + bits<5> rs1; + bits<1> sel2; + bits<3> qu; + bits<3> qz; + bits<3> qv; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = sel2{0}; + let Inst{22-20} = 
qv{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_FFT_AMS_S16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qw, GPRPIE:$rs1, select_2:$sel2, imm8:$qu, imm8:$qz, imm8:$qv), + "!esp_fft_ams_s16_ld_incp_p $qu, $rs1, $qz, $qv, $qx, $qw, $qy, $sel2", + [(int_riscv_esp_fft_ams_s16_ld_incp timm:$qx, timm:$qy, timm:$qw, GPRPIE:$rs1, timm:$sel2, timm:$qu, timm:$qz, timm:$qv)]>; + +def ESP_FFT_AMS_S16_LD_INCP_UAUP: Esp32P4Inst<(outs QR:$qu, QR:$qz, QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qw, GPRPIE:$rs1, select_2:$sel2), + "esp.fft.ams.s16.ld.incp.uaup\t $qu, $rs1, $qz, $qv, $qx, $qw, $qy, $sel2", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qw; + bits<5> rs1; + bits<1> sel2; + bits<3> qu; + bits<3> qz; + bits<3> qv; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = sel2{0}; + let Inst{22-20} = qv{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_FFT_AMS_S16_LD_INCP_UAUP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qw, GPRPIE:$rs1, select_2:$sel2, imm8:$qu, imm8:$qz, imm8:$qv), + "!esp_fft_ams_s16_ld_incp_uaup_p $qu, $rs1, $qz, $qv, $qx, $qw, $qy, $sel2", + [(int_riscv_esp_fft_ams_s16_ld_incp_uaup timm:$qx, timm:$qy, timm:$qw, GPRPIE:$rs1, timm:$sel2, 
timm:$qu, timm:$qz, timm:$qv)]>; + +def ESP_FFT_AMS_S16_LD_R32_DECP: Esp32P4Inst<(outs QR:$qu, QR:$qz, QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qw, GPRPIE:$rs1, select_2:$sel2), + "esp.fft.ams.s16.ld.r32.decp\t $qu, $rs1, $qz, $qv, $qx, $qw, $qy, $sel2", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qw; + bits<5> rs1; + bits<1> sel2; + bits<3> qu; + bits<3> qz; + bits<3> qv; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = sel2{0}; + let Inst{22-20} = qv{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_FFT_AMS_S16_LD_R32_DECP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qw, GPRPIE:$rs1, select_2:$sel2, imm8:$qu, imm8:$qz, imm8:$qv), + "!esp_fft_ams_s16_ld_r32_decp_p $qu, $rs1, $qz, $qv, $qx, $qw, $qy, $sel2", + [(int_riscv_esp_fft_ams_s16_ld_r32_decp timm:$qx, timm:$qy, timm:$qw, GPRPIE:$rs1, timm:$sel2, timm:$qu, timm:$qz, timm:$qv)]>; + +def ESP_FFT_AMS_S16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r, GPRPIE:$rs2r), (ins QR:$qx, QR:$qy, QR:$qw, QR:$qu, GPRPIE:$rs1, GPRPIE:$rs2, select_2:$sel2), + "esp.fft.ams.s16.st.incp\t $qu, $qz, $rs2, $rs1, $qx, $qw, $qy, $sel2", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qw; + bits<3> qu; + bits<5> rs1; + bits<5> rs2; + bits<1> sel2; + bits<3> qz; + bits<5> rs1r; + bits<5> rs2r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1, $rs2r = $rs2"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = rs2{4}; + let Inst{22-20} = 
rs2{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = sel2{0}; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_FFT_AMS_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qw, imm8:$qu, GPRPIE:$rs1, GPRPIE:$rs2, select_2:$sel2, imm8:$qz), + "!esp_fft_ams_s16_st_incp_p $qu, $qz, $rs2, $rs1, $qx, $qw, $qy, $sel2", + [(int_riscv_esp_fft_ams_s16_st_incp timm:$qx, timm:$qy, timm:$qw, timm:$qu, GPRPIE:$rs1, GPRPIE:$rs2, timm:$sel2, timm:$qz)]>; + +def ESP_FFT_BITREV: Esp32P4Inst<(outs GPRPIE:$rs1r, QR:$qvr), (ins GPRPIE:$rs1, QR:$qv), + "esp.fft.bitrev\t $qv, $rs1", []> +{ + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + bits<3> qvr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1, $qvr = $qv"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_FFT_BITREV_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qv), + "!esp_fft_bitrev_p $qv, $rs1", + [(int_riscv_esp_fft_bitrev GPRPIE:$rs1, timm:$qv)]>; + +def ESP_FFT_CMUL_S16_LD_XP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1, select_8:$sel8), + "esp.fft.cmul.s16.ld.xp\t $qu, $rs1, $rs2, $qz, $qy, 
$qx, $sel8", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> sel8; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = sel8{2-1}; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = sel8{0}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_FFT_CMUL_S16_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_8:$sel8, imm8:$qz, imm8:$qu), + "!esp_fft_cmul_s16_ld_xp_p $qu, $rs1, $rs2, $qz, $qy, $qx, $sel8", + [(int_riscv_esp_fft_cmul_s16_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel8, timm:$qz, timm:$qu)]>; + +def ESP_FFT_CMUL_S16_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, select_4:$sel4, select_4:$upd4, select_8:$sel8), + "esp.fft.cmul.s16.st.xp\t $qy, $qx, $qu, $rs1, $rs2, $sel8, $upd4, $sel4", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<2> sel4; + bits<2> upd4; + bits<3> sel8; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = sel4{1-0}; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = upd4{1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = upd4{0}; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = sel8{2-0}; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + 
let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_FFT_CMUL_S16_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, select_4:$sel4, select_4:$upd4, select_8:$sel8), + "!esp_fft_cmul_s16_st_xp_p $qy, $qx, $qu, $rs1, $rs2, $sel8, $upd4, $sel4", + [(int_riscv_esp_fft_cmul_s16_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$sel4, timm:$upd4, timm:$sel8)]>; + +def ESP_FFT_R2BF_S16: Esp32P4Inst<(outs QR:$qz, QR:$qv), (ins QR:$qx, QR:$qy, select_2:$sel2), + "esp.fft.r2bf.s16\t $qz, $qv, $qx, $qy, $sel2", []> +{ + bits<3> qx; + bits<3> qy; + bits<1> sel2; + bits<3> qz; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = sel2{0}; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_FFT_R2BF_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, select_2:$sel2, imm8:$qz, imm8:$qv), + "!esp_fft_r2bf_s16_p $qz, $qv, $qx, $qy, $sel2", + [(int_riscv_esp_fft_r2bf_s16 timm:$qx, timm:$qy, timm:$sel2, timm:$qz, timm:$qv)]>; + +def ESP_FFT_R2BF_S16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, select_4:$sel4), + "esp.fft.r2bf.s16.st.incp\t $qz, $qx, $qy, $rs1, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<2> sel4; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; 
+ let Inst{23-22} = sel4{1-0}; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_FFT_R2BF_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_4:$sel4, imm8:$qz), + "!esp_fft_r2bf_s16_st_incp_p $qz, $qx, $qy, $rs1, $sel4", + [(int_riscv_esp_fft_r2bf_s16_st_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel4, timm:$qz)]>; + +def ESP_FFT_VST_R32_DECP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qu, GPRPIE:$rs1, select_2:$sel2), + "esp.fft.vst.r32.decp\t $qu, $rs1, $sel2", []> +{ + bits<3> qu; + bits<5> rs1; + bits<1> sel2; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = sel2{0}; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_FFT_VST_R32_DECP_P : PseudoESP32P4<(outs), (ins imm8:$qu, GPRPIE:$rs1, select_2:$sel2), + "!esp_fft_vst_r32_decp_p $qu, $rs1, $sel2", + [(int_riscv_esp_fft_vst_r32_decp timm:$qu, GPRPIE:$rs1, timm:$sel2)]>; + +def ESP_LD_128_USAR_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + 
"esp.ld.128.usar.ip\t $qu, $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = off25616{7-5}; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off25616{4-1}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off25616{0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LD_128_USAR_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616, imm8:$qu), + "!esp_ld_128_usar_ip_p $qu, $rs1, $off25616", + [(int_riscv_esp_ld_128_usar_ip GPRPIE:$rs1, timm:$off25616, timm:$qu)]>; + +def ESP_LD_128_USAR_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.ld.128.usar.xp\t $qu, $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LD_128_USAR_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), + 
"!esp_ld_128_usar_xp_p $qu, $rs1, $rs2", + [(int_riscv_esp_ld_128_usar_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_LD_XACC_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_8:$off2568), + "esp.ld.xacc.ip\t $rs1, $off2568", []> +{ + bits<5> rs1; + bits<8> off2568; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = off2568{7}; + let Inst{30} = 0; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off2568{6-3}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = off2568{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LD_XACC_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_8:$off2568), + "!esp_ld_xacc_ip_p $rs1, $off2568", + [(int_riscv_esp_ld_xacc_ip GPRPIE:$rs1, timm:$off2568)]>; + +def ESP_LDQA_S16_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "esp.ldqa.s16.128.ip\t $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off25616{7-4}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-9} = off25616{3-0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; 
+} + +let usesCustomInserter = 1 in +def ESP_LDQA_S16_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "!esp_ldqa_s16_128_ip_p $rs1, $off25616", + [(int_riscv_esp_ldqa_s16_128_ip GPRPIE:$rs1, timm:$off25616)]>; + +def ESP_LDQA_S16_128_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.ldqa.s16.128.xp\t $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LDQA_S16_128_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "!esp_ldqa_s16_128_xp_p $rs1, $rs2", + [(int_riscv_esp_ldqa_s16_128_xp GPRPIE:$rs2, GPRPIE:$rs1)]>; + +def ESP_LDQA_S8_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "esp.ldqa.s8.128.ip\t $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off25616{7-4}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-9} = off25616{3-0}; + let Inst{8} = 
0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LDQA_S8_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "!esp_ldqa_s8_128_ip_p $rs1, $off25616", + [(int_riscv_esp_ldqa_s8_128_ip GPRPIE:$rs1, timm:$off25616)]>; + +def ESP_LDQA_S8_128_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.ldqa.s8.128.xp\t $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LDQA_S8_128_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "!esp_ldqa_s8_128_xp_p $rs1, $rs2", + [(int_riscv_esp_ldqa_s8_128_xp GPRPIE:$rs2, GPRPIE:$rs1)]>; + +def ESP_LDQA_U16_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "esp.ldqa.u16.128.ip\t $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off25616{7-4}; + let Inst{19} 
= 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-9} = off25616{3-0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LDQA_U16_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "!esp_ldqa_u16_128_ip_p $rs1, $off25616", + [(int_riscv_esp_ldqa_u16_128_ip GPRPIE:$rs1, timm:$off25616)]>; + +def ESP_LDQA_U16_128_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.ldqa.u16.128.xp\t $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LDQA_U16_128_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "!esp_ldqa_u16_128_xp_p $rs1, $rs2", + [(int_riscv_esp_ldqa_u16_128_xp GPRPIE:$rs2, GPRPIE:$rs1)]>; + +def ESP_LDQA_U8_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "esp.ldqa.u8.128.ip\t $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} 
= 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off25616{7-4}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-9} = off25616{3-0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LDQA_U8_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "!esp_ldqa_u8_128_ip_p $rs1, $off25616", + [(int_riscv_esp_ldqa_u8_128_ip GPRPIE:$rs1, timm:$off25616)]>; + +def ESP_LDQA_U8_128_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.ldqa.u8.128.xp\t $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LDQA_U8_128_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "!esp_ldqa_u8_128_xp_p $rs1, $rs2", + [(int_riscv_esp_ldqa_u8_128_xp GPRPIE:$rs2, GPRPIE:$rs1)]>; + +def ESP_VLDBC_16_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_4:$off2564), + "esp.vldbc.16.ip\t $qu, $rs1, $off2564", []> +{ + bits<5> rs1; + bits<8> off2564; + bits<3> qu; + bits<5> 
rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30-28} = off2564{7-5}; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23-20} = off2564{4-1}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off2564{0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLDBC_16_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_4:$off2564, imm8:$qu), + "!esp_vldbc_16_ip_p $qu, $rs1, $off2564", + [(int_riscv_esp_vldbc_16_ip GPRPIE:$rs1, timm:$off2564, timm:$qu)]>; + +def ESP_VLDBC_16_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.vldbc.16.xp\t $qu, $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLDBC_16_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), + "!esp_vldbc_16_xp_p $qu, $rs1, $rs2", + [(int_riscv_esp_vldbc_16_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VLDBC_32_IP: 
Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_4:$off2564), + "esp.vldbc.32.ip\t $qu, $rs1, $off2564", []> +{ + bits<5> rs1; + bits<8> off2564; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = off2564{7-5}; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 1; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23-20} = off2564{4-1}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off2564{0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLDBC_32_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_4:$off2564, imm8:$qu), + "!esp_vldbc_32_ip_p $qu, $rs1, $off2564", + [(int_riscv_esp_vldbc_32_ip GPRPIE:$rs1, timm:$off2564, timm:$qu)]>; + +def ESP_VLDBC_32_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.vldbc.32.xp\t $qu, $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 1; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLDBC_32_XP_P : 
PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), + "!esp_vldbc_32_xp_p $qu, $rs1, $rs2", + [(int_riscv_esp_vldbc_32_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VLDBC_8_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_4:$off2564), + "esp.vldbc.8.ip\t $qu, $rs1, $off2564", []> +{ + bits<5> rs1; + bits<8> off2564; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30-28} = off2564{7-5}; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23-20} = off2564{4-1}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off2564{0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLDBC_8_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_4:$off2564, imm8:$qu), + "!esp_vldbc_8_ip_p $qu, $rs1, $off2564", + [(int_riscv_esp_vldbc_8_ip GPRPIE:$rs1, timm:$off2564, timm:$qu)]>; + +def ESP_VLDBC_8_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.vldbc.8.xp\t $qu, $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + 
// (continuation) Closing opcode bits of ESP_VLDBC_8_XP.
let Inst{5} = 0; let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 1; let Inst{1} = 1; let Inst{0} = 1;
}

// Intrinsic-selection pseudo, expanded by a custom inserter.
let usesCustomInserter = 1 in
def ESP_VLDBC_8_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu),
                            "!esp_vldbc_8_xp_p $qu, $rs1, $rs2",
                            [(int_riscv_esp_vldbc_8_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>;

// esp.vldext.s16.ip — load writing TWO vector regs ($qu, $qz); address reg tied via $rs1r.
// NOTE(review): "ext.s16" presumably sign-extends 16-bit elements — confirm vs ISA docs.
def ESP_VLDEXT_S16_IP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r),
                                   (ins GPRPIE:$rs1, offset_16_16:$off1616),
                                   "esp.vldext.s16.ip\t $qu, $qz, $rs1, $off1616", []> {
  bits<5> rs1; bits<4> off1616; bits<3> qu; bits<3> qz; bits<5> rs1r;
  let mayStore = 0; let mayLoad = 1; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 1; let Inst{30} = 1; let Inst{29} = 0; let Inst{28} = 0;
  let Inst{27} = 1; let Inst{26} = 0; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23-20} = off1616{3-0};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 1; let Inst{13} = 0;
  let Inst{12-10} = qu{2-0}; let Inst{9-7} = qz{2-0};
  let Inst{6} = 0; let Inst{5} = 1; let Inst{4} = 1; let Inst{3} = 1;
  let Inst{2} = 0; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_VLDEXT_S16_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu, imm8:$qz),
                              "!esp_vldext_s16_ip_p $qu, $qz, $rs1, $off1616",
                              [(int_riscv_esp_vldext_s16_ip GPRPIE:$rs1, timm:$off1616, timm:$qu, timm:$qz)]>;

// esp.vldext.s16.xp — register-indexed variant (definition continues on the next line).
def ESP_VLDEXT_S16_XP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1),
                                   "esp.vldext.s16.xp\t $qu, $qz, $rs1, $rs2", []> {
  bits<5> rs2; bits<5> rs1; bits<3> qu; bits<3> qz; bits<5> rs1r;
  let mayStore = 0; let mayLoad = 1; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 1; let Inst{30} = 1; let Inst{29} = 1; let Inst{28} = 1;
  let Inst{27} = 0; let Inst{26} = 0; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23} =
// (continuation) Operand fields and closing bits of ESP_VLDEXT_S16_XP.
rs2{4}; let Inst{22-20} = rs2{2-0};
let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
let Inst{14} = 1; let Inst{13} = 1;
let Inst{12-10} = qu{2-0}; let Inst{9-7} = qz{2-0};
let Inst{6} = 1; let Inst{5} = 0; let Inst{4} = 1; let Inst{3} = 1;
let Inst{2} = 1; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_VLDEXT_S16_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu, imm8:$qz),
                              "!esp_vldext_s16_xp_p $qu, $qz, $rs1, $rs2",
                              [(int_riscv_esp_vldext_s16_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu, timm:$qz)]>;

// esp.vldext.s8.ip — byte-element counterpart of vldext.s16.ip (same field layout, different opcode).
def ESP_VLDEXT_S8_IP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r),
                                  (ins GPRPIE:$rs1, offset_16_16:$off1616),
                                  "esp.vldext.s8.ip\t $qu, $qz, $rs1, $off1616", []> {
  bits<5> rs1; bits<4> off1616; bits<3> qu; bits<3> qz; bits<5> rs1r;
  let mayStore = 0; let mayLoad = 1; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 0; let Inst{30} = 1; let Inst{29} = 0; let Inst{28} = 0;
  let Inst{27} = 1; let Inst{26} = 0; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23-20} = off1616{3-0};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 1; let Inst{13} = 0;
  let Inst{12-10} = qu{2-0}; let Inst{9-7} = qz{2-0};
  let Inst{6} = 0; let Inst{5} = 1; let Inst{4} = 1; let Inst{3} = 1;
  let Inst{2} = 0; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_VLDEXT_S8_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu, imm8:$qz),
                             "!esp_vldext_s8_ip_p $qu, $qz, $rs1, $off1616",
                             [(int_riscv_esp_vldext_s8_ip GPRPIE:$rs1, timm:$off1616, timm:$qu, timm:$qz)]>;

// esp.vldext.s8.xp — register-indexed variant (definition continues on the next line).
def ESP_VLDEXT_S8_XP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1),
                                  "esp.vldext.s8.xp\t $qu, $qz, $rs1, $rs2", []> {
  bits<5> rs2; bits<5> rs1; bits<3> qu; bits<3> qz; bits<5> rs1r;
  let mayStore = 0; let
// (continuation) Remainder of ESP_VLDEXT_S8_XP.
mayLoad = 1; let hasSideEffects = 1;
let Constraints = "$rs1r = $rs1";
let Inst{31} = 0; let Inst{30} = 1; let Inst{29} = 1; let Inst{28} = 1;
let Inst{27} = 0; let Inst{26} = 0; let Inst{25} = 0; let Inst{24} = 0;
let Inst{23} = rs2{4}; let Inst{22-20} = rs2{2-0};
let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
let Inst{14} = 1; let Inst{13} = 1;
let Inst{12-10} = qu{2-0}; let Inst{9-7} = qz{2-0};
let Inst{6} = 1; let Inst{5} = 0; let Inst{4} = 1; let Inst{3} = 1;
let Inst{2} = 1; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_VLDEXT_S8_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu, imm8:$qz),
                             "!esp_vldext_s8_xp_p $qu, $qz, $rs1, $rs2",
                             [(int_riscv_esp_vldext_s8_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu, timm:$qz)]>;

// esp.vldext.u16.ip — unsigned 16-bit counterpart of vldext.s16.ip (differs only in opcode bits).
def ESP_VLDEXT_U16_IP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r),
                                   (ins GPRPIE:$rs1, offset_16_16:$off1616),
                                   "esp.vldext.u16.ip\t $qu, $qz, $rs1, $off1616", []> {
  bits<5> rs1; bits<4> off1616; bits<3> qu; bits<3> qz; bits<5> rs1r;
  let mayStore = 0; let mayLoad = 1; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 1; let Inst{30} = 0; let Inst{29} = 0; let Inst{28} = 0;
  let Inst{27} = 1; let Inst{26} = 0; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23-20} = off1616{3-0};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 1; let Inst{13} = 0;
  let Inst{12-10} = qu{2-0}; let Inst{9-7} = qz{2-0};
  let Inst{6} = 0; let Inst{5} = 1; let Inst{4} = 1; let Inst{3} = 1;
  let Inst{2} = 0; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_VLDEXT_U16_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu, imm8:$qz),
                              "!esp_vldext_u16_ip_p $qu, $qz, $rs1, $off1616",
                              [(int_riscv_esp_vldext_u16_ip GPRPIE:$rs1, timm:$off1616, timm:$qu, timm:$qz)]>;
// esp.vldext.u16.xp — register-indexed unsigned 16-bit variant.
def ESP_VLDEXT_U16_XP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1),
                                   "esp.vldext.u16.xp\t $qu, $qz, $rs1, $rs2", []> {
  bits<5> rs2; bits<5> rs1; bits<3> qu; bits<3> qz; bits<5> rs1r;
  let mayStore = 0; let mayLoad = 1; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";   // address register is read-modify-write
  let Inst{31} = 1; let Inst{30} = 0; let Inst{29} = 1; let Inst{28} = 1;
  let Inst{27} = 0; let Inst{26} = 0; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23} = rs2{4}; let Inst{22-20} = rs2{2-0};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 1; let Inst{13} = 1;
  let Inst{12-10} = qu{2-0}; let Inst{9-7} = qz{2-0};
  let Inst{6} = 1; let Inst{5} = 0; let Inst{4} = 1; let Inst{3} = 1;
  let Inst{2} = 1; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_VLDEXT_U16_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu, imm8:$qz),
                              "!esp_vldext_u16_xp_p $qu, $qz, $rs1, $rs2",
                              [(int_riscv_esp_vldext_u16_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu, timm:$qz)]>;

// esp.vldext.u8.ip — unsigned byte variant with immediate-offset addressing.
def ESP_VLDEXT_U8_IP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r),
                                  (ins GPRPIE:$rs1, offset_16_16:$off1616),
                                  "esp.vldext.u8.ip\t $qu, $qz, $rs1, $off1616", []> {
  bits<5> rs1; bits<4> off1616; bits<3> qu; bits<3> qz; bits<5> rs1r;
  let mayStore = 0; let mayLoad = 1; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 0; let Inst{30} = 0; let Inst{29} = 0; let Inst{28} = 0;
  let Inst{27} = 1; let Inst{26} = 0; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23-20} = off1616{3-0};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 1; let Inst{13} = 0;
  let Inst{12-10} = qu{2-0}; let Inst{9-7} = qz{2-0};
  let Inst{6} = 0; let Inst{5} = 1; let Inst{4} = 1; let Inst{3} = 1;
  let Inst{2} = 0; let Inst{1} = 1; let Inst{0} = 1;
}

// (continues on the next line with "usesCustomInserter = 1 in")
let
// (continuation) Completes the "let ... in" begun at the end of the previous line.
usesCustomInserter = 1 in
def ESP_VLDEXT_U8_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu, imm8:$qz),
                             "!esp_vldext_u8_ip_p $qu, $qz, $rs1, $off1616",
                             [(int_riscv_esp_vldext_u8_ip GPRPIE:$rs1, timm:$off1616, timm:$qu, timm:$qz)]>;

// esp.vldext.u8.xp — register-indexed unsigned byte variant.
def ESP_VLDEXT_U8_XP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1),
                                  "esp.vldext.u8.xp\t $qu, $qz, $rs1, $rs2", []> {
  bits<5> rs2; bits<5> rs1; bits<3> qu; bits<3> qz; bits<5> rs1r;
  let mayStore = 0; let mayLoad = 1; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 0; let Inst{30} = 0; let Inst{29} = 1; let Inst{28} = 1;
  let Inst{27} = 0; let Inst{26} = 0; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23} = rs2{4}; let Inst{22-20} = rs2{2-0};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 1; let Inst{13} = 1;
  let Inst{12-10} = qu{2-0}; let Inst{9-7} = qz{2-0};
  let Inst{6} = 1; let Inst{5} = 0; let Inst{4} = 1; let Inst{3} = 1;
  let Inst{2} = 1; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_VLDEXT_U8_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu, imm8:$qz),
                             "!esp_vldext_u8_xp_p $qu, $qz, $rs1, $rs2",
                             [(int_riscv_esp_vldext_u8_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu, timm:$qz)]>;

// esp.vldhbc.16.incp — no offset operand; only the tied address register (continues on next line).
def ESP_VLDHBC_16_INCP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs1),
                                    "esp.vldhbc.16.incp\t $qu, $qz, $rs1", []> {
  bits<5> rs1; bits<3> qu; bits<3> qz; bits<5> rs1r;
  let mayStore = 0; let mayLoad = 1; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 0; let Inst{30} = 0; let Inst{29} = 1; let Inst{28} = 0;
  let Inst{27} = 1; let Inst{26} = 0; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23} = 0; let Inst{22} = 0; let Inst{21} = 0; let Inst{20} = 0;
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} =
// (continuation) Remainder of ESP_VLDHBC_16_INCP.
rs1{2-0};
let Inst{14} = 1; let Inst{13} = 0;
let Inst{12-10} = qu{2-0}; let Inst{9-7} = qz{2-0};
let Inst{6} = 0; let Inst{5} = 1; let Inst{4} = 1; let Inst{3} = 1;
let Inst{2} = 0; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_VLDHBC_16_INCP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qu, imm8:$qz),
                               "!esp_vldhbc_16_incp_p $qu, $qz, $rs1",
                               [(int_riscv_esp_vldhbc_16_incp GPRPIE:$rs1, timm:$qu, timm:$qz)]>;

// esp.ld.qacc.h.h.128.ip — 128-bit load into accumulator state (no Q-reg operand);
// only the tied address register appears in the operand list.
def ESP_LD_QACC_H_H_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616),
                                        "esp.ld.qacc.h.h.128.ip\t $rs1, $off25616", []> {
  bits<5> rs1; bits<8> off25616; bits<5> rs1r;
  let mayStore = 0; let mayLoad = 1; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 0; let Inst{30} = 1; let Inst{29} = 0; let Inst{28} = 0;
  let Inst{27} = 0; let Inst{26} = 0; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23-20} = off25616{7-4};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 1; let Inst{13} = 0;
  let Inst{12-9} = off25616{3-0};
  let Inst{8} = 0; let Inst{7} = 0; let Inst{6} = 0; let Inst{5} = 1;
  let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 0; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_LD_QACC_H_H_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616),
                                   "!esp_ld_qacc_h_h_128_ip_p $rs1, $off25616",
                                   [(int_riscv_esp_ld_qacc_h_h_128_ip GPRPIE:$rs1, timm:$off25616)]>;

// esp.ld.qacc.h.l.128.ip — same shape, different opcode bits (continues on the next line).
def ESP_LD_QACC_H_L_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616),
                                        "esp.ld.qacc.h.l.128.ip\t $rs1, $off25616", []> {
  bits<5> rs1; bits<8> off25616; bits<5> rs1r;
  let mayStore = 0; let mayLoad = 1; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 0; let Inst{30} = 1; let Inst{29} = 1; let Inst{28} = 0;
  let Inst{27} = 0; let Inst{26} = 0; let
// (continuation) Remainder of ESP_LD_QACC_H_L_128_IP.
Inst{25} = 0; let Inst{24} = 0;
let Inst{23-20} = off25616{7-4};
let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
let Inst{14} = 1; let Inst{13} = 0;
let Inst{12-9} = off25616{3-0};
let Inst{8} = 0; let Inst{7} = 0; let Inst{6} = 0; let Inst{5} = 1;
let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 0; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_LD_QACC_H_L_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616),
                                   "!esp_ld_qacc_h_l_128_ip_p $rs1, $off25616",
                                   [(int_riscv_esp_ld_qacc_h_l_128_ip GPRPIE:$rs1, timm:$off25616)]>;

// esp.ld.qacc.l.h.128.ip — accumulator load; differs from the h.* forms only in opcode bits.
def ESP_LD_QACC_L_H_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616),
                                        "esp.ld.qacc.l.h.128.ip\t $rs1, $off25616", []> {
  bits<5> rs1; bits<8> off25616; bits<5> rs1r;
  let mayStore = 0; let mayLoad = 1; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 0; let Inst{30} = 0; let Inst{29} = 0; let Inst{28} = 0;
  let Inst{27} = 0; let Inst{26} = 0; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23-20} = off25616{7-4};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 1; let Inst{13} = 0;
  let Inst{12-9} = off25616{3-0};
  let Inst{8} = 0; let Inst{7} = 0; let Inst{6} = 0; let Inst{5} = 1;
  let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 0; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_LD_QACC_L_H_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616),
                                   "!esp_ld_qacc_l_h_128_ip_p $rs1, $off25616",
                                   [(int_riscv_esp_ld_qacc_l_h_128_ip GPRPIE:$rs1, timm:$off25616)]>;

// esp.ld.qacc.l.l.128.ip (definition continues on the next line).
def ESP_LD_QACC_L_L_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616),
                                        "esp.ld.qacc.l.l.128.ip\t $rs1, $off25616", []> {
  bits<5> rs1; bits<8> off25616; bits<5> rs1r;
  let mayStore = 0; let mayLoad = 1; let hasSideEffects = 1;
// (continuation) Remainder of ESP_LD_QACC_L_L_128_IP.
let Constraints = "$rs1r = $rs1";
let Inst{31} = 0; let Inst{30} = 0; let Inst{29} = 1; let Inst{28} = 0;
let Inst{27} = 0; let Inst{26} = 0; let Inst{25} = 0; let Inst{24} = 0;
let Inst{23-20} = off25616{7-4};
let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
let Inst{14} = 1; let Inst{13} = 0;
let Inst{12-9} = off25616{3-0};
let Inst{8} = 0; let Inst{7} = 0; let Inst{6} = 0; let Inst{5} = 1;
let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 0; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_LD_QACC_L_L_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616),
                                   "!esp_ld_qacc_l_l_128_ip_p $rs1, $off25616",
                                   [(int_riscv_esp_ld_qacc_l_l_128_ip GPRPIE:$rs1, timm:$off25616)]>;

// esp.ld.ua.state.ip — loads (unaligned-access?) state; note the different immediate split:
// off25616 occupies Inst{30}, Inst{23-20} and Inst{12-10}.
def ESP_LD_UA_STATE_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616),
                                    "esp.ld.ua.state.ip\t $rs1, $off25616", []> {
  bits<5> rs1; bits<8> off25616; bits<5> rs1r;
  let mayStore = 0; let mayLoad = 1; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 0; let Inst{30} = off25616{7}; let Inst{29} = 1; let Inst{28} = 0;
  let Inst{27} = 0; let Inst{26} = 0; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23-20} = off25616{6-3};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 1; let Inst{13} = 0;
  let Inst{12-10} = off25616{2-0};
  let Inst{9} = 0; let Inst{8} = 1; let Inst{7} = 0; let Inst{6} = 0; let Inst{5} = 1;
  let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 0; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_LD_UA_STATE_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616),
                               "!esp_ld_ua_state_ip_p $rs1, $off25616",
                               [(int_riscv_esp_ld_ua_state_ip GPRPIE:$rs1, timm:$off25616)]>;

// esp.ldxq.32 — indexed Q-register load with lane selectors (continues on the next line).
def ESP_LDXQ_32: Esp32P4Inst<(outs QR:$qu), (ins GPRPIE:$rs1, QR:$qw, select_4:$sel4,
// (continuation) Remainder of ESP_LDXQ_32. Unlike the *.ip/*.xp loads there is no
// tied $rs1r output here: the address register is read-only.
select_8:$sel8),
                             "esp.ldxq.32\t $qu, $qw, $rs1, $sel4, $sel8", []> {
  bits<5> rs1; bits<3> qw; bits<2> sel4; bits<3> sel8; bits<3> qu;
  let mayStore = 0; let mayLoad = 1; let hasSideEffects = 1;
  // qw is split: low two bits at {25-24}, high bit at {19}.
  let Inst{31} = 0; let Inst{30-29} = sel4{1-0}; let Inst{28-26} = sel8{2-0};
  let Inst{25-24} = qw{1-0};
  let Inst{23} = 1; let Inst{22} = 0; let Inst{21} = 0; let Inst{20} = 0;
  let Inst{19} = qw{2}; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 0; let Inst{13} = 1; let Inst{12-10} = qu{2-0};
  let Inst{9} = 0; let Inst{8} = 0; let Inst{7} = 0; let Inst{6} = 1; let Inst{5} = 0;
  let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 1; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_LDXQ_32_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qw, select_4:$sel4, select_8:$sel8, imm8:$qu),
                        "!esp_ldxq_32_p $qu, $qw, $rs1, $sel4, $sel8",
                        [(int_riscv_esp_ldxq_32 GPRPIE:$rs1, timm:$qw, timm:$sel4, timm:$sel8, timm:$qu)]>;

// esp.st.qacc.h.h.128.ip — STORE counterpart of ld.qacc.h.h.128.ip (mayStore=1).
def ESP_ST_QACC_H_H_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616),
                                        "esp.st.qacc.h.h.128.ip\t $rs1, $off25616", []> {
  bits<5> rs1; bits<8> off25616; bits<5> rs1r;
  let mayStore = 1; let mayLoad = 0; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 1; let Inst{30} = 1; let Inst{29} = 0; let Inst{28} = 0;
  let Inst{27} = 0; let Inst{26} = 0; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23-20} = off25616{7-4};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 1; let Inst{13} = 0;
  let Inst{12-9} = off25616{3-0};
  let Inst{8} = 0; let Inst{7} = 0; let Inst{6} = 0; let Inst{5} = 1;
  let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 0; let Inst{1} = 1; let Inst{0} = 1;
}

// Pseudo (definition continues on the next line).
let usesCustomInserter = 1 in
def ESP_ST_QACC_H_H_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616),
// (continuation) Asm string and pattern of ESP_ST_QACC_H_H_128_IP_P.
"!esp_st_qacc_h_h_128_ip_p $rs1, $off25616",
                                   [(int_riscv_esp_st_qacc_h_h_128_ip GPRPIE:$rs1, timm:$off25616)]>;

// esp.st.qacc.h.l.128.ip — accumulator store variant.
def ESP_ST_QACC_H_L_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616),
                                        "esp.st.qacc.h.l.128.ip\t $rs1, $off25616", []> {
  bits<5> rs1; bits<8> off25616; bits<5> rs1r;
  let mayStore = 1; let mayLoad = 0; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 1; let Inst{30} = 1; let Inst{29} = 1; let Inst{28} = 0;
  let Inst{27} = 0; let Inst{26} = 0; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23-20} = off25616{7-4};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 1; let Inst{13} = 0;
  let Inst{12-9} = off25616{3-0};
  let Inst{8} = 0; let Inst{7} = 0; let Inst{6} = 0; let Inst{5} = 1;
  let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 0; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_ST_QACC_H_L_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616),
                                   "!esp_st_qacc_h_l_128_ip_p $rs1, $off25616",
                                   [(int_riscv_esp_st_qacc_h_l_128_ip GPRPIE:$rs1, timm:$off25616)]>;

// esp.st.qacc.l.h.128.ip (definition continues on the next line).
def ESP_ST_QACC_L_H_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616),
                                        "esp.st.qacc.l.h.128.ip\t $rs1, $off25616", []> {
  bits<5> rs1; bits<8> off25616; bits<5> rs1r;
  let mayStore = 1; let mayLoad = 0; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 1; let Inst{30} = 0; let Inst{29} = 0; let Inst{28} = 0;
  let Inst{27} = 0; let Inst{26} = 0; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23-20} = off25616{7-4};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 1; let Inst{13} = 0;
  let Inst{12-9} = off25616{3-0};
  let Inst{8} = 0; let Inst{7} = 0; let Inst{6} = 0; let Inst{5} = 1;
  let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 0; let
// (continuation) Closing bits of ESP_ST_QACC_L_H_128_IP.
Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_ST_QACC_L_H_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616),
                                   "!esp_st_qacc_l_h_128_ip_p $rs1, $off25616",
                                   [(int_riscv_esp_st_qacc_l_h_128_ip GPRPIE:$rs1, timm:$off25616)]>;

// esp.st.qacc.l.l.128.ip — accumulator store variant.
def ESP_ST_QACC_L_L_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616),
                                        "esp.st.qacc.l.l.128.ip\t $rs1, $off25616", []> {
  bits<5> rs1; bits<8> off25616; bits<5> rs1r;
  let mayStore = 1; let mayLoad = 0; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 1; let Inst{30} = 0; let Inst{29} = 1; let Inst{28} = 0;
  let Inst{27} = 0; let Inst{26} = 0; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23-20} = off25616{7-4};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 1; let Inst{13} = 0;
  let Inst{12-9} = off25616{3-0};
  let Inst{8} = 0; let Inst{7} = 0; let Inst{6} = 0; let Inst{5} = 1;
  let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 0; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_ST_QACC_L_L_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616),
                                   "!esp_st_qacc_l_l_128_ip_p $rs1, $off25616",
                                   [(int_riscv_esp_st_qacc_l_l_128_ip GPRPIE:$rs1, timm:$off25616)]>;

// esp.st.ua.state.ip — store counterpart of ld.ua.state.ip; same split immediate
// layout ({30},{23-20},{12-10}) with {31}=1 (definition continues on the next line).
def ESP_ST_UA_STATE_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616),
                                    "esp.st.ua.state.ip\t $rs1, $off25616", []> {
  bits<5> rs1; bits<8> off25616; bits<5> rs1r;
  let mayStore = 1; let mayLoad = 0; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 1; let Inst{30} = off25616{7}; let Inst{29} = 1; let Inst{28} = 0;
  let Inst{27} = 0; let Inst{26} = 0; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23-20} = off25616{6-3};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 1; let Inst{13} = 0; let
// (continuation) Remainder of ESP_ST_UA_STATE_IP.
Inst{12-10} = off25616{2-0};
let Inst{9} = 0; let Inst{8} = 1; let Inst{7} = 0; let Inst{6} = 0; let Inst{5} = 1;
let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 0; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_ST_UA_STATE_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616),
                               "!esp_st_ua_state_ip_p $rs1, $off25616",
                               [(int_riscv_esp_st_ua_state_ip GPRPIE:$rs1, timm:$off25616)]>;

// esp.stxq.32 — indexed Q-register store; mirrors ESP_LDXQ_32's encoding with {31}=1,
// no outputs (store-only), $rs1 read-only.
def ESP_STXQ_32: Esp32P4Inst<(outs), (ins GPRPIE:$rs1, QR:$qw, QR:$qu, select_4:$sel4, select_8:$sel8),
                             "esp.stxq.32\t $qu, $qw, $rs1, $sel4, $sel8", []> {
  bits<5> rs1; bits<3> qw; bits<3> qu; bits<2> sel4; bits<3> sel8;
  let mayStore = 1; let mayLoad = 0; let hasSideEffects = 1;
  let Inst{31} = 1; let Inst{30-29} = sel4{1-0}; let Inst{28-26} = sel8{2-0};
  let Inst{25-24} = qw{1-0};
  let Inst{23} = 1; let Inst{22} = 0; let Inst{21} = 0; let Inst{20} = 0;
  let Inst{19} = qw{2}; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 0; let Inst{13} = 1; let Inst{12-10} = qu{2-0};
  let Inst{9} = 0; let Inst{8} = 0; let Inst{7} = 0; let Inst{6} = 1; let Inst{5} = 0;
  let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 1; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_STXQ_32_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qw, imm8:$qu, select_4:$sel4, select_8:$sel8),
                        "!esp_stxq_32_p $qu, $qw, $rs1, $sel4, $sel8",
                        [(int_riscv_esp_stxq_32 GPRPIE:$rs1, timm:$qw, timm:$qu, timm:$sel4, timm:$sel8)]>;

// esp.vld.128.ip — 128-bit vector load with post-update address (continues on the next line).
def ESP_VLD_128_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616),
                                "esp.vld.128.ip\t $qu, $rs1, $off25616", []> {
  bits<5> rs1; bits<8> off25616; bits<3> qu; bits<5> rs1r;
  let mayStore = 0; let mayLoad = 1; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 0; let Inst{30-28} = off25616{7-5};
  let Inst{27} = 0; let Inst{26} = 0;
// (continuation) Remainder of ESP_VLD_128_IP.
let Inst{25} = 1; let Inst{24} = 0;
let Inst{23-20} = off25616{4-1};
let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
let Inst{14} = 0; let Inst{13} = 1;
let Inst{12-10} = qu{2-0}; let Inst{9} = off25616{0};
let Inst{8} = 0; let Inst{7} = 0; let Inst{6} = 0; let Inst{5} = 1;
let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 0; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_VLD_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616, imm8:$qu),
                           "!esp_vld_128_ip_p $qu, $rs1, $off25616",
                           [(int_riscv_esp_vld_128_ip GPRPIE:$rs1, timm:$off25616, timm:$qu)]>;

// esp.vld.128.xp — register-indexed 128-bit vector load.
def ESP_VLD_128_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1),
                                "esp.vld.128.xp\t $qu, $rs1, $rs2", []> {
  bits<5> rs2; bits<5> rs1; bits<3> qu; bits<5> rs1r;
  let mayStore = 0; let mayLoad = 1; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 1; let Inst{30} = 0; let Inst{29} = 0; let Inst{28} = 0;
  let Inst{27} = 0; let Inst{26} = 0; let Inst{25} = 1; let Inst{24} = 0;
  let Inst{23} = rs2{4}; let Inst{22-20} = rs2{2-0};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 1; let Inst{13} = 0; let Inst{12-10} = qu{2-0};
  let Inst{9} = 0; let Inst{8} = 0; let Inst{7} = 0; let Inst{6} = 1; let Inst{5} = 0;
  let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 1; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_VLD_128_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu),
                           "!esp_vld_128_xp_p $qu, $rs1, $rs2",
                           [(int_riscv_esp_vld_128_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>;

// esp.vld.h.64.ip — 64-bit load (high half, per mnemonic) (continues on the next line).
def ESP_VLD_H_64_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_8:$off2568),
                                 "esp.vld.h.64.ip\t $qu, $rs1, $off2568", []> {
  bits<5> rs1; bits<8> off2568; bits<3> qu; bits<5> rs1r;
  let mayStore = 0;
// (continuation) Remainder of ESP_VLD_H_64_IP.
let mayLoad = 1; let hasSideEffects = 1;
let Constraints = "$rs1r = $rs1";
let Inst{31} = 0; let Inst{30} = 1; let Inst{29-27} = off2568{7-5};
let Inst{26} = 1; let Inst{25} = 0; let Inst{24} = 0;
let Inst{23-20} = off2568{4-1};
let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
let Inst{14} = 0; let Inst{13} = 1;
let Inst{12-10} = qu{2-0}; let Inst{9} = off2568{0};
let Inst{8} = 0; let Inst{7} = 0; let Inst{6} = 0; let Inst{5} = 1;
let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 0; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_VLD_H_64_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_8:$off2568, imm8:$qu),
                            "!esp_vld_h_64_ip_p $qu, $rs1, $off2568",
                            [(int_riscv_esp_vld_h_64_ip GPRPIE:$rs1, timm:$off2568, timm:$qu)]>;

// esp.vld.h.64.xp — register-indexed variant.
def ESP_VLD_H_64_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1),
                                 "esp.vld.h.64.xp\t $qu, $rs1, $rs2", []> {
  bits<5> rs2; bits<5> rs1; bits<3> qu; bits<5> rs1r;
  let mayStore = 0; let mayLoad = 1; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 1; let Inst{30} = 0; let Inst{29} = 0; let Inst{28} = 0;
  let Inst{27} = 1; let Inst{26} = 1; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23} = rs2{4}; let Inst{22-20} = rs2{2-0};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 1; let Inst{13} = 0; let Inst{12-10} = qu{2-0};
  let Inst{9} = 0; let Inst{8} = 0; let Inst{7} = 0; let Inst{6} = 1; let Inst{5} = 0;
  let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 1; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_VLD_H_64_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu),
                            "!esp_vld_h_64_xp_p $qu, $rs1, $rs2",
                            [(int_riscv_esp_vld_h_64_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>;

// esp.vld.l.64.ip (definition continues on the next line).
def ESP_VLD_L_64_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r),
// (continuation) Remainder of ESP_VLD_L_64_IP.
(ins GPRPIE:$rs1, offset_256_8:$off2568),
                                 "esp.vld.l.64.ip\t $qu, $rs1, $off2568", []> {
  bits<5> rs1; bits<8> off2568; bits<3> qu; bits<5> rs1r;
  let mayStore = 0; let mayLoad = 1; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 0; let Inst{30} = 0; let Inst{29-27} = off2568{7-5};
  let Inst{26} = 1; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23-20} = off2568{4-1};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 0; let Inst{13} = 1;
  let Inst{12-10} = qu{2-0}; let Inst{9} = off2568{0};
  let Inst{8} = 0; let Inst{7} = 0; let Inst{6} = 0; let Inst{5} = 1;
  let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 0; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_VLD_L_64_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_8:$off2568, imm8:$qu),
                            "!esp_vld_l_64_ip_p $qu, $rs1, $off2568",
                            [(int_riscv_esp_vld_l_64_ip GPRPIE:$rs1, timm:$off2568, timm:$qu)]>;

// esp.vld.l.64.xp — register-indexed variant.
def ESP_VLD_L_64_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1),
                                 "esp.vld.l.64.xp\t $qu, $rs1, $rs2", []> {
  bits<5> rs2; bits<5> rs1; bits<3> qu; bits<5> rs1r;
  let mayStore = 0; let mayLoad = 1; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 1; let Inst{30} = 0; let Inst{29} = 0; let Inst{28} = 0;
  let Inst{27} = 0; let Inst{26} = 1; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23} = rs2{4}; let Inst{22-20} = rs2{2-0};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 1; let Inst{13} = 0; let Inst{12-10} = qu{2-0};
  let Inst{9} = 0; let Inst{8} = 0; let Inst{7} = 0; let Inst{6} = 1; let Inst{5} = 0;
  let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 1; let Inst{1} = 1; let Inst{0} = 1;
}

// Pseudo (definition continues on the next line).
let usesCustomInserter = 1 in
def ESP_VLD_L_64_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1,
// (continuation) Remainder of the ESP_VLD_L_64_XP_P pseudo.
imm8:$qu),
                            "!esp_vld_l_64_xp_p $qu, $rs1, $rs2",
                            [(int_riscv_esp_vld_l_64_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>;

// esp.vst.128.ip — 128-bit vector STORE; Q-reg is an input here, address reg still tied.
def ESP_VST_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qu, GPRPIE:$rs1, offset_256_16:$off25616),
                                "esp.vst.128.ip\t $qu, $rs1, $off25616", []> {
  bits<3> qu; bits<5> rs1; bits<8> off25616; bits<5> rs1r;
  let mayStore = 1; let mayLoad = 0; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 1; let Inst{30-28} = off25616{7-5};
  let Inst{27} = 0; let Inst{26} = 0; let Inst{25} = 1; let Inst{24} = 0;
  let Inst{23-20} = off25616{4-1};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 0; let Inst{13} = 1;
  let Inst{12-10} = qu{2-0}; let Inst{9} = off25616{0};
  let Inst{8} = 0; let Inst{7} = 0; let Inst{6} = 0; let Inst{5} = 1;
  let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 0; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_VST_128_IP_P : PseudoESP32P4<(outs), (ins imm8:$qu, GPRPIE:$rs1, offset_256_16:$off25616),
                           "!esp_vst_128_ip_p $qu, $rs1, $off25616",
                           [(int_riscv_esp_vst_128_ip timm:$qu, GPRPIE:$rs1, timm:$off25616)]>;

// esp.vst.128.xp — register-indexed store (definition continues on the next line).
def ESP_VST_128_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qu, GPRPIE:$rs1),
                                "esp.vst.128.xp\t $qu, $rs1, $rs2", []> {
  bits<5> rs2; bits<3> qu; bits<5> rs1; bits<5> rs1r;
  let mayStore = 1; let mayLoad = 0; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 1; let Inst{30} = 0; let Inst{29} = 0; let Inst{28} = 1;
  let Inst{27} = 0; let Inst{26} = 0; let Inst{25} = 1; let Inst{24} = 0;
  let Inst{23} = rs2{4}; let Inst{22-20} = rs2{2-0};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 1; let Inst{13} = 0; let Inst{12-10} = qu{2-0};
  let Inst{9} = 0; let Inst{8} = 0; let Inst{7} = 0; let Inst{6} = 1; let Inst{5} = 0;
  let Inst{4} = 1; let
// (continuation) Closing bits of ESP_VST_128_XP.
Inst{3} = 1; let Inst{2} = 1; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_VST_128_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qu, GPRPIE:$rs1),
                           "!esp_vst_128_xp_p $qu, $rs1, $rs2",
                           [(int_riscv_esp_vst_128_xp GPRPIE:$rs2, timm:$qu, GPRPIE:$rs1)]>;

// esp.vst.h.64.ip — 64-bit store (high half, per mnemonic) with post-update address.
def ESP_VST_H_64_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qu, GPRPIE:$rs1, offset_256_8:$off2568),
                                 "esp.vst.h.64.ip\t $qu, $rs1, $off2568", []> {
  bits<3> qu; bits<5> rs1; bits<8> off2568; bits<5> rs1r;
  let mayStore = 1; let mayLoad = 0; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 1; let Inst{30} = 1; let Inst{29-27} = off2568{7-5};
  let Inst{26} = 1; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23-20} = off2568{4-1};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0};
  let Inst{14} = 0; let Inst{13} = 1;
  let Inst{12-10} = qu{2-0}; let Inst{9} = off2568{0};
  let Inst{8} = 0; let Inst{7} = 0; let Inst{6} = 0; let Inst{5} = 1;
  let Inst{4} = 1; let Inst{3} = 1; let Inst{2} = 0; let Inst{1} = 1; let Inst{0} = 1;
}

let usesCustomInserter = 1 in
def ESP_VST_H_64_IP_P : PseudoESP32P4<(outs), (ins imm8:$qu, GPRPIE:$rs1, offset_256_8:$off2568),
                            "!esp_vst_h_64_ip_p $qu, $rs1, $off2568",
                            [(int_riscv_esp_vst_h_64_ip timm:$qu, GPRPIE:$rs1, timm:$off2568)]>;

// esp.vst.h.64.xp — register-indexed variant (definition continues on the next line).
def ESP_VST_H_64_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qu, GPRPIE:$rs1),
                                 "esp.vst.h.64.xp\t $qu, $rs1, $rs2", []> {
  bits<5> rs2; bits<3> qu; bits<5> rs1; bits<5> rs1r;
  let mayStore = 1; let mayLoad = 0; let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";
  let Inst{31} = 1; let Inst{30} = 0; let Inst{29} = 0; let Inst{28} = 1;
  let Inst{27} = 1; let Inst{26} = 1; let Inst{25} = 0; let Inst{24} = 0;
  let Inst{23} = rs2{4}; let Inst{22-20} = rs2{2-0};
  let Inst{19} = 0; let Inst{18} = rs1{4}; let Inst{17-15} = rs1{2-0}; let
Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VST_H_64_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qu, GPRPIE:$rs1), + "!esp_vst_h_64_xp_p $qu, $rs1, $rs2", + [(int_riscv_esp_vst_h_64_xp GPRPIE:$rs2, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_VST_L_64_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qu, GPRPIE:$rs1, offset_256_8:$off2568), + "esp.vst.l.64.ip\t $qu, $rs1, $off2568", []> +{ + bits<3> qu; + bits<5> rs1; + bits<8> off2568; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29-27} = off2568{7-5}; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off2568{4-1}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off2568{0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VST_L_64_IP_P : PseudoESP32P4<(outs), (ins imm8:$qu, GPRPIE:$rs1, offset_256_8:$off2568), + "!esp_vst_l_64_ip_p $qu, $rs1, $off2568", + [(int_riscv_esp_vst_l_64_ip timm:$qu, GPRPIE:$rs1, timm:$off2568)]>; + +def ESP_VST_L_64_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qu, GPRPIE:$rs1), + "esp.vst.l.64.xp\t $qu, $rs1, $rs2", []> +{ + bits<5> rs2; + bits<3> qu; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 
1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VST_L_64_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qu, GPRPIE:$rs1), + "!esp_vst_l_64_xp_p $qu, $rs1, $rs2", + [(int_riscv_esp_vst_l_64_xp GPRPIE:$rs2, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_SLCI_2Q: Esp32P4Inst<(outs QR:$qyr, QR:$qwr), (ins QR:$qy, QR:$qw, select_16:$sel16), + "esp.slci.2q\t $qy, $qw, $sel16", []> +{ + bits<3> qy; + bits<3> qw; + bits<4> sel16; + bits<3> qyr; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qyr = $qy, $qwr = $qw"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = sel16{3}; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9-7} = sel16{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SLCI_2Q_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, select_16:$sel16), + "!esp_slci_2q_p $qy, $qw, $sel16", + [(int_riscv_esp_slci_2q timm:$qy, timm:$qw, timm:$sel16)]>; + +def ESP_SLCXXP_2Q: Esp32P4Inst<(outs QR:$qyr, QR:$qwr), (ins GPRPIE:$rs1, GPRPIE:$rs2, QR:$qy, QR:$qw), + "esp.slcxxp.2q\t $qy, $qw, $rs1, $rs2", []> +{ + bits<5> rs1; + bits<5> rs2; + bits<3> qy; + bits<3> 
qw; + bits<3> qyr; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qyr = $qy, $qwr = $qw"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SLCXXP_2Q_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, GPRPIE:$rs2, imm8:$qy, imm8:$qw), + "!esp_slcxxp_2q_p $qy, $qw, $rs1, $rs2", + [(int_riscv_esp_slcxxp_2q GPRPIE:$rs1, GPRPIE:$rs2, timm:$qy, timm:$qw)]>; + +def ESP_SRC_Q: Esp32P4Inst<(outs QR:$qz), (ins QR:$qy, QR:$qw), + "esp.src.q\t $qz, $qw, $qy", []> +{ + bits<3> qy; + bits<3> qw; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRC_Q_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, imm8:$qz), + "!esp_src_q_p $qz, $qw, $qy", + [(int_riscv_esp_src_q timm:$qy, timm:$qw, timm:$qz)]>; + +def ESP_SRC_Q_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r, QR:$qwr), 
(ins QR:$qy, GPRPIE:$rs1, QR:$qw, offset_256_16:$off25616), + "esp.src.q.ld.ip\t $qu, $rs1, $off25616, $qw, $qy", []> +{ + bits<3> qy; + bits<5> rs1; + bits<3> qw; + bits<8> off25616; + bits<3> qu; + bits<5> rs1r; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1, $qwr = $qw"; + + let Inst{31-29} = off25616{7-5}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23-20} = off25616{4-1}; + let Inst{19} = qw{2}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off25616{0}; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRC_Q_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qy, GPRPIE:$rs1, imm8:$qw, offset_256_16:$off25616, imm8:$qu), + "!esp_src_q_ld_ip_p $qu, $rs1, $off25616, $qw, $qy", + [(int_riscv_esp_src_q_ld_ip timm:$qy, GPRPIE:$rs1, timm:$qw, timm:$off25616, timm:$qu)]>; + +def ESP_SRC_Q_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r, QR:$qwr), (ins GPRPIE:$rs2, QR:$qy, GPRPIE:$rs1, QR:$qw), + "esp.src.q.ld.xp\t $qu, $rs1, $rs2, $qw, $qy", []> +{ + bits<5> rs2; + bits<3> qy; + bits<5> rs1; + bits<3> qw; + bits<3> qu; + bits<5> rs1r; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1, $qwr = $qw"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + 
let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRC_Q_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qy, GPRPIE:$rs1, imm8:$qw, imm8:$qu), + "!esp_src_q_ld_xp_p $qu, $rs1, $rs2, $qw, $qy", + [(int_riscv_esp_src_q_ld_xp GPRPIE:$rs2, timm:$qy, GPRPIE:$rs1, timm:$qw, timm:$qu)]>; + +def ESP_SRC_Q_QUP: Esp32P4Inst<(outs QR:$qz, QR:$qwr), (ins QR:$qy, QR:$qw), + "esp.src.q.qup\t $qz, $qw, $qy", []> +{ + bits<3> qy; + bits<3> qw; + bits<3> qz; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qwr = $qw"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRC_Q_QUP_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, imm8:$qz), + "!esp_src_q_qup_p $qz, $qw, $qy", + [(int_riscv_esp_src_q_qup timm:$qy, timm:$qw, timm:$qz)]>; + +def ESP_SRCI_2Q: Esp32P4Inst<(outs QR:$qyr, QR:$qwr), (ins QR:$qy, QR:$qw, select_16:$sel16), + "esp.srci.2q\t $qy, $qw, $sel16", []> +{ + bits<3> qy; + bits<3> qw; + bits<4> sel16; + bits<3> qyr; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qyr = $qy, $qwr = $qw"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 1; + let Inst{22} = sel16{3}; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + 
let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9-7} = sel16{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCI_2Q_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, select_16:$sel16), + "!esp_srci_2q_p $qy, $qw, $sel16", + [(int_riscv_esp_srci_2q timm:$qy, timm:$qw, timm:$sel16)]>; + +def ESP_SRCMB_S16_Q_QACC: Esp32P4Inst<(outs QR:$qu), (ins QR:$qw, select_2:$sel2), + "esp.srcmb.s16.q.qacc\t $qu, $qw, $sel2", []> +{ + bits<3> qw; + bits<1> sel2; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = sel2{0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCMB_S16_Q_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qw, select_2:$sel2, imm8:$qu), + "!esp_srcmb_s16_q_qacc_p $qu, $qw, $sel2", + [(int_riscv_esp_srcmb_s16_q_qacc timm:$qw, timm:$sel2, timm:$qu)]>; + +def ESP_SRCMB_S16_QACC: Esp32P4Inst<(outs QR:$qu), (ins GPRPIE:$rs1, select_2:$sel2), + "esp.srcmb.s16.qacc\t $qu, $rs1, $sel2", []> +{ + bits<5> rs1; + bits<1> sel2; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = sel2{0}; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let 
Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCMB_S16_QACC_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, select_2:$sel2, imm8:$qu), + "!esp_srcmb_s16_qacc_p $qu, $rs1, $sel2", + [(int_riscv_esp_srcmb_s16_qacc GPRPIE:$rs1, timm:$sel2, timm:$qu)]>; + +def ESP_SRCMB_S8_Q_QACC: Esp32P4Inst<(outs QR:$qu), (ins QR:$qw, select_2:$sel2), + "esp.srcmb.s8.q.qacc\t $qu, $qw, $sel2", []> +{ + bits<3> qw; + bits<1> sel2; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = sel2{0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCMB_S8_Q_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qw, select_2:$sel2, imm8:$qu), + "!esp_srcmb_s8_q_qacc_p $qu, $qw, $sel2", + [(int_riscv_esp_srcmb_s8_q_qacc timm:$qw, timm:$sel2, timm:$qu)]>; + +def ESP_SRCMB_S8_QACC: Esp32P4Inst<(outs QR:$qu), (ins GPRPIE:$rs1, select_2:$sel2), + "esp.srcmb.s8.qacc\t $qu, $rs1, $sel2", []> +{ + bits<5> rs1; + bits<1> sel2; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let 
hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 1; + let Inst{29} = sel2{0}; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCMB_S8_QACC_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, select_2:$sel2, imm8:$qu), + "!esp_srcmb_s8_qacc_p $qu, $rs1, $sel2", + [(int_riscv_esp_srcmb_s8_qacc GPRPIE:$rs1, timm:$sel2, timm:$qu)]>; + +def ESP_SRCMB_U16_Q_QACC: Esp32P4Inst<(outs QR:$qu), (ins QR:$qw, select_2:$sel2), + "esp.srcmb.u16.q.qacc\t $qu, $qw, $sel2", []> +{ + bits<3> qw; + bits<1> sel2; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = sel2{0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCMB_U16_Q_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qw, select_2:$sel2, imm8:$qu), + "!esp_srcmb_u16_q_qacc_p $qu, $qw, $sel2", + [(int_riscv_esp_srcmb_u16_q_qacc timm:$qw, timm:$sel2, timm:$qu)]>; + +def ESP_SRCMB_U16_QACC: Esp32P4Inst<(outs QR:$qu), (ins 
GPRPIE:$rs1, select_2:$sel2), + "esp.srcmb.u16.qacc\t $qu, $rs1, $sel2", []> +{ + bits<5> rs1; + bits<1> sel2; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = sel2{0}; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCMB_U16_QACC_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, select_2:$sel2, imm8:$qu), + "!esp_srcmb_u16_qacc_p $qu, $rs1, $sel2", + [(int_riscv_esp_srcmb_u16_qacc GPRPIE:$rs1, timm:$sel2, timm:$qu)]>; + +def ESP_SRCMB_U8_Q_QACC: Esp32P4Inst<(outs QR:$qu), (ins QR:$qw, select_2:$sel2), + "esp.srcmb.u8.q.qacc\t $qu, $qw, $sel2", []> +{ + bits<3> qw; + bits<1> sel2; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = sel2{0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCMB_U8_Q_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qw, select_2:$sel2, imm8:$qu), + 
"!esp_srcmb_u8_q_qacc_p $qu, $qw, $sel2", + [(int_riscv_esp_srcmb_u8_q_qacc timm:$qw, timm:$sel2, timm:$qu)]>; + +def ESP_SRCMB_U8_QACC: Esp32P4Inst<(outs QR:$qu), (ins GPRPIE:$rs1, select_2:$sel2), + "esp.srcmb.u8.qacc\t $qu, $rs1, $sel2", []> +{ + bits<5> rs1; + bits<1> sel2; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = sel2{0}; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCMB_U8_QACC_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, select_2:$sel2, imm8:$qu), + "!esp_srcmb_u8_qacc_p $qu, $rs1, $sel2", + [(int_riscv_esp_srcmb_u8_qacc GPRPIE:$rs1, timm:$sel2, timm:$qu)]>; + +def ESP_SRCQ_128_ST_INCP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qy, QR:$qw, GPRPIE:$rs1), + "esp.srcq.128.st.incp\t $qw, $qy, $rs1", []> +{ + bits<3> qy; + bits<3> qw; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} 
= 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCQ_128_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, GPRPIE:$rs1), + "!esp_srcq_128_st_incp_p $qw, $qy, $rs1", + [(int_riscv_esp_srcq_128_st_incp timm:$qy, timm:$qw, GPRPIE:$rs1)]>; + +def ESP_SRCXXP_2Q: Esp32P4Inst<(outs QR:$qyr, QR:$qwr), (ins GPRPIE:$rs1, GPRPIE:$rs2, QR:$qy, QR:$qw), + "esp.srcxxp.2q\t $qy, $qw, $rs1, $rs2", []> +{ + bits<5> rs1; + bits<5> rs2; + bits<3> qy; + bits<3> qw; + bits<3> qyr; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qyr = $qy, $qwr = $qw"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCXXP_2Q_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, GPRPIE:$rs2, imm8:$qy, imm8:$qw), + "!esp_srcxxp_2q_p $qy, $qw, $rs1, $rs2", + [(int_riscv_esp_srcxxp_2q GPRPIE:$rs1, GPRPIE:$rs2, timm:$qy, timm:$qw)]>; + +def ESP_SRS_S_XACC: Esp32P4Inst<(outs GPRPIE:$rd), (ins GPRPIE:$rs1), + "esp.srs.s.xacc\t $rd, $rs1", []> +{ + bits<5> rs1; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 
0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRS_S_XACC_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, GPRPIE:$rd), + "!esp_srs_s_xacc_p $rd, $rs1", + [(int_riscv_esp_srs_s_xacc GPRPIE:$rs1, GPRPIE:$rd)]>; + +def ESP_SRS_U_XACC: Esp32P4Inst<(outs GPRPIE:$rd), (ins GPRPIE:$rs1), + "esp.srs.u.xacc\t $rd, $rs1", []> +{ + bits<5> rs1; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRS_U_XACC_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, GPRPIE:$rd), + "!esp_srs_u_xacc_p $rd, $rs1", + [(int_riscv_esp_srs_u_xacc GPRPIE:$rs1, GPRPIE:$rd)]>; + +def ESP_VSL_32: Esp32P4Inst<(outs QR:$qu), (ins QR:$qy), + "esp.vsl.32\t $qu, $qy", []> +{ + bits<3> qy; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = 
qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSL_32_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qu), + "!esp_vsl_32_p $qu, $qy", + [(int_riscv_esp_vsl_32 timm:$qy, timm:$qu)]>; + +def ESP_VSLD_16: Esp32P4Inst<(outs QR:$qu), (ins QR:$qy, QR:$qw), + "esp.vsld.16\t $qu, $qy, $qw", []> +{ + bits<3> qy; + bits<3> qw; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSLD_16_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, imm8:$qu), + "!esp_vsld_16_p $qu, $qy, $qw", + [(int_riscv_esp_vsld_16 timm:$qy, timm:$qw, timm:$qu)]>; + +def ESP_VSLD_32: Esp32P4Inst<(outs QR:$qu), (ins QR:$qy, QR:$qw), + "esp.vsld.32\t $qu, $qy, $qw", []> +{ + bits<3> qy; + bits<3> qw; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let 
Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSLD_32_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, imm8:$qu), + "!esp_vsld_32_p $qu, $qy, $qw", + [(int_riscv_esp_vsld_32 timm:$qy, timm:$qw, timm:$qu)]>; + +def ESP_VSLD_8: Esp32P4Inst<(outs QR:$qu), (ins QR:$qy, QR:$qw), + "esp.vsld.8\t $qu, $qy, $qw", []> +{ + bits<3> qy; + bits<3> qw; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSLD_8_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, imm8:$qu), + "!esp_vsld_8_p $qu, $qy, $qw", + [(int_riscv_esp_vsld_8 timm:$qy, timm:$qw, timm:$qu)]>; + +def ESP_VSR_S32: Esp32P4Inst<(outs QR:$qu), (ins QR:$qy), + "esp.vsr.s32\t $qu, $qy", []> +{ + bits<3> qy; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; 
+ let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSR_S32_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qu), + "!esp_vsr_s32_p $qu, $qy", + [(int_riscv_esp_vsr_s32 timm:$qy, timm:$qu)]>; + +def ESP_VSR_U32: Esp32P4Inst<(outs QR:$qu), (ins QR:$qy), + "esp.vsr.u32\t $qu, $qy", []> +{ + bits<3> qy; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSR_U32_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qu), + "!esp_vsr_u32_p $qu, $qy", + [(int_riscv_esp_vsr_u32 timm:$qy, timm:$qu)]>; + +def ESP_VSRD_16: Esp32P4Inst<(outs QR:$qu), (ins QR:$qy, QR:$qw), + "esp.vsrd.16\t $qu, $qy, $qw", []> +{ + bits<3> qy; + bits<3> qw; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; 
+} + +let usesCustomInserter = 1 in +def ESP_VSRD_16_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, imm8:$qu), + "!esp_vsrd_16_p $qu, $qy, $qw", + [(int_riscv_esp_vsrd_16 timm:$qy, timm:$qw, timm:$qu)]>; + +def ESP_VSRD_32: Esp32P4Inst<(outs QR:$qu), (ins QR:$qy, QR:$qw), + "esp.vsrd.32\t $qu, $qy, $qw", []> +{ + bits<3> qy; + bits<3> qw; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSRD_32_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, imm8:$qu), + "!esp_vsrd_32_p $qu, $qy, $qw", + [(int_riscv_esp_vsrd_32 timm:$qy, timm:$qw, timm:$qu)]>; + +def ESP_VSRD_8: Esp32P4Inst<(outs QR:$qu), (ins QR:$qy, QR:$qw), + "esp.vsrd.8\t $qu, $qy, $qw", []> +{ + bits<3> qy; + bits<3> qw; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let 
usesCustomInserter = 1 in +def ESP_VSRD_8_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, imm8:$qu), + "!esp_vsrd_8_p $qu, $qy, $qw", + [(int_riscv_esp_vsrd_8 timm:$qy, timm:$qw, timm:$qu)]>; + +def ESP_ST_S_XACC_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_8:$off2568), + "esp.st.s.xacc.ip\t $rs1, $off2568", []> +{ + bits<5> rs1; + bits<8> off2568; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = off2568{7}; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off2568{6-3}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = off2568{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ST_S_XACC_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_8:$off2568), + "!esp_st_s_xacc_ip_p $rs1, $off2568", + [(int_riscv_esp_st_s_xacc_ip GPRPIE:$rs1, timm:$off2568)]>; + +def ESP_ST_U_XACC_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_8:$off2568), + "esp.st.u.xacc.ip\t $rs1, $off2568", []> +{ + bits<5> rs1; + bits<8> off2568; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = off2568{7}; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off2568{6-3}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = off2568{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 0; + 
let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ST_U_XACC_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_8:$off2568), + "!esp_st_u_xacc_ip_p $rs1, $off2568", + [(int_riscv_esp_st_u_xacc_ip GPRPIE:$rs1, timm:$off2568)]>; + diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoP4HWLP.td b/llvm/lib/Target/RISCV/RISCVInstrInfoP4HWLP.td new file mode 100644 index 0000000000000..95b4bbd5d58b3 --- /dev/null +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoP4HWLP.td @@ -0,0 +1,172 @@ +//===- RISCVInstrInfoP4HWLP.td - RISCV Target Description -*- tablegen -*--===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file describes the RISCV ESP32P4 DSP instructions in TableGen format. 
+// +// These definitions are generated +// This file is generated +// +//===----------------------------------------------------------------------===// + +def ESP_LP_SETUPI: Esp32P4Inst<(outs), (ins uimm1:$id, uimm12:$count, uimm10_step4:$offset), + "esp.lp.setupi\t $id, $count, $offset", []> +{ + bits<1> id; + bits<13> count; + bits<10> offset; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-20} = count{11-0}; + let Inst{19-15} = offset{5-1}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11-8} = offset{9-6}; + let Inst{7} = id; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 0; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +def ESP_LP_SETUP: Esp32P4Inst<(outs), (ins uimm1:$id, GPR:$rs1, uimm13_step4:$offset), + "esp.lp.setup\t $id, $rs1, $offset", []> +{ + bits<1> id; + bits<5> rs1; + bits<13> offset; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-20} = offset{12-1}; + let Inst{19-15} = rs1{4-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11-8} = 0xb0000; + let Inst{7} = id; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 0; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +def ESP_LP_STARTI: Esp32P4Inst<(outs), (ins uimm1:$id, uimm13_step4:$offset), + "esp.lp.starti\t $id, $offset", []> +{ + bits<1> id; + bits<5> rs1; + bits<13> offset; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-20} = offset{12-1}; + let Inst{19-15} = 0xb00000; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11-8} = 0xb0000; + let Inst{7} = id; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 0; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +def ESP_LP_ENDI: Esp32P4Inst<(outs), (ins uimm1:$id, uimm13_step4:$offset), + "esp.lp.endi\t $id, $offset", []> +{ + bits<1> id; + bits<5> rs1; + bits<13> offset; 
+ let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-20} = offset{12-1}; + let Inst{19-15} = 0xb00000; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11-8} = 0xb0000; + let Inst{7} = id; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 0; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +def ESP_LP_COUNTI: Esp32P4Inst<(outs), (ins uimm1:$id, uimm12:$count), + "esp.lp.counti\t $id, $count", []> +{ + bits<1> id; + bits<5> rs1; + bits<12> count; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-20} = count{11-0}; + let Inst{19-15} = 0xb00000; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11-8} = 0xb0000; + let Inst{7} = id; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 0; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +def ESP_LP_COUNT: Esp32P4Inst<(outs), (ins uimm1:$id, GPR:$rs1), + "esp.lp.count\t $id, $rs1", []> +{ + bits<1> id; + bits<5> rs1; + bits<13> offset; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-20} = 0xb000000000000; + let Inst{19-15} = rs1{4-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11-8} = 0xb0000; + let Inst{7} = id; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 0; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} \ No newline at end of file diff --git a/llvm/lib/Target/RISCV/RISCVProcessors.td b/llvm/lib/Target/RISCV/RISCVProcessors.td index 6eed2ae01f646..a658384dba332 100644 --- a/llvm/lib/Target/RISCV/RISCVProcessors.td +++ b/llvm/lib/Target/RISCV/RISCVProcessors.td @@ -419,3 +419,17 @@ def SPACEMIT_X60 : RISCVProcessorModel<"spacemit-x60", FeatureStdExtZvkt, FeatureStdExtZvl256b]), [TuneDLenFactor2]>; + +def ESPRESSIF_ESP32P4 : RISCVProcessorModel<"esp32p4", + NoSchedModel, + [Feature32Bit, + FeatureVendorESP32P4, + FeatureStdExtZicsr, + 
FeatureStdExtZifencei, + FeatureStdExtM, + FeatureStdExtA, + FeatureStdExtF, + FeatureStdExtC, + FeatureStdExtZcb, + FeatureStdExtZcmp, + FeatureStdExtZcmt]>; diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td index b12634c24622f..9163f5ae6bc83 100644 --- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td +++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td @@ -121,6 +121,24 @@ let RegAltNameIndices = [ABIRegAltName] in { } } +class QRReg num, string n, listalt = []> : Register{ + let Namespace = "RISCV"; + let HWEncoding{15-0} = num; + let AltNames = alt; +} +let RegAltNameIndices = [ABIRegAltName] in { + def Q0 : QRReg<0, "q0", ["q0"]>, DwarfRegNum<[0]>; + def Q1 : QRReg<1, "q1", ["q1"]>, DwarfRegNum<[1]>; + def Q2 : QRReg<2, "q2", ["q2"]>, DwarfRegNum<[2]>; + def Q3 : QRReg<3, "q3", ["q3"]>, DwarfRegNum<[3]>; + def Q4 : QRReg<4, "q4", ["q4"]>, DwarfRegNum<[4]>; + def Q5 : QRReg<5, "q5", ["q5"]>, DwarfRegNum<[5]>; + def Q6 : QRReg<6, "q6", ["q6"]>, DwarfRegNum<[6]>; + def Q7 : QRReg<7, "q7", ["q7"]>, DwarfRegNum<[7]>; +} + +def QR : RegisterClass<"RISCV", [v16i8, v4i32], 128, (sequence "Q%u", 0, 7)>; + def XLenVT : ValueTypeByHwMode<[RV32, RV64], [i32, i64]>; // Allow f64 in GPR for ZDINX on RV64. 
@@ -159,6 +177,9 @@ def GPR : GPRRegisterClass<(add (sequence "X%u", 10, 17), (sequence "X%u", 18, 27), (sequence "X%u", 0, 4))>; +def GPRPIE : GPRRegisterClass<(add (sequence "X%u", 8, 15), + (sequence "X%u", 24, 31))>; + def GPRX0 : GPRRegisterClass<(add X0)>; def GPRX1 : GPRRegisterClass<(add X1)>; def GPRX5 : GPRRegisterClass<(add X5)>; diff --git a/llvm/test/CodeGen/RISCV/esp32p4.ll b/llvm/test/CodeGen/RISCV/esp32p4.ll new file mode 100644 index 0000000000000..a30afb429845e --- /dev/null +++ b/llvm/test/CodeGen/RISCV/esp32p4.ll @@ -0,0 +1,1289 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -O1 -mtriple=riscv32 -mcpu=esp32p4 -verify-machineinstrs < %s \ +; RUN: | FileCheck %s -check-prefix=CHECK + +define void @test(){ +; CHECK-LABEL: test: +; CHECK: # %bb.0: +; CHECK-NEXT: cm.push {ra, s0-s11}, -64 +; CHECK-NEXT: .cfi_def_cfa_offset 64 +; CHECK-NEXT: .cfi_offset s0, -48 +; CHECK-NEXT: .cfi_offset s1, -44 +; CHECK-NEXT: .cfi_offset s8, -16 +; CHECK-NEXT: .cfi_offset s9, -12 +; CHECK-NEXT: .cfi_offset s10, -8 +; CHECK-NEXT: .cfi_offset s11, -4 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vld.128.ip q0, a0, 784 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vld.128.ip q1, a0, 784 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vld.128.ip q2, a0, 784 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vld.128.ip q3, a0, 784 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vld.128.ip q4, a0, 784 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vld.128.ip q5, a0, 784 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vld.128.ip q6, a0, 784 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vld.128.ip q7, a0, 784 +; CHECK-NEXT: esp.vcmulas.s16.qacc.h q0, q4 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.vcmulas.s16.qacc.h.ld.ip q1, a0, -48, q6, q1 +; CHECK-NEXT: li s9, 12 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.vcmulas.s16.qacc.h.ld.xp q1, a0, s9, q2, q7 +; CHECK-NEXT: esp.vcmulas.s16.qacc.l q7, q6 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vcmulas.s16.qacc.l.ld.ip 
q7, a0, 48, q7, q0 +; CHECK-NEXT: li t4, 14 +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: esp.vcmulas.s16.qacc.l.ld.xp q1, a0, t4, q2, q7 +; CHECK-NEXT: esp.vcmulas.s8.qacc.h q1, q1 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.vcmulas.s8.qacc.h.ld.ip q4, a0, 32, q1, q6 +; CHECK-NEXT: li s11, 7 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.vcmulas.s8.qacc.h.ld.xp q6, a0, s11, q3, q2 +; CHECK-NEXT: esp.vcmulas.s8.qacc.l q4, q5 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.vcmulas.s8.qacc.l.ld.ip q4, a0, -48, q2, q5 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.vcmulas.s8.qacc.l.ld.xp q7, a0, s11, q6, q3 +; CHECK-NEXT: esp.vmulas.s16.qacc q4, q2 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.vmulas.s16.qacc.ld.ip q1, a0, 96, q5, q7 +; CHECK-NEXT: li t3, 3 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vmulas.s16.qacc.ld.xp q6, a0, t3, q4, q2 +; CHECK-NEXT: li a5, 0 +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: esp.vmulas.s16.qacc.st.ip q1, a0, 80, q7, q6 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.vmulas.s16.qacc.st.xp q6, a0, a0, q0, q7 +; CHECK-NEXT: esp.vmulas.s16.xacc q3, q5 +; CHECK-NEXT: li a0, 9 +; CHECK-NEXT: esp.vmulas.s16.xacc.ld.ip q5, a0, 96, q1, q7 +; CHECK-NEXT: li a2, 8 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.vmulas.s16.xacc.ld.xp q0, a0, a2, q5, q5 +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: esp.vmulas.s16.xacc.st.ip q2, a0, 16, q4, q6 +; CHECK-NEXT: li t6, 5 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.vmulas.s16.xacc.st.xp q7, a0, t6, q7, q7 +; CHECK-NEXT: esp.vmulas.s8.qacc q6, q1 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vmulas.s8.qacc.ld.ip q2, a0, -128, q3, q5 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vmulas.s8.qacc.ld.xp q4, a0, t6, q0, q5 +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: esp.vmulas.s8.qacc.st.ip q7, a0, 16, q6, q0 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.vmulas.s8.qacc.st.xp q4, a0, s9, q6, q1 +; CHECK-NEXT: esp.vmulas.s8.xacc q3, q7 +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: esp.vmulas.s8.xacc.ld.ip q7, a0, -16, q4, q5 +; CHECK-NEXT: li t5, 10 +; CHECK-NEXT: li a0, 7 +; 
CHECK-NEXT: esp.vmulas.s8.xacc.ld.xp q1, a0, t5, q7, q0 +; CHECK-NEXT: li a1, 2 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vmulas.s8.xacc.st.ip q6, a0, -128, q6, q1 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.vmulas.s8.xacc.st.xp q5, a0, a1, q4, q1 +; CHECK-NEXT: esp.vmulas.u16.qacc q6, q1 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vmulas.u16.qacc.ld.ip q7, a0, -32, q0, q0 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.vmulas.u16.qacc.ld.xp q2, a0, s11, q6, q7 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vmulas.u16.qacc.st.ip q4, a0, 16, q6, q5 +; CHECK-NEXT: li s0, 9 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.vmulas.u16.qacc.st.xp q4, a0, s0, q3, q7 +; CHECK-NEXT: esp.vmulas.u16.xacc q6, q1 +; CHECK-NEXT: li a4, 6 +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.vmulas.u16.xacc.ld.ip q2, a0, -48, q2, q2 +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: esp.vmulas.u16.xacc.ld.xp q7, a0, a4, q3, q0 +; CHECK-NEXT: li a0, 9 +; CHECK-NEXT: esp.vmulas.u16.xacc.st.ip q0, a0, 96, q1, q4 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.vmulas.u16.xacc.st.xp q6, a0, t6, q3, q7 +; CHECK-NEXT: esp.vmulas.u8.qacc q7, q1 +; CHECK-NEXT: li a0, 9 +; CHECK-NEXT: esp.vmulas.u8.qacc.ld.ip q7, a0, -48, q7, q4 +; CHECK-NEXT: li a0, 11 +; CHECK-NEXT: esp.vmulas.u8.qacc.ld.xp q4, a0, s9, q6, q7 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.vmulas.u8.qacc.st.ip q2, a0, 0, q1, q7 +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: esp.vmulas.u8.qacc.st.xp q4, a0, a0, q0, q0 +; CHECK-NEXT: esp.vmulas.u8.xacc q6, q4 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.vmulas.u8.xacc.ld.ip q3, a0, -80, q6, q2 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.vmulas.u8.xacc.ld.xp q4, a0, a0, q5, q1 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.vmulas.u8.xacc.st.ip q7, a0, -128, q2, q3 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.vmulas.u8.xacc.st.xp q2, a0, a4, q7, q2 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.vmulas.s16.qacc.ldbc.incp q0, a0, q0, q2 +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: esp.vmulas.s8.qacc.ldbc.incp q5, a0, q2, q6 +; CHECK-NEXT: li a0, 8 
+; CHECK-NEXT: esp.vmulas.u16.qacc.ldbc.incp q5, a0, q7, q3 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.vmulas.u8.qacc.ldbc.incp q3, a0, q4, q4 +; CHECK-NEXT: esp.vsmulas.s16.qacc q1, q5, 14 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.vsmulas.s16.qacc.ld.incp q0, a0, q7, q4, 0 +; CHECK-NEXT: esp.vsmulas.s8.qacc q3, q5, 0 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vsmulas.s8.qacc.ld.incp q1, a0, q1, q4, 6 +; CHECK-NEXT: esp.vsmulas.u16.qacc q6, q5, 15 +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: esp.vsmulas.u16.qacc.ld.incp q7, a0, q7, q1, 10 +; CHECK-NEXT: esp.vsmulas.u8.qacc q0, q7, 2 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.vsmulas.u8.qacc.ld.incp q4, a0, q3, q7, 8 +; CHECK-NEXT: esp.cmul.s16 q6, q0, q7, 3 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.cmul.s16.ld.incp q5, a0, q3, q0, q3, 0 +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: esp.cmul.s16.st.incp q5, a0, q0, q4, q5, 2 +; CHECK-NEXT: esp.cmul.s8 q1, q1, q0, 3 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.cmul.s8.ld.incp q4, a0, q7, q5, q4, 1 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.cmul.s8.st.incp q5, a0, q0, q6, q0, 3 +; CHECK-NEXT: esp.cmul.u16 q7, q7, q5, 2 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.cmul.u16.ld.incp q0, a0, q0, q0, q1, 1 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.cmul.u16.st.incp q2, a0, q4, q1, q4, 2 +; CHECK-NEXT: esp.cmul.u8 q3, q7, q5, 0 +; CHECK-NEXT: li a0, 11 +; CHECK-NEXT: esp.cmul.u8.ld.incp q4, a0, q0, q0, q2, 0 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.cmul.u8.st.incp q1, a0, q4, q6, q3, 2 +; CHECK-NEXT: esp.max.s16.a q6, a0 +; CHECK-NEXT: esp.max.s32.a q2, a0 +; CHECK-NEXT: esp.max.s8.a q0, a0 +; CHECK-NEXT: esp.max.u16.a q6, a0 +; CHECK-NEXT: esp.max.u32.a q6, a0 +; CHECK-NEXT: esp.max.u8.a q1, a0 +; CHECK-NEXT: esp.min.s16.a q6, a0 +; CHECK-NEXT: esp.min.s32.a q1, a0 +; CHECK-NEXT: esp.min.s8.a q0, a0 +; CHECK-NEXT: esp.min.u16.a q3, a0 +; CHECK-NEXT: esp.min.u32.a q0, a0 +; CHECK-NEXT: esp.min.u8.a q5, a0 +; CHECK-NEXT: esp.vabs.16 q6, q0 +; CHECK-NEXT: esp.vabs.32 q1, q4 +; 
CHECK-NEXT: esp.vabs.8 q5, q2 +; CHECK-NEXT: esp.vadd.s16 q6, q1, q5 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.vadd.s16.ld.incp q0, a0, q1, q0, q6 +; CHECK-NEXT: li a0, 11 +; CHECK-NEXT: esp.vadd.s16.st.incp q1, a0, q7, q0, q4 +; CHECK-NEXT: esp.vadd.s32 q7, q7, q3 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.vadd.s32.ld.incp q4, a0, q4, q7, q2 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.vadd.s32.st.incp q2, a0, q7, q1, q7 +; CHECK-NEXT: esp.vadd.s8 q7, q1, q7 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.vadd.s8.ld.incp q2, a0, q1, q5, q6 +; CHECK-NEXT: li a0, 9 +; CHECK-NEXT: esp.vadd.s8.st.incp q3, a0, q4, q1, q0 +; CHECK-NEXT: esp.vadd.u16 q0, q7, q7 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vadd.u16.ld.incp q6, a0, q1, q7, q5 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.vadd.u16.st.incp q0, a0, q7, q6, q3 +; CHECK-NEXT: esp.vadd.u32 q4, q0, q1 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.vadd.u32.ld.incp q1, a0, q4, q5, q0 +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: esp.vadd.u32.st.incp q4, a0, q6, q0, q1 +; CHECK-NEXT: esp.vadd.u8 q5, q2, q5 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vadd.u8.ld.incp q7, a0, q1, q4, q3 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.vadd.u8.st.incp q0, a0, q2, q0, q0 +; CHECK-NEXT: esp.vclamp.s16 q4, q5, 14 +; CHECK-NEXT: esp.vmax.s16 q5, q6, q5 +; CHECK-NEXT: li a0, 11 +; CHECK-NEXT: esp.vmax.s16.ld.incp q2, a0, q3, q5, q5 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.vmax.s16.st.incp q3, a0, q4, q3, q5 +; CHECK-NEXT: esp.vmax.s32 q2, q5, q2 +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: esp.vmax.s32.ld.incp q0, a0, q6, q0, q1 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.vmax.s32.st.incp q6, a0, q1, q7, q6 +; CHECK-NEXT: esp.vmax.s8 q7, q5, q7 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.vmax.s8.ld.incp q6, a0, q1, q5, q1 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.vmax.s8.st.incp q5, a0, q7, q1, q3 +; CHECK-NEXT: esp.vmax.u16 q1, q4, q1 +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: esp.vmax.u16.ld.incp q3, a0, q5, q5, q4 +; CHECK-NEXT: li a0, 11 +; CHECK-NEXT: 
esp.vmax.u16.st.incp q5, a0, q5, q0, q7 +; CHECK-NEXT: esp.vmax.u32 q4, q0, q2 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.vmax.u32.ld.incp q6, a0, q1, q0, q6 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.vmax.u32.st.incp q0, a0, q1, q4, q7 +; CHECK-NEXT: esp.vmax.u8 q5, q2, q0 +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: esp.vmax.u8.ld.incp q0, a0, q5, q6, q1 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.vmax.u8.st.incp q7, a0, q1, q6, q7 +; CHECK-NEXT: esp.vmin.s16 q4, q1, q3 +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.vmin.s16.ld.incp q2, a0, q2, q2, q1 +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: esp.vmin.s16.st.incp q2, a0, q1, q7, q6 +; CHECK-NEXT: esp.vmin.s32 q2, q0, q3 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.vmin.s32.ld.incp q1, a0, q5, q7, q6 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.vmin.s32.st.incp q7, a0, q5, q5, q1 +; CHECK-NEXT: esp.vmin.s8 q2, q3, q6 +; CHECK-NEXT: li a0, 11 +; CHECK-NEXT: esp.vmin.s8.ld.incp q7, a0, q1, q4, q3 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.vmin.s8.st.incp q1, a0, q4, q0, q1 +; CHECK-NEXT: esp.vmin.u16 q4, q3, q7 +; CHECK-NEXT: li a0, 11 +; CHECK-NEXT: esp.vmin.u16.ld.incp q4, a0, q5, q6, q6 +; CHECK-NEXT: li a0, 12 +; CHECK-NEXT: esp.vmin.u16.st.incp q1, a0, q2, q6, q0 +; CHECK-NEXT: esp.vmin.u32 q5, q0, q7 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.vmin.u32.ld.incp q7, a0, q6, q5, q6 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.vmin.u32.st.incp q5, a0, q4, q3, q7 +; CHECK-NEXT: esp.vmin.u8 q7, q5, q5 +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: esp.vmin.u8.ld.incp q2, a0, q5, q0, q5 +; CHECK-NEXT: li a0, 12 +; CHECK-NEXT: esp.vmin.u8.st.incp q2, a0, q1, q6, q6 +; CHECK-NEXT: esp.vmul.s16 q6, q2, q1 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.vmul.s16.ld.incp q0, a0, q3, q6, q7 +; CHECK-NEXT: esp.vmul.s16.s8xs8 q7, q0, q3, q5 +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.vmul.s16.st.incp q2, a0, q0, q7, q1 +; CHECK-NEXT: esp.vmul.s32.s16xs16 q3, q4, q5, q2 +; CHECK-NEXT: esp.vmul.s8 q3, q4, q0 +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: 
esp.vmul.s8.ld.incp q0, a0, q2, q2, q3 +; CHECK-NEXT: li s1, 4 +; CHECK-NEXT: li s8, 13 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vmul.s8.st.incp q0, a0, q3, q0, q7 +; CHECK-NEXT: esp.vmul.u16 q2, q3, q7 +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: esp.vmul.u16.ld.incp q5, a0, q5, q6, q6 +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.vmul.u16.st.incp q3, a0, q2, q4, q4 +; CHECK-NEXT: esp.vmul.u8 q7, q3, q7 +; CHECK-NEXT: li a0, 9 +; CHECK-NEXT: esp.vmul.u8.ld.incp q0, a0, q1, q0, q6 +; CHECK-NEXT: li s10, 1 +; CHECK-NEXT: li a0, 11 +; CHECK-NEXT: li a3, 6 +; CHECK-NEXT: esp.vmul.u8.st.incp q7, a3, q4, q0, q3 +; CHECK-NEXT: esp.vprelu.s16 q1, q4, q3, a2 +; CHECK-NEXT: esp.vprelu.s8 q2, q4, q5, t3 +; CHECK-NEXT: esp.vrelu.s16 q6, s0, a2 +; CHECK-NEXT: esp.vrelu.s8 q5, s10, s9 +; CHECK-NEXT: esp.vsadds.s16 q3, q3, s9 +; CHECK-NEXT: esp.vsadds.s8 q7, q1, s11 +; CHECK-NEXT: esp.vsadds.u16 q3, q2, s1 +; CHECK-NEXT: esp.vsadds.u8 q2, q3, a2 +; CHECK-NEXT: esp.vsat.s16 q5, q0, s0, s10 +; CHECK-NEXT: esp.vsat.s32 q3, q3, s9, s0 +; CHECK-NEXT: esp.vsat.s8 q0, q7, t5, a1 +; CHECK-NEXT: esp.vsat.u16 q3, q7, s11, s11 +; CHECK-NEXT: esp.vsat.u32 q3, q5, a2, a1 +; CHECK-NEXT: esp.vsat.u8 q0, q6, s10, s8 +; CHECK-NEXT: esp.vssubs.s16 q3, q7, t3 +; CHECK-NEXT: esp.vssubs.s8 q7, q0, t4 +; CHECK-NEXT: esp.vssubs.u16 q5, q4, a5 +; CHECK-NEXT: esp.vssubs.u8 q5, q1, a0 +; CHECK-NEXT: esp.vsub.s16 q0, q0, q6 +; CHECK-NEXT: li a3, 6 +; CHECK-NEXT: esp.vsub.s16.ld.incp q2, a3, q2, q3, q7 +; CHECK-NEXT: li a3, 0 +; CHECK-NEXT: esp.vsub.s16.st.incp q7, a3, q0, q0, q3 +; CHECK-NEXT: esp.vsub.s32 q7, q2, q7 +; CHECK-NEXT: li a3, 7 +; CHECK-NEXT: esp.vsub.s32.ld.incp q4, a3, q3, q2, q0 +; CHECK-NEXT: li a3, 5 +; CHECK-NEXT: esp.vsub.s32.st.incp q4, a3, q1, q1, q1 +; CHECK-NEXT: esp.vsub.s8 q7, q5, q6 +; CHECK-NEXT: li a3, 1 +; CHECK-NEXT: esp.vsub.s8.ld.incp q4, a3, q1, q2, q6 +; CHECK-NEXT: li a3, 4 +; CHECK-NEXT: esp.vsub.s8.st.incp q5, a3, q4, q2, q3 +; CHECK-NEXT: esp.vsub.u16 q5, q7, q0 +; 
CHECK-NEXT: li a3, 11 +; CHECK-NEXT: esp.vsub.u16.ld.incp q4, a3, q0, q7, q5 +; CHECK-NEXT: li a3, 11 +; CHECK-NEXT: esp.vsub.u16.st.incp q0, a3, q1, q3, q1 +; CHECK-NEXT: esp.vsub.u32 q5, q4, q2 +; CHECK-NEXT: li a3, 0 +; CHECK-NEXT: esp.vsub.u32.ld.incp q4, a3, q2, q4, q2 +; CHECK-NEXT: li a3, 11 +; CHECK-NEXT: esp.vsub.u32.st.incp q0, a3, q7, q7, q4 +; CHECK-NEXT: esp.vsub.u8 q6, q5, q4 +; CHECK-NEXT: li a3, 0 +; CHECK-NEXT: esp.vsub.u8.ld.incp q4, a3, q6, q2, q4 +; CHECK-NEXT: esp.vsub.u8.st.incp q3, a0, q3, q2, q0 +; CHECK-NEXT: esp.addx2 zero, t6, t4 +; CHECK-NEXT: esp.addx4 zero, t4, t6 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.sat a0, a4, a1 +; CHECK-NEXT: esp.subx2 zero, a1, a1 +; CHECK-NEXT: esp.subx4 zero, a5, s0 +; CHECK-NEXT: esp.andq q7, q6, q3 +; CHECK-NEXT: esp.notq q6, q5 +; CHECK-NEXT: esp.orq q1, q1, q0 +; CHECK-NEXT: esp.xorq q5, q1, q6 +; CHECK-NEXT: esp.vcmp.eq.s16 q1, q0, q2 +; CHECK-NEXT: esp.vcmp.eq.s32 q5, q1, q6 +; CHECK-NEXT: esp.vcmp.eq.s8 q2, q0, q3 +; CHECK-NEXT: esp.vcmp.eq.u16 q7, q7, q1 +; CHECK-NEXT: esp.vcmp.eq.u32 q2, q1, q2 +; CHECK-NEXT: esp.vcmp.eq.u8 q3, q1, q6 +; CHECK-NEXT: esp.vcmp.gt.s16 q4, q5, q6 +; CHECK-NEXT: esp.vcmp.gt.s32 q0, q6, q2 +; CHECK-NEXT: esp.vcmp.gt.s8 q2, q3, q5 +; CHECK-NEXT: esp.vcmp.gt.u16 q7, q7, q4 +; CHECK-NEXT: esp.vcmp.gt.u32 q2, q6, q2 +; CHECK-NEXT: esp.vcmp.gt.u8 q0, q2, q0 +; CHECK-NEXT: esp.vcmp.lt.s16 q7, q2, q1 +; CHECK-NEXT: esp.vcmp.lt.s32 q4, q2, q1 +; CHECK-NEXT: esp.vcmp.lt.s8 q6, q5, q2 +; CHECK-NEXT: esp.vcmp.lt.u16 q4, q1, q5 +; CHECK-NEXT: esp.vcmp.lt.u32 q2, q5, q6 +; CHECK-NEXT: esp.vcmp.lt.u8 q5, q3, q5 +; CHECK-NEXT: esp.mov.s16.qacc q2 +; CHECK-NEXT: esp.mov.s8.qacc q5 +; CHECK-NEXT: esp.mov.u16.qacc q5 +; CHECK-NEXT: esp.mov.u8.qacc q3 +; CHECK-NEXT: esp.movi.16.a q2, a0, 3 +; CHECK-NEXT: esp.movi.16.q q3, s11, 13 +; CHECK-NEXT: esp.movi.32.a q6, a0, 1 +; CHECK-NEXT: esp.movi.32.q q5, s0, 1 +; CHECK-NEXT: esp.movi.8.a q5, a0, 15 +; CHECK-NEXT: esp.movi.8.q q1, a5, 6 +; 
CHECK-NEXT: esp.movx.r.cfg a0 +; CHECK-NEXT: esp.movx.r.fft.bit.width a0 +; CHECK-NEXT: li a0, 33 +; CHECK-NEXT: esp.movx.r.perf a0, a0 +; CHECK-NEXT: esp.movx.r.sar a0 +; CHECK-NEXT: esp.movx.r.sar.bytes a0 +; CHECK-NEXT: esp.movx.r.xacc.h a0 +; CHECK-NEXT: esp.movx.r.xacc.l a0 +; CHECK-NEXT: esp.movx.w.cfg t5 +; CHECK-NEXT: esp.movx.w.fft.bit.width s10 +; CHECK-NEXT: esp.movx.w.perf a2 +; CHECK-NEXT: esp.movx.w.sar t3 +; CHECK-NEXT: esp.movx.w.sar.bytes s1 +; CHECK-NEXT: esp.movx.w.xacc.h a2 +; CHECK-NEXT: esp.movx.w.xacc.l s1 +; CHECK-NEXT: esp.vext.s16 q7, q0, q6 +; CHECK-NEXT: esp.vext.s8 q5, q3, q3 +; CHECK-NEXT: esp.vext.u16 q4, q2, q6 +; CHECK-NEXT: esp.vext.u8 q4, q0, q0 +; CHECK-NEXT: esp.vunzip.16 q1, q0 +; CHECK-NEXT: esp.vunzip.32 q6, q4 +; CHECK-NEXT: esp.vunzip.8 q2, q1 +; CHECK-NEXT: esp.vunzipt.16 q7, q0, q2 +; CHECK-NEXT: esp.vunzipt.8 q0, q6, q2 +; CHECK-NEXT: esp.vzip.16 q1, q6 +; CHECK-NEXT: esp.vzip.32 q4, q6 +; CHECK-NEXT: esp.vzip.8 q4, q0 +; CHECK-NEXT: esp.vzipt.16 q0, q3, q5 +; CHECK-NEXT: esp.vzipt.8 q6, q1, q5 +; CHECK-NEXT: esp.zero.q q5 +; CHECK-NEXT: esp.zero.qacc +; CHECK-NEXT: esp.zero.xacc +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.fft.ams.s16.ld.incp q6, a0, q6, q0, q3, q0, q1, 0 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.fft.ams.s16.ld.incp.uaup q3, a0, q0, q2, q3, q1, q0, 0 +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.fft.ams.s16.ld.r32.decp q7, a0, q0, q6, q3, q1, q5, 1 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: li a3, 4 +; CHECK-NEXT: esp.fft.ams.s16.st.incp q5, q7, a0, a3, q5, q3, q6, 0 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.fft.bitrev q7, a0 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.fft.cmul.s16.ld.xp q2, a0, s10, q3, q7, q7, 1 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.fft.cmul.s16.st.xp q7, q0, q4, a0, a1, 4, 3, 1 +; CHECK-NEXT: esp.fft.r2bf.s16 q7, q3, q5, q1, 0 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.fft.r2bf.s16.st.incp q7, q7, q4, a0, 2 +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: esp.fft.vst.r32.decp q5, a0, 1 +; 
CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.ld.128.usar.ip q1, a0, 608 +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: esp.ld.128.usar.xp q2, a0, a2 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.ld.xacc.ip a0, 400 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.ldqa.s16.128.ip a0, 912 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.ldqa.s16.128.xp a0, t5 +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: esp.ldqa.s8.128.ip a0, 1824 +; CHECK-NEXT: li a0, 9 +; CHECK-NEXT: esp.ldqa.s8.128.xp a0, s1 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.ldqa.u16.128.ip a0, -1904 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.ldqa.u16.128.xp a0, t4 +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.ldqa.u8.128.ip a0, 1216 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.ldqa.u8.128.xp a0, a4 +; CHECK-NEXT: li a0, 9 +; CHECK-NEXT: esp.vldbc.16.ip q7, a0, -448 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.vldbc.16.xp q3, a0, s0 +; CHECK-NEXT: mv a0, a5 +; CHECK-NEXT: esp.vldbc.32.ip q3, a0, 220 +; CHECK-NEXT: li a0, 12 +; CHECK-NEXT: esp.vldbc.32.xp q7, a0, a1 +; CHECK-NEXT: li a0, 12 +; CHECK-NEXT: esp.vldbc.8.ip q2, a0, 396 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.vldbc.8.xp q7, a0, s0 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.vldext.s16.ip q7, q4, a0, 16 +; CHECK-NEXT: mv a0, a5 +; CHECK-NEXT: esp.vldext.s16.xp q5, q0, a0, a2 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.vldext.s8.ip q3, q6, a0, 80 +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.vldext.s8.xp q1, q1, a0, a4 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.vldext.u16.ip q2, q5, a0, 48 +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: esp.vldext.u16.xp q2, q0, a0, s9 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.vldext.u8.ip q7, q2, a0, 64 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.vldext.u8.xp q7, q2, a0, a0 +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: esp.vldhbc.16.incp q4, q7, a0 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.ld.qacc.h.h.128.ip a0, 512 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.ld.qacc.h.l.128.ip a0, -784 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.ld.qacc.l.h.128.ip a0, -800 +; 
CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.ld.qacc.l.l.128.ip a0, -1952 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.ld.ua.state.ip a0, -752 +; CHECK-NEXT: esp.ldxq.32 q7, q4, a5, 2, 4 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.st.qacc.h.h.128.ip a0, -336 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.st.qacc.h.l.128.ip a0, 1568 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.st.qacc.l.h.128.ip a0, 16 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.st.qacc.l.l.128.ip a0, 416 +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: esp.st.ua.state.ip a0, -1360 +; CHECK-NEXT: esp.stxq.32 q0, q6, a4, 2, 5 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vld.128.ip q3, a0, 784 +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: esp.vld.128.xp q3, a0, s0 +; CHECK-NEXT: mv a0, a5 +; CHECK-NEXT: esp.vld.h.64.ip q0, a0, -352 +; CHECK-NEXT: esp.vld.h.64.xp q2, a1, t4 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.vld.l.64.ip q2, a0, 56 +; CHECK-NEXT: esp.vld.l.64.xp q5, s0, s10 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.vst.128.ip q5, a0, -960 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.vst.128.xp q6, a0, s11 +; CHECK-NEXT: esp.vst.h.64.ip q7, s1, 944 +; CHECK-NEXT: esp.vst.h.64.xp q7, s11, t5 +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.vst.l.64.ip q5, a0, 984 +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.vst.l.64.xp q5, a0, s9 +; CHECK-NEXT: esp.slci.2q q1, q5, 12 +; CHECK-NEXT: esp.slcxxp.2q q2, q3, t6, t6 +; CHECK-NEXT: esp.src.q q2, q1, q3 +; CHECK-NEXT: esp.src.q.ld.ip q0, a5, -272, q5, q5 +; CHECK-NEXT: li a0, 12 +; CHECK-NEXT: esp.src.q.ld.xp q1, a0, t3, q7, q6 +; CHECK-NEXT: esp.src.q.qup q3, q7, q4 +; CHECK-NEXT: esp.srci.2q q2, q3, 7 +; CHECK-NEXT: esp.srcmb.s16.q.qacc q2, q4, 0 +; CHECK-NEXT: esp.srcmb.s16.qacc q5, s8, 1 +; CHECK-NEXT: esp.srcmb.s8.q.qacc q5, q4, 0 +; CHECK-NEXT: esp.srcmb.s8.qacc q1, a2, 1 +; CHECK-NEXT: esp.srcmb.u16.q.qacc q0, q3, 1 +; CHECK-NEXT: esp.srcmb.u16.qacc q7, s8, 0 +; CHECK-NEXT: esp.srcmb.u8.q.qacc q3, q5, 1 +; CHECK-NEXT: esp.srcmb.u8.qacc q0, t6, 0 +; CHECK-NEXT: li a0, 12 +; CHECK-NEXT: 
esp.srcq.128.st.incp q0, q5, a0 +; CHECK-NEXT: esp.srcxxp.2q q4, q6, s9, a4 +; CHECK-NEXT: esp.srs.s.xacc a0, a4 +; CHECK-NEXT: esp.srs.u.xacc a0, t3 +; CHECK-NEXT: esp.vsl.32 q5, q2 +; CHECK-NEXT: esp.vsld.16 q3, q3, q7 +; CHECK-NEXT: esp.vsld.32 q3, q7, q1 +; CHECK-NEXT: esp.vsld.8 q0, q1, q5 +; CHECK-NEXT: esp.vsr.s32 q3, q0 +; CHECK-NEXT: esp.vsr.u32 q1, q2 +; CHECK-NEXT: esp.vsrd.16 q4, q3, q0 +; CHECK-NEXT: esp.vsrd.32 q0, q6, q3 +; CHECK-NEXT: esp.vsrd.8 q5, q4, q1 +; CHECK-NEXT: esp.st.s.xacc.ip a2, 80 +; CHECK-NEXT: esp.st.u.xacc.ip a4, -464 +; CHECK-NEXT: cm.popret {ra, s0-s11}, 64 + tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 0) + tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 1) + tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 2) + tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 3) + tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 4) + tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 5) + tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 6) + tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 7) + tail call void @llvm.riscv.esp.vcmulas.s16.qacc.h(i32 0, i32 4) + tail call void @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.ip(i32 6, i32 1, i32 10, i32 -48, i32 1) + tail call void @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.xp(i32 12, i32 2, i32 7, i32 2, i32 1) + tail call void @llvm.riscv.esp.vcmulas.s16.qacc.l(i32 7, i32 6) + tail call void @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.ip(i32 7, i32 0, i32 8, i32 48, i32 7) + tail call void @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.xp(i32 14, i32 2, i32 7, i32 7, i32 1) + tail call void @llvm.riscv.esp.vcmulas.s8.qacc.h(i32 1, i32 1) + tail call void @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.ip(i32 1, i32 6, i32 5, i32 32, i32 4) + tail call void @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.xp(i32 7, i32 3, i32 2, i32 2, i32 6) + tail call void @llvm.riscv.esp.vcmulas.s8.qacc.l(i32 4, i32 5) + tail call void 
@llvm.riscv.esp.vcmulas.s8.qacc.l.ld.ip(i32 2, i32 5, i32 4, i32 -48, i32 4) + tail call void @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.xp(i32 7, i32 6, i32 3, i32 14, i32 7) + tail call void @llvm.riscv.esp.vmulas.s16.qacc(i32 4, i32 2) + tail call void @llvm.riscv.esp.vmulas.s16.qacc.ld.ip(i32 5, i32 7, i32 4, i32 96, i32 1) + tail call void @llvm.riscv.esp.vmulas.s16.qacc.ld.xp(i32 3, i32 4, i32 2, i32 8, i32 6) + tail call void @llvm.riscv.esp.vmulas.s16.qacc.st.ip(i32 7, i32 6, i32 1, i32 0, i32 80) + tail call void @llvm.riscv.esp.vmulas.s16.qacc.st.xp(i32 5, i32 0, i32 7, i32 6, i32 5) + tail call void @llvm.riscv.esp.vmulas.s16.xacc(i32 3, i32 5) + tail call void @llvm.riscv.esp.vmulas.s16.xacc.ld.ip(i32 1, i32 7, i32 9, i32 96, i32 5) + tail call void @llvm.riscv.esp.vmulas.s16.xacc.ld.xp(i32 8, i32 5, i32 5, i32 13, i32 0) + tail call void @llvm.riscv.esp.vmulas.s16.xacc.st.ip(i32 4, i32 6, i32 2, i32 1, i32 16) + tail call void @llvm.riscv.esp.vmulas.s16.xacc.st.xp(i32 5, i32 7, i32 7, i32 7, i32 2) + tail call void @llvm.riscv.esp.vmulas.s8.qacc(i32 6, i32 1) + tail call void @llvm.riscv.esp.vmulas.s8.qacc.ld.ip(i32 3, i32 5, i32 8, i32 -128, i32 2) + tail call void @llvm.riscv.esp.vmulas.s8.qacc.ld.xp(i32 5, i32 0, i32 5, i32 8, i32 4) + tail call void @llvm.riscv.esp.vmulas.s8.qacc.st.ip(i32 6, i32 0, i32 7, i32 1, i32 16) + tail call void @llvm.riscv.esp.vmulas.s8.qacc.st.xp(i32 12, i32 6, i32 1, i32 4, i32 10) + tail call void @llvm.riscv.esp.vmulas.s8.xacc(i32 3, i32 7) + tail call void @llvm.riscv.esp.vmulas.s8.xacc.ld.ip(i32 4, i32 5, i32 1, i32 -16, i32 7) + tail call void @llvm.riscv.esp.vmulas.s8.xacc.ld.xp(i32 10, i32 7, i32 0, i32 7, i32 1) + tail call void @llvm.riscv.esp.vmulas.s8.xacc.st.ip(i32 6, i32 1, i32 6, i32 8, i32 -128) + tail call void @llvm.riscv.esp.vmulas.s8.xacc.st.xp(i32 2, i32 4, i32 1, i32 5, i32 4) + tail call void @llvm.riscv.esp.vmulas.u16.qacc(i32 6, i32 1) + tail call void @llvm.riscv.esp.vmulas.u16.qacc.ld.ip(i32 0, i32 
0, i32 8, i32 -32, i32 7) + tail call void @llvm.riscv.esp.vmulas.u16.qacc.ld.xp(i32 7, i32 6, i32 7, i32 6, i32 2) + tail call void @llvm.riscv.esp.vmulas.u16.qacc.st.ip(i32 6, i32 5, i32 4, i32 8, i32 16) + tail call void @llvm.riscv.esp.vmulas.u16.qacc.st.xp(i32 9, i32 3, i32 7, i32 4, i32 2) + tail call void @llvm.riscv.esp.vmulas.u16.xacc(i32 6, i32 1) + tail call void @llvm.riscv.esp.vmulas.u16.xacc.ld.ip(i32 2, i32 2, i32 3, i32 -48, i32 2) + tail call void @llvm.riscv.esp.vmulas.u16.xacc.ld.xp(i32 6, i32 3, i32 0, i32 0, i32 7) + tail call void @llvm.riscv.esp.vmulas.u16.xacc.st.ip(i32 1, i32 4, i32 0, i32 9, i32 96) + tail call void @llvm.riscv.esp.vmulas.u16.xacc.st.xp(i32 5, i32 3, i32 7, i32 6, i32 2) + tail call void @llvm.riscv.esp.vmulas.u8.qacc(i32 7, i32 1) + tail call void @llvm.riscv.esp.vmulas.u8.qacc.ld.ip(i32 7, i32 4, i32 9, i32 -48, i32 7) + tail call void @llvm.riscv.esp.vmulas.u8.qacc.ld.xp(i32 12, i32 6, i32 7, i32 11, i32 4) + tail call void @llvm.riscv.esp.vmulas.u8.qacc.st.ip(i32 1, i32 7, i32 2, i32 14, i32 0) + tail call void @llvm.riscv.esp.vmulas.u8.qacc.st.xp(i32 7, i32 0, i32 0, i32 4, i32 7) + tail call void @llvm.riscv.esp.vmulas.u8.xacc(i32 6, i32 4) + tail call void @llvm.riscv.esp.vmulas.u8.xacc.ld.ip(i32 6, i32 2, i32 5, i32 -80, i32 3) + tail call void @llvm.riscv.esp.vmulas.u8.xacc.ld.xp(i32 13, i32 5, i32 1, i32 13, i32 4) + tail call void @llvm.riscv.esp.vmulas.u8.xacc.st.ip(i32 2, i32 3, i32 7, i32 5, i32 -128) + tail call void @llvm.riscv.esp.vmulas.u8.xacc.st.xp(i32 6, i32 7, i32 2, i32 2, i32 5) + tail call void @llvm.riscv.esp.vmulas.s16.qacc.ldbc.incp(i32 0, i32 2, i32 14, i32 0) + tail call void @llvm.riscv.esp.vmulas.s8.qacc.ldbc.incp(i32 2, i32 6, i32 0, i32 5) + tail call void @llvm.riscv.esp.vmulas.u16.qacc.ldbc.incp(i32 7, i32 3, i32 8, i32 5) + tail call void @llvm.riscv.esp.vmulas.u8.qacc.ldbc.incp(i32 4, i32 4, i32 6, i32 3) + tail call void @llvm.riscv.esp.vsmulas.s16.qacc(i32 1, i32 5, i32 14) + tail 
call void @llvm.riscv.esp.vsmulas.s16.qacc.ld.incp(i32 7, i32 4, i32 5, i32 0, i32 0) + tail call void @llvm.riscv.esp.vsmulas.s8.qacc(i32 3, i32 5, i32 0) + tail call void @llvm.riscv.esp.vsmulas.s8.qacc.ld.incp(i32 1, i32 4, i32 8, i32 6, i32 1) + tail call void @llvm.riscv.esp.vsmulas.u16.qacc(i32 6, i32 5, i32 15) + tail call void @llvm.riscv.esp.vsmulas.u16.qacc.ld.incp(i32 7, i32 1, i32 0, i32 10, i32 7) + tail call void @llvm.riscv.esp.vsmulas.u8.qacc(i32 0, i32 7, i32 2) + tail call void @llvm.riscv.esp.vsmulas.u8.qacc.ld.incp(i32 3, i32 7, i32 10, i32 8, i32 4) + tail call void @llvm.riscv.esp.cmul.s16(i32 0, i32 7, i32 3, i32 6) + tail call void @llvm.riscv.esp.cmul.s16.ld.incp(i32 0, i32 3, i32 6, i32 0, i32 3, i32 5) + tail call void @llvm.riscv.esp.cmul.s16.st.incp(i32 4, i32 5, i32 5, i32 0, i32 2, i32 0) + tail call void @llvm.riscv.esp.cmul.s8(i32 1, i32 0, i32 3, i32 1) + tail call void @llvm.riscv.esp.cmul.s8.ld.incp(i32 5, i32 4, i32 5, i32 1, i32 7, i32 4) + tail call void @llvm.riscv.esp.cmul.s8.st.incp(i32 6, i32 0, i32 5, i32 14, i32 3, i32 0) + tail call void @llvm.riscv.esp.cmul.u16(i32 7, i32 5, i32 2, i32 7) + tail call void @llvm.riscv.esp.cmul.u16.ld.incp(i32 0, i32 1, i32 2, i32 1, i32 0, i32 0) + tail call void @llvm.riscv.esp.cmul.u16.st.incp(i32 1, i32 4, i32 2, i32 5, i32 2, i32 4) + tail call void @llvm.riscv.esp.cmul.u8(i32 7, i32 5, i32 0, i32 3) + tail call void @llvm.riscv.esp.cmul.u8.ld.incp(i32 0, i32 2, i32 11, i32 0, i32 0, i32 4) + tail call void @llvm.riscv.esp.cmul.u8.st.incp(i32 6, i32 3, i32 1, i32 10, i32 2, i32 4) + tail call void @llvm.riscv.esp.max.s16.a(i32 6, i32 3) + tail call void @llvm.riscv.esp.max.s32.a(i32 2, i32 0) + tail call void @llvm.riscv.esp.max.s8.a(i32 0, i32 9) + tail call void @llvm.riscv.esp.max.u16.a(i32 6, i32 6) + tail call void @llvm.riscv.esp.max.u32.a(i32 6, i32 1) + tail call void @llvm.riscv.esp.max.u8.a(i32 1, i32 4) + tail call void @llvm.riscv.esp.min.s16.a(i32 6, i32 11) + tail call 
void @llvm.riscv.esp.min.s32.a(i32 1, i32 14) + tail call void @llvm.riscv.esp.min.s8.a(i32 0, i32 1) + tail call void @llvm.riscv.esp.min.u16.a(i32 3, i32 14) + tail call void @llvm.riscv.esp.min.u32.a(i32 0, i32 9) + tail call void @llvm.riscv.esp.min.u8.a(i32 5, i32 8) + tail call void @llvm.riscv.esp.vabs.16(i32 0, i32 6) + tail call void @llvm.riscv.esp.vabs.32(i32 4, i32 1) + tail call void @llvm.riscv.esp.vabs.8(i32 2, i32 5) + tail call void @llvm.riscv.esp.vadd.s16(i32 1, i32 5, i32 6) + tail call void @llvm.riscv.esp.vadd.s16.ld.incp(i32 0, i32 6, i32 10, i32 1, i32 0) + tail call void @llvm.riscv.esp.vadd.s16.st.incp(i32 0, i32 4, i32 1, i32 11, i32 7) + tail call void @llvm.riscv.esp.vadd.s32(i32 7, i32 3, i32 7) + tail call void @llvm.riscv.esp.vadd.s32.ld.incp(i32 7, i32 2, i32 14, i32 4, i32 4) + tail call void @llvm.riscv.esp.vadd.s32.st.incp(i32 1, i32 7, i32 2, i32 13, i32 7) + tail call void @llvm.riscv.esp.vadd.s8(i32 1, i32 7, i32 7) + tail call void @llvm.riscv.esp.vadd.s8.ld.incp(i32 5, i32 6, i32 14, i32 1, i32 2) + tail call void @llvm.riscv.esp.vadd.s8.st.incp(i32 1, i32 0, i32 3, i32 9, i32 4) + tail call void @llvm.riscv.esp.vadd.u16(i32 7, i32 7, i32 0) + tail call void @llvm.riscv.esp.vadd.u16.ld.incp(i32 7, i32 5, i32 8, i32 1, i32 6) + tail call void @llvm.riscv.esp.vadd.u16.st.incp(i32 6, i32 3, i32 0, i32 5, i32 7) + tail call void @llvm.riscv.esp.vadd.u32(i32 0, i32 1, i32 4) + tail call void @llvm.riscv.esp.vadd.u32.ld.incp(i32 5, i32 0, i32 2, i32 4, i32 1) + tail call void @llvm.riscv.esp.vadd.u32.st.incp(i32 0, i32 1, i32 4, i32 1, i32 6) + tail call void @llvm.riscv.esp.vadd.u8(i32 2, i32 5, i32 5) + tail call void @llvm.riscv.esp.vadd.u8.ld.incp(i32 4, i32 3, i32 8, i32 1, i32 7) + tail call void @llvm.riscv.esp.vadd.u8.st.incp(i32 0, i32 0, i32 0, i32 5, i32 2) + tail call void @llvm.riscv.esp.vclamp.s16(i32 5, i32 14, i32 4) + tail call void @llvm.riscv.esp.vmax.s16(i32 6, i32 5, i32 5) + tail call void 
@llvm.riscv.esp.vmax.s16.ld.incp(i32 5, i32 5, i32 11, i32 3, i32 2) + tail call void @llvm.riscv.esp.vmax.s16.st.incp(i32 3, i32 5, i32 3, i32 2, i32 4) + tail call void @llvm.riscv.esp.vmax.s32(i32 5, i32 2, i32 2) + tail call void @llvm.riscv.esp.vmax.s32.ld.incp(i32 0, i32 1, i32 7, i32 6, i32 0) + tail call void @llvm.riscv.esp.vmax.s32.st.incp(i32 7, i32 6, i32 6, i32 14, i32 1) + tail call void @llvm.riscv.esp.vmax.s8(i32 5, i32 7, i32 7) + tail call void @llvm.riscv.esp.vmax.s8.ld.incp(i32 5, i32 1, i32 2, i32 1, i32 6) + tail call void @llvm.riscv.esp.vmax.s8.st.incp(i32 1, i32 3, i32 5, i32 4, i32 7) + tail call void @llvm.riscv.esp.vmax.u16(i32 4, i32 1, i32 1) + tail call void @llvm.riscv.esp.vmax.u16.ld.incp(i32 5, i32 4, i32 0, i32 5, i32 3) + tail call void @llvm.riscv.esp.vmax.u16.st.incp(i32 0, i32 7, i32 5, i32 11, i32 5) + tail call void @llvm.riscv.esp.vmax.u32(i32 0, i32 2, i32 4) + tail call void @llvm.riscv.esp.vmax.u32.ld.incp(i32 0, i32 6, i32 4, i32 1, i32 6) + tail call void @llvm.riscv.esp.vmax.u32.st.incp(i32 4, i32 7, i32 0, i32 6, i32 1) + tail call void @llvm.riscv.esp.vmax.u8(i32 2, i32 0, i32 5) + tail call void @llvm.riscv.esp.vmax.u8.ld.incp(i32 6, i32 1, i32 1, i32 5, i32 0) + tail call void @llvm.riscv.esp.vmax.u8.st.incp(i32 6, i32 7, i32 7, i32 10, i32 1) + tail call void @llvm.riscv.esp.vmin.s16(i32 1, i32 3, i32 4) + tail call void @llvm.riscv.esp.vmin.s16.ld.incp(i32 2, i32 1, i32 3, i32 2, i32 2) + tail call void @llvm.riscv.esp.vmin.s16.st.incp(i32 7, i32 6, i32 2, i32 7, i32 1) + tail call void @llvm.riscv.esp.vmin.s32(i32 0, i32 3, i32 2) + tail call void @llvm.riscv.esp.vmin.s32.ld.incp(i32 7, i32 6, i32 14, i32 5, i32 1) + tail call void @llvm.riscv.esp.vmin.s32.st.incp(i32 5, i32 1, i32 7, i32 6, i32 5) + tail call void @llvm.riscv.esp.vmin.s8(i32 3, i32 6, i32 2) + tail call void @llvm.riscv.esp.vmin.s8.ld.incp(i32 4, i32 3, i32 11, i32 1, i32 7) + tail call void @llvm.riscv.esp.vmin.s8.st.incp(i32 0, i32 1, i32 1, 
i32 6, i32 4) + tail call void @llvm.riscv.esp.vmin.u16(i32 3, i32 7, i32 4) + tail call void @llvm.riscv.esp.vmin.u16.ld.incp(i32 6, i32 6, i32 11, i32 5, i32 4) + tail call void @llvm.riscv.esp.vmin.u16.st.incp(i32 6, i32 0, i32 1, i32 12, i32 2) + tail call void @llvm.riscv.esp.vmin.u32(i32 0, i32 7, i32 5) + tail call void @llvm.riscv.esp.vmin.u32.ld.incp(i32 5, i32 6, i32 13, i32 6, i32 7) + tail call void @llvm.riscv.esp.vmin.u32.st.incp(i32 3, i32 7, i32 5, i32 4, i32 4) + tail call void @llvm.riscv.esp.vmin.u8(i32 5, i32 5, i32 7) + tail call void @llvm.riscv.esp.vmin.u8.ld.incp(i32 0, i32 5, i32 0, i32 5, i32 2) + tail call void @llvm.riscv.esp.vmin.u8.st.incp(i32 6, i32 6, i32 2, i32 12, i32 1) + tail call void @llvm.riscv.esp.vmul.s16(i32 2, i32 1, i32 6) + tail call void @llvm.riscv.esp.vmul.s16.ld.incp(i32 6, i32 7, i32 10, i32 3, i32 0) + tail call void @llvm.riscv.esp.vmul.s16.s8xs8(i32 3, i32 5, i32 7, i32 0) + tail call void @llvm.riscv.esp.vmul.s16.st.incp(i32 7, i32 1, i32 2, i32 3, i32 0) + tail call void @llvm.riscv.esp.vmul.s32.s16xs16(i32 5, i32 2, i32 3, i32 4) + tail call void @llvm.riscv.esp.vmul.s8(i32 4, i32 0, i32 3) + tail call void @llvm.riscv.esp.vmul.s8.ld.incp(i32 2, i32 3, i32 0, i32 2, i32 0) + tail call void @llvm.riscv.esp.vmul.s8.st.incp(i32 0, i32 7, i32 0, i32 8, i32 3) + tail call void @llvm.riscv.esp.vmul.u16(i32 3, i32 7, i32 2) + tail call void @llvm.riscv.esp.vmul.u16.ld.incp(i32 6, i32 6, i32 7, i32 5, i32 5) + tail call void @llvm.riscv.esp.vmul.u16.st.incp(i32 4, i32 4, i32 3, i32 3, i32 2) + tail call void @llvm.riscv.esp.vmul.u8(i32 3, i32 7, i32 7) + tail call void @llvm.riscv.esp.vmul.u8.ld.incp(i32 0, i32 6, i32 9, i32 1, i32 0) + tail call void @llvm.riscv.esp.vmul.u8.st.incp(i32 0, i32 3, i32 7, i32 6, i32 4) + tail call void @llvm.riscv.esp.vprelu.s16(i32 8, i32 3, i32 4, i32 1) + tail call void @llvm.riscv.esp.vprelu.s8(i32 3, i32 5, i32 4, i32 2) + tail call void @llvm.riscv.esp.vrelu.s16(i32 8, i32 9, i32 
6) + tail call void @llvm.riscv.esp.vrelu.s8(i32 12, i32 1, i32 5) + tail call void @llvm.riscv.esp.vsadds.s16(i32 12, i32 3, i32 3) + tail call void @llvm.riscv.esp.vsadds.s8(i32 7, i32 1, i32 7) + tail call void @llvm.riscv.esp.vsadds.u16(i32 4, i32 2, i32 3) + tail call void @llvm.riscv.esp.vsadds.u8(i32 8, i32 3, i32 2) + tail call void @llvm.riscv.esp.vsat.s16(i32 9, i32 1, i32 0, i32 5) + tail call void @llvm.riscv.esp.vsat.s32(i32 12, i32 9, i32 3, i32 3) + tail call void @llvm.riscv.esp.vsat.s8(i32 10, i32 2, i32 7, i32 0) + tail call void @llvm.riscv.esp.vsat.u16(i32 7, i32 7, i32 7, i32 3) + tail call void @llvm.riscv.esp.vsat.u32(i32 8, i32 2, i32 5, i32 3) + tail call void @llvm.riscv.esp.vsat.u8(i32 1, i32 13, i32 6, i32 0) + tail call void @llvm.riscv.esp.vssubs.s16(i32 3, i32 7, i32 3) + tail call void @llvm.riscv.esp.vssubs.s8(i32 14, i32 0, i32 7) + tail call void @llvm.riscv.esp.vssubs.u16(i32 0, i32 4, i32 5) + tail call void @llvm.riscv.esp.vssubs.u8(i32 11, i32 1, i32 5) + tail call void @llvm.riscv.esp.vsub.s16(i32 0, i32 6, i32 0) + tail call void @llvm.riscv.esp.vsub.s16.ld.incp(i32 3, i32 7, i32 6, i32 2, i32 2) + tail call void @llvm.riscv.esp.vsub.s16.st.incp(i32 0, i32 3, i32 7, i32 0, i32 0) + tail call void @llvm.riscv.esp.vsub.s32(i32 2, i32 7, i32 7) + tail call void @llvm.riscv.esp.vsub.s32.ld.incp(i32 2, i32 0, i32 7, i32 3, i32 4) + tail call void @llvm.riscv.esp.vsub.s32.st.incp(i32 1, i32 1, i32 4, i32 5, i32 1) + tail call void @llvm.riscv.esp.vsub.s8(i32 5, i32 6, i32 7) + tail call void @llvm.riscv.esp.vsub.s8.ld.incp(i32 2, i32 6, i32 1, i32 1, i32 4) + tail call void @llvm.riscv.esp.vsub.s8.st.incp(i32 2, i32 3, i32 5, i32 4, i32 4) + tail call void @llvm.riscv.esp.vsub.u16(i32 7, i32 0, i32 5) + tail call void @llvm.riscv.esp.vsub.u16.ld.incp(i32 7, i32 5, i32 11, i32 0, i32 4) + tail call void @llvm.riscv.esp.vsub.u16.st.incp(i32 3, i32 1, i32 0, i32 11, i32 1) + tail call void @llvm.riscv.esp.vsub.u32(i32 4, i32 2, i32 
5) + tail call void @llvm.riscv.esp.vsub.u32.ld.incp(i32 4, i32 2, i32 0, i32 2, i32 4) + tail call void @llvm.riscv.esp.vsub.u32.st.incp(i32 7, i32 4, i32 0, i32 11, i32 7) + tail call void @llvm.riscv.esp.vsub.u8(i32 5, i32 4, i32 6) + tail call void @llvm.riscv.esp.vsub.u8.ld.incp(i32 2, i32 4, i32 0, i32 6, i32 4) + tail call void @llvm.riscv.esp.vsub.u8.st.incp(i32 2, i32 0, i32 3, i32 11, i32 3) + tail call void @llvm.riscv.esp.addx2(i32 5, i32 14, i32 4) + tail call void @llvm.riscv.esp.addx4(i32 14, i32 5, i32 4) + tail call void @llvm.riscv.esp.sat(i32 6, i32 2, i32 4) + tail call void @llvm.riscv.esp.subx2(i32 2, i32 2, i32 9) + tail call void @llvm.riscv.esp.subx4(i32 0, i32 9, i32 3) + tail call void @llvm.riscv.esp.andq(i32 6, i32 3, i32 7) + tail call void @llvm.riscv.esp.notq(i32 5, i32 6) + tail call void @llvm.riscv.esp.orq(i32 1, i32 0, i32 1) + tail call void @llvm.riscv.esp.xorq(i32 1, i32 6, i32 5) + tail call void @llvm.riscv.esp.vcmp.eq.s16(i32 0, i32 2, i32 1) + tail call void @llvm.riscv.esp.vcmp.eq.s32(i32 1, i32 6, i32 5) + tail call void @llvm.riscv.esp.vcmp.eq.s8(i32 0, i32 3, i32 2) + tail call void @llvm.riscv.esp.vcmp.eq.u16(i32 7, i32 1, i32 7) + tail call void @llvm.riscv.esp.vcmp.eq.u32(i32 1, i32 2, i32 2) + tail call void @llvm.riscv.esp.vcmp.eq.u8(i32 1, i32 6, i32 3) + tail call void @llvm.riscv.esp.vcmp.gt.s16(i32 5, i32 6, i32 4) + tail call void @llvm.riscv.esp.vcmp.gt.s32(i32 6, i32 2, i32 0) + tail call void @llvm.riscv.esp.vcmp.gt.s8(i32 3, i32 5, i32 2) + tail call void @llvm.riscv.esp.vcmp.gt.u16(i32 7, i32 4, i32 7) + tail call void @llvm.riscv.esp.vcmp.gt.u32(i32 6, i32 2, i32 2) + tail call void @llvm.riscv.esp.vcmp.gt.u8(i32 2, i32 0, i32 0) + tail call void @llvm.riscv.esp.vcmp.lt.s16(i32 2, i32 1, i32 7) + tail call void @llvm.riscv.esp.vcmp.lt.s32(i32 2, i32 1, i32 4) + tail call void @llvm.riscv.esp.vcmp.lt.s8(i32 5, i32 2, i32 6) + tail call void @llvm.riscv.esp.vcmp.lt.u16(i32 1, i32 5, i32 4) + tail call 
void @llvm.riscv.esp.vcmp.lt.u32(i32 5, i32 6, i32 2) + tail call void @llvm.riscv.esp.vcmp.lt.u8(i32 3, i32 5, i32 5) + tail call void @llvm.riscv.esp.mov.s16.qacc(i32 2) + tail call void @llvm.riscv.esp.mov.s8.qacc(i32 5) + tail call void @llvm.riscv.esp.mov.u16.qacc(i32 5) + tail call void @llvm.riscv.esp.mov.u8.qacc(i32 3) + tail call void @llvm.riscv.esp.movi.16.a(i32 2, i32 3, i32 1) + tail call void @llvm.riscv.esp.movi.16.q(i32 7, i32 13, i32 3) + tail call void @llvm.riscv.esp.movi.32.a(i32 6, i32 1, i32 14) + tail call void @llvm.riscv.esp.movi.32.q(i32 9, i32 1, i32 5) + tail call void @llvm.riscv.esp.movi.8.a(i32 5, i32 15, i32 14) + tail call void @llvm.riscv.esp.movi.8.q(i32 0, i32 6, i32 1) + tail call void @llvm.riscv.esp.movx.r.cfg(i32 5) + tail call void @llvm.riscv.esp.movx.r.fft.bit.width(i32 2) + tail call void @llvm.riscv.esp.movx.r.perf(i32 3, i32 33) + tail call void @llvm.riscv.esp.movx.r.sar(i32 5) + tail call void @llvm.riscv.esp.movx.r.sar.bytes(i32 6) + tail call void @llvm.riscv.esp.movx.r.xacc.h(i32 10) + tail call void @llvm.riscv.esp.movx.r.xacc.l(i32 12) + tail call void @llvm.riscv.esp.movx.w.cfg(i32 10) + tail call void @llvm.riscv.esp.movx.w.fft.bit.width(i32 1) + tail call void @llvm.riscv.esp.movx.w.perf(i32 8) + tail call void @llvm.riscv.esp.movx.w.sar(i32 3) + tail call void @llvm.riscv.esp.movx.w.sar.bytes(i32 4) + tail call void @llvm.riscv.esp.movx.w.xacc.h(i32 8) + tail call void @llvm.riscv.esp.movx.w.xacc.l(i32 4) + tail call void @llvm.riscv.esp.vext.s16(i32 6, i32 7, i32 0) + tail call void @llvm.riscv.esp.vext.s8(i32 3, i32 5, i32 3) + tail call void @llvm.riscv.esp.vext.u16(i32 6, i32 4, i32 2) + tail call void @llvm.riscv.esp.vext.u8(i32 0, i32 4, i32 0) + tail call void @llvm.riscv.esp.vunzip.16(i32 1, i32 0) + tail call void @llvm.riscv.esp.vunzip.32(i32 6, i32 4) + tail call void @llvm.riscv.esp.vunzip.8(i32 2, i32 1) + tail call void @llvm.riscv.esp.vunzipt.16(i32 7, i32 0, i32 2) + tail call void 
@llvm.riscv.esp.vunzipt.8(i32 0, i32 6, i32 2) + tail call void @llvm.riscv.esp.vzip.16(i32 1, i32 6) + tail call void @llvm.riscv.esp.vzip.32(i32 4, i32 6) + tail call void @llvm.riscv.esp.vzip.8(i32 4, i32 0) + tail call void @llvm.riscv.esp.vzipt.16(i32 0, i32 3, i32 5) + tail call void @llvm.riscv.esp.vzipt.8(i32 6, i32 1, i32 5) + tail call void @llvm.riscv.esp.zero.q(i32 5) + tail call void @llvm.riscv.esp.zero.qacc() + tail call void @llvm.riscv.esp.zero.xacc() + tail call void @llvm.riscv.esp.fft.ams.s16.ld.incp(i32 3, i32 1, i32 0, i32 3, i32 0, i32 6, i32 6, i32 0) + tail call void @llvm.riscv.esp.fft.ams.s16.ld.incp.uaup(i32 3, i32 0, i32 1, i32 5, i32 0, i32 3, i32 0, i32 2) + tail call void @llvm.riscv.esp.fft.ams.s16.ld.r32.decp(i32 3, i32 5, i32 1, i32 3, i32 1, i32 7, i32 0, i32 6) + tail call void @llvm.riscv.esp.fft.ams.s16.st.incp(i32 5, i32 6, i32 3, i32 5, i32 4, i32 2, i32 0, i32 7) + tail call void @llvm.riscv.esp.fft.bitrev(i32 2, i32 7) + tail call void @llvm.riscv.esp.fft.cmul.s16.ld.xp(i32 1, i32 7, i32 7, i32 4, i32 1, i32 3, i32 2) + tail call void @llvm.riscv.esp.fft.cmul.s16.st.xp(i32 2, i32 0, i32 7, i32 4, i32 4, i32 1, i32 3, i32 4) + tail call void @llvm.riscv.esp.fft.r2bf.s16(i32 5, i32 1, i32 0, i32 7, i32 3) + tail call void @llvm.riscv.esp.fft.r2bf.s16.st.incp(i32 7, i32 4, i32 10, i32 2, i32 7) + tail call void @llvm.riscv.esp.fft.vst.r32.decp(i32 5, i32 7, i32 1) + tail call void @llvm.riscv.esp.ld.128.usar.ip(i32 4, i32 608, i32 1) + tail call void @llvm.riscv.esp.ld.128.usar.xp(i32 8, i32 1, i32 2) + tail call void @llvm.riscv.esp.ld.xacc.ip(i32 6, i32 400) + tail call void @llvm.riscv.esp.ldqa.s16.128.ip(i32 13, i32 912) + tail call void @llvm.riscv.esp.ldqa.s16.128.xp(i32 10, i32 2) + tail call void @llvm.riscv.esp.ldqa.s8.128.ip(i32 1, i32 1824) + tail call void @llvm.riscv.esp.ldqa.s8.128.xp(i32 4, i32 9) + tail call void @llvm.riscv.esp.ldqa.u16.128.ip(i32 4, i32 -1904) + tail call void 
@llvm.riscv.esp.ldqa.u16.128.xp(i32 14, i32 6) + tail call void @llvm.riscv.esp.ldqa.u8.128.ip(i32 3, i32 1216) + tail call void @llvm.riscv.esp.ldqa.u8.128.xp(i32 6, i32 2) + tail call void @llvm.riscv.esp.vldbc.16.ip(i32 9, i32 -448, i32 7) + tail call void @llvm.riscv.esp.vldbc.16.xp(i32 9, i32 5, i32 3) + tail call void @llvm.riscv.esp.vldbc.32.ip(i32 0, i32 220, i32 3) + tail call void @llvm.riscv.esp.vldbc.32.xp(i32 2, i32 12, i32 7) + tail call void @llvm.riscv.esp.vldbc.8.ip(i32 12, i32 396, i32 2) + tail call void @llvm.riscv.esp.vldbc.8.xp(i32 9, i32 4, i32 7) + tail call void @llvm.riscv.esp.vldext.s16.ip(i32 13, i32 16, i32 7, i32 4) + tail call void @llvm.riscv.esp.vldext.s16.xp(i32 8, i32 0, i32 5, i32 0) + tail call void @llvm.riscv.esp.vldext.s8.ip(i32 4, i32 80, i32 3, i32 6) + tail call void @llvm.riscv.esp.vldext.s8.xp(i32 6, i32 3, i32 1, i32 1) + tail call void @llvm.riscv.esp.vldext.u16.ip(i32 14, i32 48, i32 2, i32 5) + tail call void @llvm.riscv.esp.vldext.u16.xp(i32 12, i32 7, i32 2, i32 0) + tail call void @llvm.riscv.esp.vldext.u8.ip(i32 13, i32 64, i32 7, i32 2) + tail call void @llvm.riscv.esp.vldext.u8.xp(i32 6, i32 6, i32 7, i32 2) + tail call void @llvm.riscv.esp.vldhbc.16.incp(i32 1, i32 4, i32 7) + tail call void @llvm.riscv.esp.ld.qacc.h.h.128.ip(i32 6, i32 512) + tail call void @llvm.riscv.esp.ld.qacc.h.l.128.ip(i32 5, i32 -784) + tail call void @llvm.riscv.esp.ld.qacc.l.h.128.ip(i32 10, i32 -800) + tail call void @llvm.riscv.esp.ld.qacc.l.l.128.ip(i32 10, i32 -1952) + tail call void @llvm.riscv.esp.ld.ua.state.ip(i32 8, i32 -752) + tail call void @llvm.riscv.esp.ldxq.32(i32 0, i32 4, i32 2, i32 4, i32 7) + tail call void @llvm.riscv.esp.st.qacc.h.h.128.ip(i32 13, i32 -336) + tail call void @llvm.riscv.esp.st.qacc.h.l.128.ip(i32 8, i32 1568) + tail call void @llvm.riscv.esp.st.qacc.l.h.128.ip(i32 4, i32 16) + tail call void @llvm.riscv.esp.st.qacc.l.l.128.ip(i32 8, i32 416) + tail call void @llvm.riscv.esp.st.ua.state.ip(i32 7, 
i32 -1360) + tail call void @llvm.riscv.esp.stxq.32(i32 6, i32 6, i32 0, i32 2, i32 5) + tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 3) + tail call void @llvm.riscv.esp.vld.128.xp(i32 9, i32 7, i32 3) + tail call void @llvm.riscv.esp.vld.h.64.ip(i32 0, i32 -352, i32 0) + tail call void @llvm.riscv.esp.vld.h.64.xp(i32 14, i32 2, i32 2) + tail call void @llvm.riscv.esp.vld.l.64.ip(i32 6, i32 56, i32 2) + tail call void @llvm.riscv.esp.vld.l.64.xp(i32 1, i32 9, i32 5) + tail call void @llvm.riscv.esp.vst.128.ip(i32 5, i32 6, i32 -960) + tail call void @llvm.riscv.esp.vst.128.xp(i32 7, i32 6, i32 13) + tail call void @llvm.riscv.esp.vst.h.64.ip(i32 7, i32 4, i32 944) + tail call void @llvm.riscv.esp.vst.h.64.xp(i32 10, i32 7, i32 7) + tail call void @llvm.riscv.esp.vst.l.64.ip(i32 5, i32 3, i32 984) + tail call void @llvm.riscv.esp.vst.l.64.xp(i32 12, i32 5, i32 3) + tail call void @llvm.riscv.esp.slci.2q(i32 1, i32 5, i32 12) + tail call void @llvm.riscv.esp.slcxxp.2q(i32 5, i32 5, i32 2, i32 3) + tail call void @llvm.riscv.esp.src.q(i32 3, i32 1, i32 2) + tail call void @llvm.riscv.esp.src.q.ld.ip(i32 5, i32 0, i32 5, i32 -272, i32 0) + tail call void @llvm.riscv.esp.src.q.ld.xp(i32 3, i32 6, i32 12, i32 7, i32 1) + tail call void @llvm.riscv.esp.src.q.qup(i32 4, i32 7, i32 3) + tail call void @llvm.riscv.esp.srci.2q(i32 2, i32 3, i32 7) + tail call void @llvm.riscv.esp.srcmb.s16.q.qacc(i32 4, i32 0, i32 2) + tail call void @llvm.riscv.esp.srcmb.s16.qacc(i32 13, i32 1, i32 5) + tail call void @llvm.riscv.esp.srcmb.s8.q.qacc(i32 4, i32 0, i32 5) + tail call void @llvm.riscv.esp.srcmb.s8.qacc(i32 8, i32 1, i32 1) + tail call void @llvm.riscv.esp.srcmb.u16.q.qacc(i32 3, i32 1, i32 0) + tail call void @llvm.riscv.esp.srcmb.u16.qacc(i32 13, i32 0, i32 7) + tail call void @llvm.riscv.esp.srcmb.u8.q.qacc(i32 5, i32 1, i32 3) + tail call void @llvm.riscv.esp.srcmb.u8.qacc(i32 5, i32 0, i32 0) + tail call void @llvm.riscv.esp.srcq.128.st.incp(i32 5, i32 0, 
i32 12) + tail call void @llvm.riscv.esp.srcxxp.2q(i32 12, i32 6, i32 4, i32 6) + tail call void @llvm.riscv.esp.srs.s.xacc(i32 6, i32 13) + tail call void @llvm.riscv.esp.srs.u.xacc(i32 3, i32 12) + tail call void @llvm.riscv.esp.vsl.32(i32 2, i32 5) + tail call void @llvm.riscv.esp.vsld.16(i32 3, i32 7, i32 3) + tail call void @llvm.riscv.esp.vsld.32(i32 7, i32 1, i32 3) + tail call void @llvm.riscv.esp.vsld.8(i32 1, i32 5, i32 0) + tail call void @llvm.riscv.esp.vsr.s32(i32 0, i32 3) + tail call void @llvm.riscv.esp.vsr.u32(i32 2, i32 1) + tail call void @llvm.riscv.esp.vsrd.16(i32 3, i32 0, i32 4) + tail call void @llvm.riscv.esp.vsrd.32(i32 6, i32 3, i32 0) + tail call void @llvm.riscv.esp.vsrd.8(i32 4, i32 1, i32 5) + tail call void @llvm.riscv.esp.st.s.xacc.ip(i32 8, i32 80) + tail call void @llvm.riscv.esp.st.u.xacc.ip(i32 6, i32 -464) + ret void +} + +declare void @llvm.riscv.esp.vcmulas.s16.qacc.h(i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s16.qacc.l(i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s8.qacc.h(i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s8.qacc.l(i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s16.qacc(i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s16.qacc.ld.ip(i32, i32, i32, i32, i32) 
nounwind +declare void @llvm.riscv.esp.vmulas.s16.qacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s16.qacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s16.qacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s16.xacc(i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s16.xacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s16.xacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s16.xacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s16.xacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.qacc(i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.qacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.qacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.qacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.qacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.xacc(i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.xacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.xacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.xacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.xacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u16.qacc(i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u16.qacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u16.qacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u16.qacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u16.qacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u16.xacc(i32, i32) nounwind +declare void 
@llvm.riscv.esp.vmulas.u16.xacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u16.xacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u16.xacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u16.xacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.qacc(i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.qacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.qacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.qacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.qacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.xacc(i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.xacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.xacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.xacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.xacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s16.qacc.ldbc.incp(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.qacc.ldbc.incp(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u16.qacc.ldbc.incp(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.qacc.ldbc.incp(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsmulas.s16.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsmulas.s16.qacc.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsmulas.s8.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsmulas.s8.qacc.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsmulas.u16.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsmulas.u16.qacc.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void 
@llvm.riscv.esp.vsmulas.u8.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsmulas.u8.qacc.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.s16(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.s16.ld.incp(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.s16.st.incp(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.s8(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.s8.ld.incp(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.s8.st.incp(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.u16(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.u16.ld.incp(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.u16.st.incp(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.u8(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.u8.ld.incp(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.u8.st.incp(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.max.s16.a(i32, i32) nounwind +declare void @llvm.riscv.esp.max.s32.a(i32, i32) nounwind +declare void @llvm.riscv.esp.max.s8.a(i32, i32) nounwind +declare void @llvm.riscv.esp.max.u16.a(i32, i32) nounwind +declare void @llvm.riscv.esp.max.u32.a(i32, i32) nounwind +declare void @llvm.riscv.esp.max.u8.a(i32, i32) nounwind +declare void @llvm.riscv.esp.min.s16.a(i32, i32) nounwind +declare void @llvm.riscv.esp.min.s32.a(i32, i32) nounwind +declare void @llvm.riscv.esp.min.s8.a(i32, i32) nounwind +declare void @llvm.riscv.esp.min.u16.a(i32, i32) nounwind +declare void @llvm.riscv.esp.min.u32.a(i32, i32) nounwind +declare void @llvm.riscv.esp.min.u8.a(i32, i32) nounwind +declare void @llvm.riscv.esp.vabs.16(i32, i32) nounwind +declare void @llvm.riscv.esp.vabs.32(i32, i32) nounwind +declare void @llvm.riscv.esp.vabs.8(i32, i32) nounwind +declare void 
@llvm.riscv.esp.vadd.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.s16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.s32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.s32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.s32.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.s8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.s8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.u16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.u16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.u32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.u32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.u32.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.u8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.u8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vclamp.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.s16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.s32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.s32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.s32.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.s8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare 
void @llvm.riscv.esp.vmax.s8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.u16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.u16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.u32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.u32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.u32.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.u8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.u8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.s16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.s32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.s32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.s32.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.s8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.s8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.u16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.u16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.u32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.u32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.u32.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.u8.ld.incp(i32, i32, i32, i32, i32) 
nounwind +declare void @llvm.riscv.esp.vmin.u8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.s16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.s16.s8xs8(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.s32.s16xs16(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.s8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.s8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.u16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.u16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.u8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.u8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vprelu.s16(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vprelu.s8(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vrelu.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vrelu.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsadds.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsadds.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsadds.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsadds.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsat.s16(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsat.s32(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsat.s8(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsat.u16(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsat.u32(i32, i32, i32, i32) nounwind 
+declare void @llvm.riscv.esp.vsat.u8(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vssubs.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vssubs.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vssubs.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vssubs.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.s16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.s32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.s32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.s32.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.s8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.s8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.u16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.u16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.u32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.u32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.u32.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.u8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.u8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.addx2(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.addx4(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.sat(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.subx2(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.subx4(i32, i32, i32) nounwind +declare void 
@llvm.riscv.esp.andq(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.notq(i32, i32) nounwind +declare void @llvm.riscv.esp.orq(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.xorq(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.eq.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.eq.s32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.eq.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.eq.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.eq.u32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.eq.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.gt.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.gt.s32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.gt.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.gt.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.gt.u32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.gt.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.lt.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.lt.s32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.lt.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.lt.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.lt.u32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.lt.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.mov.s16.qacc(i32) nounwind +declare void @llvm.riscv.esp.mov.s8.qacc(i32) nounwind +declare void @llvm.riscv.esp.mov.u16.qacc(i32) nounwind +declare void @llvm.riscv.esp.mov.u8.qacc(i32) nounwind +declare void @llvm.riscv.esp.movi.16.a(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.movi.16.q(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.movi.32.a(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.movi.32.q(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.movi.8.a(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.movi.8.q(i32, i32, i32) 
nounwind +declare void @llvm.riscv.esp.movx.r.cfg(i32) nounwind +declare void @llvm.riscv.esp.movx.r.fft.bit.width(i32) nounwind +declare void @llvm.riscv.esp.movx.r.perf(i32, i32) nounwind +declare void @llvm.riscv.esp.movx.r.sar(i32) nounwind +declare void @llvm.riscv.esp.movx.r.sar.bytes(i32) nounwind +declare void @llvm.riscv.esp.movx.r.xacc.h(i32) nounwind +declare void @llvm.riscv.esp.movx.r.xacc.l(i32) nounwind +declare void @llvm.riscv.esp.movx.w.cfg(i32) nounwind +declare void @llvm.riscv.esp.movx.w.fft.bit.width(i32) nounwind +declare void @llvm.riscv.esp.movx.w.perf(i32) nounwind +declare void @llvm.riscv.esp.movx.w.sar(i32) nounwind +declare void @llvm.riscv.esp.movx.w.sar.bytes(i32) nounwind +declare void @llvm.riscv.esp.movx.w.xacc.h(i32) nounwind +declare void @llvm.riscv.esp.movx.w.xacc.l(i32) nounwind +declare void @llvm.riscv.esp.vext.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vext.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vext.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vext.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vunzip.16(i32, i32) nounwind +declare void @llvm.riscv.esp.vunzip.32(i32, i32) nounwind +declare void @llvm.riscv.esp.vunzip.8(i32, i32) nounwind +declare void @llvm.riscv.esp.vunzipt.16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vunzipt.8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vzip.16(i32, i32) nounwind +declare void @llvm.riscv.esp.vzip.32(i32, i32) nounwind +declare void @llvm.riscv.esp.vzip.8(i32, i32) nounwind +declare void @llvm.riscv.esp.vzipt.16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vzipt.8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.zero.q(i32) nounwind +declare void @llvm.riscv.esp.zero.qacc() nounwind +declare void @llvm.riscv.esp.zero.xacc() nounwind +declare void @llvm.riscv.esp.fft.ams.s16.ld.incp(i32, i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.fft.ams.s16.ld.incp.uaup(i32, i32, 
i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.fft.ams.s16.ld.r32.decp(i32, i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.fft.ams.s16.st.incp(i32, i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.fft.bitrev(i32, i32) nounwind +declare void @llvm.riscv.esp.fft.cmul.s16.ld.xp(i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.fft.cmul.s16.st.xp(i32, i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.fft.r2bf.s16(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.fft.r2bf.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.fft.vst.r32.decp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.ld.128.usar.ip(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.ld.128.usar.xp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.ld.xacc.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.ldqa.s16.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.ldqa.s16.128.xp(i32, i32) nounwind +declare void @llvm.riscv.esp.ldqa.s8.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.ldqa.s8.128.xp(i32, i32) nounwind +declare void @llvm.riscv.esp.ldqa.u16.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.ldqa.u16.128.xp(i32, i32) nounwind +declare void @llvm.riscv.esp.ldqa.u8.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.ldqa.u8.128.xp(i32, i32) nounwind +declare void @llvm.riscv.esp.vldbc.16.ip(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldbc.16.xp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldbc.32.ip(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldbc.32.xp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldbc.8.ip(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldbc.8.xp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldext.s16.ip(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldext.s16.xp(i32, i32, i32, i32) nounwind +declare void 
@llvm.riscv.esp.vldext.s8.ip(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldext.s8.xp(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldext.u16.ip(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldext.u16.xp(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldext.u8.ip(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldext.u8.xp(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldhbc.16.incp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.ld.qacc.h.h.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.ld.qacc.h.l.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.ld.qacc.l.h.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.ld.qacc.l.l.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.ld.ua.state.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.ldxq.32(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.st.qacc.h.h.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.st.qacc.h.l.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.st.qacc.l.h.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.st.qacc.l.l.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.st.ua.state.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.stxq.32(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vld.128.ip(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vld.128.xp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vld.h.64.ip(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vld.h.64.xp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vld.l.64.ip(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vld.l.64.xp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vst.128.ip(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vst.128.xp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vst.h.64.ip(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vst.h.64.xp(i32, i32, i32) nounwind +declare void 
@llvm.riscv.esp.vst.l.64.ip(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vst.l.64.xp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.slci.2q(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.slcxxp.2q(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.src.q(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.src.q.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.src.q.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.src.q.qup(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srci.2q(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srcmb.s16.q.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srcmb.s16.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srcmb.s8.q.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srcmb.s8.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srcmb.u16.q.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srcmb.u16.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srcmb.u8.q.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srcmb.u8.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srcq.128.st.incp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srcxxp.2q(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srs.s.xacc(i32, i32) nounwind +declare void @llvm.riscv.esp.srs.u.xacc(i32, i32) nounwind +declare void @llvm.riscv.esp.vsl.32(i32, i32) nounwind +declare void @llvm.riscv.esp.vsld.16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsld.32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsld.8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsr.s32(i32, i32) nounwind +declare void @llvm.riscv.esp.vsr.u32(i32, i32) nounwind +declare void @llvm.riscv.esp.vsrd.16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsrd.32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsrd.8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.st.s.xacc.ip(i32, i32) 
nounwind +declare void @llvm.riscv.esp.st.u.xacc.ip(i32, i32) nounwind diff --git a/llvm/test/MC/RISCV/esp32p4-hwlp-valid.s b/llvm/test/MC/RISCV/esp32p4-hwlp-valid.s new file mode 100644 index 0000000000000..a9205f3b590a8 --- /dev/null +++ b/llvm/test/MC/RISCV/esp32p4-hwlp-valid.s @@ -0,0 +1,22 @@ +# RUN: llvm-mc %s -triple=riscv32 -mcpu=esp32p4 -show-encoding | FileCheck -check-prefixes=CHECK %s + +dl_hwlp_test: +# CHECK: dl_hwlp_test: + esp.lp.setup 0, a1, loop_last_instruction +# CHECK: esp.lp.setup 0, a1, loop_last_instruction # encoding: [0x2b'A',0xc0'A',0x05'A',A] + esp.lp.starti 0, loop_last_instruction +# CHECK: esp.lp.starti 0, loop_last_instruction # encoding: [0x2b'A',A,A,A] + esp.lp.counti 0, 4000 +# CHECK: esp.lp.counti 0, 4000 # encoding: [0x2b,0x30,0x00,0xfa] + esp.lp.count 0, a1 +# CHECK: esp.lp.count 0, a1 # encoding: [0x2b,0xa0,0x05,0x00] + esp.lp.setupi 0, 1234, loop_last_instruction +# CHECK: esp.lp.setupi 0, 1234, loop_last_instruction # encoding: [0x2b'A',0x50'A',0x20'A',0x4d'A'] + # lp.setup 0, a1, loop_last_instruction +# CHECK: # fixup A - offset: 0, value: loop_last_instruction, kind: fixup_riscv_branch + loop_last_instruction: +# CHECK: loop_last_instruction: + addi a0, a0, 1 +# CHECK: addi a0, a0, 1 # encoding: [0x05,0x05] + ret +# CHECK: ret # encoding: [0x82,0x80] diff --git a/llvm/test/MC/RISCV/esp32p4-valid.s b/llvm/test/MC/RISCV/esp32p4-valid.s new file mode 100644 index 0000000000000..d64dadffb322f --- /dev/null +++ b/llvm/test/MC/RISCV/esp32p4-valid.s @@ -0,0 +1,710 @@ +# RUN: llvm-mc %s -triple=riscv32 -mcpu=esp32p4 -show-encoding | FileCheck -check-prefixes=CHECK %s + +esp.vcmulas.s16.qacc.h q2, q2 +# CHECK: esp.vcmulas.s16.qacc.h q2, q2 # encoding: [0x5f,0x80,0x87,0x4a] +esp.vcmulas.s16.qacc.h.ld.ip q5, a3, -16, q0, q4 +# CHECK: esp.vcmulas.s16.qacc.h.ld.ip q5, a3, -16, q0, q4 # encoding: [0x3b,0xf7,0xfa,0x13] +esp.vcmulas.s16.qacc.h.ld.xp q0, a4, a5, q2, q3 +# CHECK: esp.vcmulas.s16.qacc.h.ld.xp q0, a4, a5, q2, q3 # encoding: 
[0x7f,0x20,0x73,0x4f] +esp.vcmulas.s16.qacc.l q3, q5 +# CHECK: esp.vcmulas.s16.qacc.l q3, q5 # encoding: [0x5f,0x80,0x83,0x76] +esp.vcmulas.s16.qacc.l.ld.ip q6, a1, -32, q6, q5 +# CHECK: esp.vcmulas.s16.qacc.l.ld.ip q6, a1, -32, q6, q5 # encoding: [0x3b,0xfb,0xf1,0xd5] +esp.vcmulas.s16.qacc.l.ld.xp q6, a2, a0, q4, q5 +# CHECK: esp.vcmulas.s16.qacc.l.ld.xp q6, a2, a0, q4, q5 # encoding: [0x7f,0x38,0x22,0x95] +esp.vcmulas.s8.qacc.h q4, q2 +# CHECK: esp.vcmulas.s8.qacc.h q4, q2 # encoding: [0x5f,0x80,0x85,0x8a] +esp.vcmulas.s8.qacc.h.ld.ip q5, a1, 96, q3, q4 +# CHECK: esp.vcmulas.s8.qacc.h.ld.ip q5, a1, 96, q3, q4 # encoding: [0x3b,0xf7,0xb1,0x72] +esp.vcmulas.s8.qacc.h.ld.xp q6, a1, a3, q1, q6 +# CHECK: esp.vcmulas.s8.qacc.h.ld.xp q6, a1, a3, q1, q6 # encoding: [0x7f,0xb8,0x51,0x3a] +esp.vcmulas.s8.qacc.l q0, q2 +# CHECK: esp.vcmulas.s8.qacc.l q0, q2 # encoding: [0x5f,0x80,0x81,0x0a] +esp.vcmulas.s8.qacc.l.ld.ip q4, a2, -128, q0, q2 +# CHECK: esp.vcmulas.s8.qacc.l.ld.ip q4, a2, -128, q0, q2 # encoding: [0x3b,0x73,0xc2,0x08] +esp.vcmulas.s8.qacc.l.ld.xp q0, a5, a1, q2, q2 +# CHECK: esp.vcmulas.s8.qacc.l.ld.xp q0, a5, a1, q2, q2 # encoding: [0x7f,0xa0,0x33,0x48] +esp.vmulas.s16.qacc q2, q3 +# CHECK: esp.vmulas.s16.qacc q2, q3 # encoding: [0x5f,0x00,0xc7,0x4e] +esp.vmulas.s16.qacc.ld.ip q1, a5, -112, q1, q4 +# CHECK: esp.vmulas.s16.qacc.ld.ip q1, a5, -112, q1, q4 # encoding: [0xbb,0xe6,0xe3,0x32] +esp.vmulas.s16.qacc.ld.xp q4, a4, a1, q6, q3 +# CHECK: esp.vmulas.s16.qacc.ld.xp q4, a4, a1, q6, q3 # encoding: [0xff,0x32,0x3b,0xce] +esp.vmulas.s16.qacc.st.ip q6, a5, -96, q5, q2 +# CHECK: esp.vmulas.s16.qacc.st.ip q6, a5, -96, q5, q2 # encoding: [0xbb,0xf8,0xeb,0xab] +esp.vmulas.s16.qacc.st.xp q4, a2, a5, q3, q1 +# CHECK: esp.vmulas.s16.qacc.st.xp q4, a2, a5, q3, q1 # encoding: [0xff,0x32,0x7a,0x67] +esp.vmulas.s16.xacc q5, q2 +# CHECK: esp.vmulas.s16.xacc q5, q2 # encoding: [0x5f,0x00,0xc3,0xaa] +esp.vmulas.s16.xacc.ld.ip q4, a3, 48, q3, q6 +# CHECK: 
esp.vmulas.s16.xacc.ld.ip q4, a3, 48, q3, q6 # encoding: [0xbb,0xf2,0xca,0x78] +esp.vmulas.s16.xacc.ld.xp q6, a2, a5, q0, q0 +# CHECK: esp.vmulas.s16.xacc.ld.xp q6, a2, a5, q0, q0 # encoding: [0xff,0x3a,0x7a,0x00] +esp.vmulas.s16.xacc.st.ip q1, a1, 16, q4, q3 +# CHECK: esp.vmulas.s16.xacc.st.ip q1, a1, 16, q4, q3 # encoding: [0xbb,0xe6,0xc1,0x8d] +esp.vmulas.s16.xacc.st.xp q2, a4, a0, q5, q0 +# CHECK: esp.vmulas.s16.xacc.st.xp q2, a4, a0, q5, q0 # encoding: [0xff,0x2a,0x2b,0xa1] +esp.vmulas.s8.qacc q4, q6 +# CHECK: esp.vmulas.s8.qacc q4, q6 # encoding: [0x5f,0x00,0xc5,0x9a] +esp.vmulas.s8.qacc.ld.ip q5, a2, 64, q5, q0 +# CHECK: esp.vmulas.s8.qacc.ld.ip q5, a2, 64, q5, q0 # encoding: [0xbb,0x74,0x52,0xa2] +esp.vmulas.s8.qacc.ld.xp q1, a1, a3, q5, q0 +# CHECK: esp.vmulas.s8.qacc.ld.xp q1, a1, a3, q5, q0 # encoding: [0xff,0xa6,0x51,0xa2] +esp.vmulas.s8.qacc.st.ip q5, a3, 16, q5, q1 +# CHECK: esp.vmulas.s8.qacc.st.ip q5, a3, 16, q5, q1 # encoding: [0xbb,0xf6,0x42,0xa7] +esp.vmulas.s8.qacc.st.xp q5, a1, a4, q4, q0 +# CHECK: esp.vmulas.s8.qacc.st.xp q5, a1, a4, q4, q0 # encoding: [0xff,0xb6,0x61,0x83] +esp.vmulas.s8.xacc q1, q0 +# CHECK: esp.vmulas.s8.xacc q1, q0 # encoding: [0x5f,0x00,0xc1,0x22] +esp.vmulas.s8.xacc.ld.ip q0, a4, 16, q4, q4 +# CHECK: esp.vmulas.s8.xacc.ld.ip q0, a4, 16, q4, q4 # encoding: [0xbb,0x62,0x43,0x90] +esp.vmulas.s8.xacc.ld.xp q0, a5, a2, q4, q2 +# CHECK: esp.vmulas.s8.xacc.ld.xp q0, a5, a2, q4, q2 # encoding: [0xff,0xa2,0x43,0x88] +esp.vmulas.s8.xacc.st.ip q3, a3, -32, q0, q5 +# CHECK: esp.vmulas.s8.xacc.st.ip q3, a3, -32, q0, q5 # encoding: [0xbb,0xec,0x7a,0x15] +esp.vmulas.s8.xacc.st.xp q0, a5, a4, q3, q3 +# CHECK: esp.vmulas.s8.xacc.st.xp q0, a5, a4, q3, q3 # encoding: [0xff,0xa2,0x63,0x6d] +esp.vmulas.u16.qacc q2, q0 +# CHECK: esp.vmulas.u16.qacc q2, q0 # encoding: [0x5f,0x00,0xc6,0x42] +esp.vmulas.u16.qacc.ld.ip q5, a4, 80, q1, q3 +# CHECK: esp.vmulas.u16.qacc.ld.ip q5, a4, 80, q1, q3 # encoding: [0xbb,0x76,0x93,0x2e] 
+esp.vmulas.u16.qacc.ld.xp q5, a5, a3, q6, q3 +# CHECK: esp.vmulas.u16.qacc.ld.xp q5, a5, a3, q6, q3 # encoding: [0xff,0xb4,0x5b,0xce] +esp.vmulas.u16.qacc.st.ip q5, a3, -80, q2, q0 +# CHECK: esp.vmulas.u16.qacc.st.ip q5, a3, -80, q2, q0 # encoding: [0xbb,0xf6,0xaa,0x43] +esp.vmulas.u16.qacc.st.xp q0, a2, a4, q5, q5 +# CHECK: esp.vmulas.u16.qacc.st.xp q0, a2, a4, q5, q5 # encoding: [0xff,0x20,0x6a,0xb7] +esp.vmulas.u16.xacc q2, q6 +# CHECK: esp.vmulas.u16.xacc q2, q6 # encoding: [0x5f,0x00,0xc2,0x5a] +esp.vmulas.u16.xacc.ld.ip q1, a3, 16, q5, q2 +# CHECK: esp.vmulas.u16.xacc.ld.ip q1, a3, 16, q5, q2 # encoding: [0xbb,0xe6,0x82,0xa8] +esp.vmulas.u16.xacc.ld.xp q2, a3, a4, q1, q3 +# CHECK: esp.vmulas.u16.xacc.ld.xp q2, a3, a4, q1, q3 # encoding: [0xff,0xa8,0x6a,0x2c] +esp.vmulas.u16.xacc.st.ip q3, a4, -112, q1, q3 +# CHECK: esp.vmulas.u16.xacc.st.ip q3, a4, -112, q1, q3 # encoding: [0xbb,0x6e,0xa3,0x2d] +esp.vmulas.u16.xacc.st.xp q4, a0, a2, q0, q3 +# CHECK: esp.vmulas.u16.xacc.st.xp q4, a0, a2, q0, q3 # encoding: [0xff,0x30,0x49,0x0d] +esp.vmulas.u8.qacc q6, q1 +# CHECK: esp.vmulas.u8.qacc q6, q1 # encoding: [0x5f,0x00,0xc4,0xc6] +esp.vmulas.u8.qacc.ld.ip q0, a4, -80, q0, q3 +# CHECK: esp.vmulas.u8.qacc.ld.ip q0, a4, -80, q0, q3 # encoding: [0xbb,0x62,0x2b,0x0e] +esp.vmulas.u8.qacc.ld.xp q0, a3, a0, q1, q3 +# CHECK: esp.vmulas.u8.qacc.ld.xp q0, a3, a0, q1, q3 # encoding: [0xff,0xa0,0x22,0x2e] +esp.vmulas.u8.qacc.st.ip q2, a3, 64, q0, q0 +# CHECK: esp.vmulas.u8.qacc.st.ip q2, a3, 64, q0, q0 # encoding: [0xbb,0xe8,0x12,0x03] +esp.vmulas.u8.qacc.st.xp q6, a2, a2, q3, q1 +# CHECK: esp.vmulas.u8.qacc.st.xp q6, a2, a2, q3, q1 # encoding: [0xff,0x38,0x42,0x67] +esp.vmulas.u8.xacc q3, q3 +# CHECK: esp.vmulas.u8.xacc q3, q3 # encoding: [0x5f,0x00,0xc0,0x6e] +esp.vmulas.u8.xacc.ld.ip q0, a5, 16, q0, q1 +# CHECK: esp.vmulas.u8.xacc.ld.ip q0, a5, 16, q0, q1 # encoding: [0xbb,0xe2,0x03,0x04] +esp.vmulas.u8.xacc.ld.xp q4, a0, a2, q1, q5 +# CHECK: esp.vmulas.u8.xacc.ld.xp q4, a0, 
a2, q1, q5 # encoding: [0xff,0x30,0x41,0x34] +esp.vmulas.u8.xacc.st.ip q4, a1, -48, q4, q6 +# CHECK: esp.vmulas.u8.xacc.st.ip q4, a1, -48, q4, q6 # encoding: [0xbb,0xf2,0x31,0x99] +esp.vmulas.u8.xacc.st.xp q4, a2, a3, q0, q2 +# CHECK: esp.vmulas.u8.xacc.st.xp q4, a2, a3, q0, q2 # encoding: [0xff,0x30,0x52,0x09] +esp.vmulas.s16.qacc.ldbc.incp q5, a2, q0, q6 +# CHECK: esp.vmulas.s16.qacc.ldbc.incp q5, a2, q0, q6 # encoding: [0xbb,0x75,0x62,0x18] +esp.vmulas.s8.qacc.ldbc.incp q3, a5, q4, q2 +# CHECK: esp.vmulas.s8.qacc.ldbc.incp q3, a5, q4, q2 # encoding: [0xbb,0xed,0x23,0x88] +esp.vmulas.u16.qacc.ldbc.incp q2, a1, q1, q3 +# CHECK: esp.vmulas.u16.qacc.ldbc.incp q2, a1, q1, q3 # encoding: [0xbb,0xe9,0x41,0x2c] +esp.vmulas.u8.qacc.ldbc.incp q0, a1, q2, q0 +# CHECK: esp.vmulas.u8.qacc.ldbc.incp q0, a1, q2, q0 # encoding: [0xbb,0xe1,0x01,0x40] +esp.vsmulas.s16.qacc q0, q5, 5 +# CHECK: esp.vsmulas.s16.qacc q0, q5, 5 # encoding: [0x5f,0x80,0xf2,0x16] +esp.vsmulas.s16.qacc.ld.incp q1, a2, q4, q2, 4 +# CHECK: esp.vsmulas.s16.qacc.ld.incp q1, a2, q4, q2, 4 # encoding: [0xbb,0x67,0xa2,0x8b] +esp.vsmulas.s8.qacc q2, q1, 13 +# CHECK: esp.vsmulas.s8.qacc q2, q1, 13 # encoding: [0x5f,0x80,0xb6,0x46] +esp.vsmulas.s8.qacc.ld.incp q2, a5, q1, q3, 0 +# CHECK: esp.vsmulas.s8.qacc.ld.incp q2, a5, q1, q3, 0 # encoding: [0xbb,0xeb,0x83,0x2d] +esp.vsmulas.u16.qacc q6, q1, 5 +# CHECK: esp.vsmulas.u16.qacc q6, q1, 5 # encoding: [0x5f,0x80,0xd2,0xc6] +esp.vsmulas.u16.qacc.ld.incp q0, a0, q6, q6, 0 +# CHECK: esp.vsmulas.u16.qacc.ld.incp q0, a0, q6, q6, 0 # encoding: [0xbb,0x63,0x81,0xda] +esp.vsmulas.u8.qacc q0, q3, 7 +# CHECK: esp.vsmulas.u8.qacc q0, q3, 7 # encoding: [0x5f,0x80,0x93,0x0e] +esp.vsmulas.u8.qacc.ld.incp q6, a0, q6, q5, 8 +# CHECK: esp.vsmulas.u8.qacc.ld.incp q6, a0, q6, q5, 8 # encoding: [0xbb,0x7b,0xc1,0xd4] +esp.cmul.s16 q0, q2, q4, 3 +# CHECK: esp.cmul.s16 q0, q2, q4, 3 # encoding: [0x5f,0xa4,0x07,0x50] +esp.cmul.s16.ld.incp q6, a4, q1, q1, q5, 0 +# CHECK: 
esp.cmul.s16.ld.incp q6, a4, q1, q1, q5, 0 # encoding: [0xbf,0x58,0xc3,0x34] +esp.cmul.s16.st.incp q4, a0, q0, q5, q0, 0 +# CHECK: esp.cmul.s16.st.incp q4, a0, q0, q5, q0, 0 # encoding: [0x3f,0x50,0xc1,0xa2] +esp.cmul.s8 q6, q1, q6, 2 +# CHECK: esp.cmul.s8 q6, q1, q6, 2 # encoding: [0x5f,0x27,0x03,0x38] +esp.cmul.s8.ld.incp q4, a3, q0, q4, q2, 3 +# CHECK: esp.cmul.s8.ld.incp q4, a3, q0, q4, q2, 3 # encoding: [0x3f,0xd0,0x72,0x88] +esp.cmul.s8.st.incp q5, a1, q5, q2, q0, 3 +# CHECK: esp.cmul.s8.st.incp q5, a1, q5, q2, q0, 3 # encoding: [0xbf,0xd6,0x71,0x42] +esp.cmul.u16 q2, q3, q5, 1 +# CHECK: esp.cmul.u16 q2, q3, q5, 1 # encoding: [0x5f,0xa5,0x04,0x74] +esp.cmul.u16.ld.incp q1, a1, q6, q5, q1, 0 +# CHECK: esp.cmul.u16.ld.incp q1, a1, q6, q5, q1, 0 # encoding: [0x3f,0xc7,0x81,0xa4] +esp.cmul.u16.st.incp q6, a1, q2, q2, q4, 0 +# CHECK: esp.cmul.u16.st.incp q6, a1, q2, q2, q4, 0 # encoding: [0x3f,0xd9,0x81,0x52] +esp.cmul.u8 q1, q4, q3, 3 +# CHECK: esp.cmul.u8 q1, q4, q3, 3 # encoding: [0xdf,0xa4,0x01,0x8c] +esp.cmul.u8.ld.incp q2, a5, q3, q0, q5, 1 +# CHECK: esp.cmul.u8.ld.incp q2, a5, q3, q0, q5, 1 # encoding: [0xbf,0xc9,0x13,0x14] +esp.cmul.u8.st.incp q4, a2, q0, q4, q4, 0 +# CHECK: esp.cmul.u8.st.incp q4, a2, q0, q4, q4, 0 # encoding: [0x3f,0x50,0x02,0x92] +esp.max.s16.a q5, a2 +# CHECK: esp.max.s16.a q5, a2 # encoding: [0x5b,0x52,0xc8,0x91] +esp.max.s32.a q5, a5 +# CHECK: esp.max.s32.a q5, a5 # encoding: [0xdb,0x53,0xa8,0x91] +esp.max.s8.a q2, a0 +# CHECK: esp.max.s8.a q2, a0 # encoding: [0x5b,0x51,0x40,0x92] +esp.max.u16.a q6, a3 +# CHECK: esp.max.u16.a q6, a3 # encoding: [0xdb,0x52,0x88,0x92] +esp.max.u32.a q0, a2 +# CHECK: esp.max.u32.a q0, a2 # encoding: [0x5b,0x52,0x20,0x90] +esp.max.u8.a q5, a0 +# CHECK: esp.max.u8.a q5, a0 # encoding: [0x5b,0x51,0x08,0x91] +esp.min.s16.a q0, a3 +# CHECK: esp.min.s16.a q0, a3 # encoding: [0xdb,0x52,0xd0,0x90] +esp.min.s32.a q1, a1 +# CHECK: esp.min.s32.a q1, a1 # encoding: [0xdb,0x51,0xb0,0x91] +esp.min.s8.a q3, a4 +# 
CHECK: esp.min.s8.a q3, a4 # encoding: [0x5b,0x53,0x50,0x93] +esp.min.u16.a q6, a0 +# CHECK: esp.min.u16.a q6, a0 # encoding: [0x5b,0x51,0x98,0x92] +esp.min.u32.a q2, a3 +# CHECK: esp.min.u32.a q2, a3 # encoding: [0xdb,0x52,0x30,0x92] +esp.min.u8.a q1, a3 +# CHECK: esp.min.u8.a q1, a3 # encoding: [0xdb,0x52,0x10,0x91] +esp.vabs.16 q5, q2 +# CHECK: esp.vabs.16 q5, q2 # encoding: [0x5b,0x10,0x50,0x88] +esp.vabs.32 q0, q3 +# CHECK: esp.vabs.32 q0, q3 # encoding: [0x5b,0x08,0x00,0x8c] +esp.vabs.8 q6, q1 +# CHECK: esp.vabs.8 q6, q1 # encoding: [0x5b,0x00,0x60,0x84] +esp.vadd.s16 q1, q0, q3 +# CHECK: esp.vadd.s16 q1, q0, q3 # encoding: [0x5f,0x06,0x94,0x0e] +esp.vadd.s16.ld.incp q2, a3, q4, q3, q1 +# CHECK: esp.vadd.s16.ld.incp q2, a3, q4, q3, q1 # encoding: [0x3b,0xe8,0x4a,0x65] +esp.vadd.s16.st.incp q6, a2, q1, q3, q1 +# CHECK: esp.vadd.s16.st.incp q6, a2, q1, q3, q1 # encoding: [0x3b,0x78,0x1a,0x67] +esp.vadd.s32 q2, q5, q3 +# CHECK: esp.vadd.s32 q2, q5, q3 # encoding: [0x5f,0x05,0xa4,0xae] +esp.vadd.s32.ld.incp q5, a0, q0, q6, q2 +# CHECK: esp.vadd.s32.ld.incp q5, a0, q0, q6, q2 # encoding: [0x3b,0x75,0x01,0xc9] +esp.vadd.s32.st.incp q5, a0, q2, q6, q1 +# CHECK: esp.vadd.s32.st.incp q5, a0, q2, q6, q1 # encoding: [0x3b,0x75,0x21,0xc7] +esp.vadd.s8 q6, q4, q1 +# CHECK: esp.vadd.s8 q6, q4, q1 # encoding: [0x5f,0x06,0xe0,0x86] +esp.vadd.s8.ld.incp q6, a0, q3, q0, q4 +# CHECK: esp.vadd.s8.ld.incp q6, a0, q3, q0, q4 # encoding: [0x3b,0x78,0x39,0x10] +esp.vadd.s8.st.incp q0, a5, q0, q0, q4 +# CHECK: esp.vadd.s8.st.incp q0, a5, q0, q0, q4 # encoding: [0x3b,0xe0,0x0b,0x12] +esp.vadd.u16 q3, q5, q0 +# CHECK: esp.vadd.u16 q3, q5, q0 # encoding: [0x5f,0x04,0xb4,0xa2] +esp.vadd.u16.ld.incp q6, a1, q4, q6, q4 +# CHECK: esp.vadd.u16.ld.incp q6, a1, q4, q6, q4 # encoding: [0x3b,0xf8,0x41,0xd1] +esp.vadd.u16.st.incp q5, a2, q4, q3, q5 +# CHECK: esp.vadd.u16.st.incp q5, a2, q4, q3, q5 # encoding: [0x3b,0x74,0x42,0x77] +esp.vadd.u32 q5, q2, q2 +# CHECK: esp.vadd.u32 q5, q2, q2 # 
encoding: [0x5f,0x05,0xd0,0x4a] +esp.vadd.u32.ld.incp q3, a0, q6, q1, q2 +# CHECK: esp.vadd.u32.ld.incp q3, a0, q6, q1, q2 # encoding: [0x3b,0x6d,0x61,0x28] +esp.vadd.u32.st.incp q1, a1, q2, q6, q6 +# CHECK: esp.vadd.u32.st.incp q1, a1, q2, q6, q6 # encoding: [0x3b,0xe5,0x21,0xda] +esp.vadd.u8 q0, q0, q3 +# CHECK: esp.vadd.u8 q0, q0, q3 # encoding: [0x5f,0x04,0x80,0x0e] +esp.vadd.u8.ld.incp q3, a1, q0, q0, q1 +# CHECK: esp.vadd.u8.ld.incp q3, a1, q0, q0, q1 # encoding: [0x3b,0xec,0x01,0x04] +esp.vadd.u8.st.incp q1, a1, q0, q4, q6 +# CHECK: esp.vadd.u8.st.incp q1, a1, q0, q4, q6 # encoding: [0x3b,0xe4,0x01,0x9a] +esp.vclamp.s16 q0, q5, 4 +# CHECK: esp.vclamp.s16 q0, q5, 4 # encoding: [0x5b,0x50,0x00,0xa1] +esp.vmax.s16 q4, q1, q2 +# CHECK: esp.vmax.s16 q4, q1, q2 # encoding: [0x5f,0xae,0x06,0x28] +esp.vmax.s16.ld.incp q3, a5, q3, q4, q4 +# CHECK: esp.vmax.s16.ld.incp q3, a5, q3, q4, q4 # encoding: [0xbf,0xcd,0x6b,0x90] +esp.vmax.s16.st.incp q4, a4, q0, q5, q6 +# CHECK: esp.vmax.s16.st.incp q4, a4, q0, q5, q6 # encoding: [0x3f,0x50,0xeb,0xb8] +esp.vmax.s32 q4, q0, q4 +# CHECK: esp.vmax.s32 q4, q0, q4 # encoding: [0x5f,0xae,0x05,0x10] +esp.vmax.s32.ld.incp q2, a0, q2, q2, q1 +# CHECK: esp.vmax.s32.ld.incp q2, a0, q2, q2, q1 # encoding: [0x3f,0x49,0x59,0x44] +esp.vmax.s32.st.incp q4, a1, q1, q4, q2 +# CHECK: esp.vmax.s32.st.incp q4, a1, q1, q4, q2 # encoding: [0xbf,0xd0,0xd9,0x88] +esp.vmax.s8 q3, q1, q0 +# CHECK: esp.vmax.s8 q3, q1, q0 # encoding: [0xdf,0xad,0x02,0x20] +esp.vmax.s8.ld.incp q4, a3, q5, q2, q5 +# CHECK: esp.vmax.s8.ld.incp q4, a3, q5, q2, q5 # encoding: [0xbf,0xd2,0x2a,0x54] +esp.vmax.s8.st.incp q0, a3, q3, q4, q5 +# CHECK: esp.vmax.s8.st.incp q0, a3, q3, q4, q5 # encoding: [0xbf,0xc1,0xaa,0x94] +esp.vmax.u16 q6, q3, q2 +# CHECK: esp.vmax.u16 q6, q3, q2 # encoding: [0x5f,0xaf,0x04,0x68] +esp.vmax.u16.ld.incp q5, a1, q5, q5, q3 +# CHECK: esp.vmax.u16.ld.incp q5, a1, q5, q5, q3 # encoding: [0xbf,0xd6,0x49,0xac] +esp.vmax.u16.st.incp q2, a4, q3, q2, q5 +# 
CHECK: esp.vmax.u16.st.incp q2, a4, q3, q2, q5 # encoding: [0xbf,0x49,0xcb,0x54] +esp.vmax.u32 q1, q0, q3 +# CHECK: esp.vmax.u32 q1, q0, q3 # encoding: [0xdf,0xac,0x01,0x0c] +esp.vmax.u32.ld.incp q3, a1, q5, q0, q4 +# CHECK: esp.vmax.u32.ld.incp q3, a1, q5, q0, q4 # encoding: [0xbf,0xce,0x19,0x10] +esp.vmax.u32.st.incp q1, a3, q2, q5, q5 +# CHECK: esp.vmax.u32.st.incp q1, a3, q2, q5, q5 # encoding: [0x3f,0xc5,0x9a,0xb4] +esp.vmax.u8 q1, q4, q0 +# CHECK: esp.vmax.u8 q1, q4, q0 # encoding: [0xdf,0xac,0x00,0x80] +esp.vmax.u8.ld.incp q1, a2, q3, q4, q6 +# CHECK: esp.vmax.u8.ld.incp q1, a2, q3, q4, q6 # encoding: [0xbf,0x45,0x0a,0x98] +esp.vmax.u8.st.incp q2, a3, q5, q3, q5 +# CHECK: esp.vmax.u8.st.incp q2, a3, q5, q3, q5 # encoding: [0xbf,0xca,0x8a,0x74] +esp.vmin.s16 q5, q3, q2 +# CHECK: esp.vmin.s16 q5, q3, q2 # encoding: [0xdf,0x3e,0x06,0x68] +esp.vmin.s16.ld.incp q6, a3, q1, q4, q5 +# CHECK: esp.vmin.s16.ld.incp q6, a3, q1, q4, q5 # encoding: [0xbf,0xd8,0x6a,0x95] +esp.vmin.s16.st.incp q0, a1, q5, q0, q3 +# CHECK: esp.vmin.s16.st.incp q0, a1, q5, q0, q3 # encoding: [0xbf,0xc2,0xe9,0x0d] +esp.vmin.s32 q3, q1, q4 +# CHECK: esp.vmin.s32 q3, q1, q4 # encoding: [0xdf,0x3d,0x05,0x30] +esp.vmin.s32.ld.incp q4, a0, q1, q6, q3 +# CHECK: esp.vmin.s32.ld.incp q4, a0, q1, q6, q3 # encoding: [0xbf,0x50,0x59,0xcd] +esp.vmin.s32.st.incp q6, a5, q4, q6, q2 +# CHECK: esp.vmin.s32.st.incp q6, a5, q4, q6, q2 # encoding: [0x3f,0xda,0xdb,0xc9] +esp.vmin.s8 q3, q2, q1 +# CHECK: esp.vmin.s8 q3, q2, q1 # encoding: [0xdf,0x3d,0x02,0x44] +esp.vmin.s8.ld.incp q1, a4, q5, q5, q6 +# CHECK: esp.vmin.s8.ld.incp q1, a4, q5, q5, q6 # encoding: [0xbf,0x46,0x2b,0xb9] +esp.vmin.s8.st.incp q5, a1, q6, q2, q0 +# CHECK: esp.vmin.s8.st.incp q5, a1, q6, q2, q0 # encoding: [0x3f,0xd7,0xa9,0x41] +esp.vmin.u16 q5, q0, q1 +# CHECK: esp.vmin.u16 q5, q0, q1 # encoding: [0xdf,0x3e,0x04,0x04] +esp.vmin.u16.ld.incp q3, a5, q5, q3, q5 +# CHECK: esp.vmin.u16.ld.incp q3, a5, q5, q3, q5 # encoding: 
[0xbf,0xce,0x4b,0x75] +esp.vmin.u16.st.incp q2, a3, q5, q6, q3 +# CHECK: esp.vmin.u16.st.incp q2, a3, q5, q6, q3 # encoding: [0xbf,0xca,0xca,0xcd] +esp.vmin.u32 q5, q0, q2 +# CHECK: esp.vmin.u32 q5, q0, q2 # encoding: [0xdf,0x3e,0x01,0x08] +esp.vmin.u32.ld.incp q4, a5, q4, q4, q2 +# CHECK: esp.vmin.u32.ld.incp q4, a5, q4, q4, q2 # encoding: [0x3f,0xd2,0x1b,0x89] +esp.vmin.u32.st.incp q4, a2, q1, q6, q3 +# CHECK: esp.vmin.u32.st.incp q4, a2, q1, q6, q3 # encoding: [0xbf,0x50,0x9a,0xcd] +esp.vmin.u8 q0, q0, q0 +# CHECK: esp.vmin.u8 q0, q0, q0 # encoding: [0x5f,0x3c,0x00,0x00] +esp.vmin.u8.ld.incp q2, a5, q1, q0, q6 +# CHECK: esp.vmin.u8.ld.incp q2, a5, q1, q0, q6 # encoding: [0xbf,0xc8,0x0b,0x19] +esp.vmin.u8.st.incp q1, a2, q0, q1, q1 +# CHECK: esp.vmin.u8.st.incp q1, a2, q0, q1, q1 # encoding: [0x3f,0x44,0x8a,0x25] +esp.vmul.s16 q1, q2, q1 +# CHECK: esp.vmul.s16 q1, q2, q1 # encoding: [0xdf,0xbc,0x06,0x44] +esp.vmul.s16.ld.incp q4, a3, q1, q4, q2 +# CHECK: esp.vmul.s16.ld.incp q4, a3, q1, q4, q2 # encoding: [0xbf,0xd0,0x6a,0x8b] +esp.vmul.s16.s8xs8 q4, q5, q1, q1 +# CHECK: esp.vmul.s16.s8xs8 q4, q5, q1, q1 # encoding: [0x5f,0x06,0xd3,0x26] +esp.vmul.s16.st.incp q2, a3, q1, q4, q0 +# CHECK: esp.vmul.s16.st.incp q2, a3, q1, q4, q0 # encoding: [0xbf,0xc8,0xea,0x83] +esp.vmul.s32.s16xs16 q4, q5, q2, q1 +# CHECK: esp.vmul.s32.s16xs16 q4, q5, q2, q1 # encoding: [0x5f,0x06,0xd7,0x46] +esp.vmul.s8 q1, q4, q1 +# CHECK: esp.vmul.s8 q1, q4, q1 # encoding: [0xdf,0xbc,0x02,0x84] +esp.vmul.s8.ld.incp q0, a0, q4, q0, q4 +# CHECK: esp.vmul.s8.ld.incp q0, a0, q4, q0, q4 # encoding: [0x3f,0x42,0x29,0x13] +esp.vmul.s8.st.incp q4, a3, q5, q6, q6 +# CHECK: esp.vmul.s8.st.incp q4, a3, q5, q6, q6 # encoding: [0xbf,0xd2,0xaa,0xdb] +esp.vmul.u16 q6, q6, q1 +# CHECK: esp.vmul.u16 q6, q6, q1 # encoding: [0x5f,0xbf,0x04,0xc4] +esp.vmul.u16.ld.incp q1, a5, q5, q4, q6 +# CHECK: esp.vmul.u16.ld.incp q1, a5, q5, q4, q6 # encoding: [0xbf,0xc6,0x4b,0x9b] +esp.vmul.u16.st.incp q3, a4, q5, q3, q3 +# 
CHECK: esp.vmul.u16.st.incp q3, a4, q5, q3, q3 # encoding: [0xbf,0x4e,0xcb,0x6f] +esp.vmul.u8 q0, q1, q5 +# CHECK: esp.vmul.u8 q0, q1, q5 # encoding: [0x5f,0xbc,0x00,0x34] +esp.vmul.u8.ld.incp q5, a3, q0, q4, q5 +# CHECK: esp.vmul.u8.ld.incp q5, a3, q0, q4, q5 # encoding: [0x3f,0xd4,0x0a,0x97] +esp.vmul.u8.st.incp q5, a0, q2, q4, q5 +# CHECK: esp.vmul.u8.st.incp q5, a0, q2, q4, q5 # encoding: [0x3f,0x55,0x89,0x97] +esp.vprelu.s16 q0, q3, q2, a4 +# CHECK: esp.vprelu.s16 q0, q3, q2, a4 # encoding: [0x5f,0x60,0xa3,0x4e] +esp.vprelu.s8 q4, q5, q5, a3 +# CHECK: esp.vprelu.s8 q4, q5, q5, a3 # encoding: [0x5f,0xe2,0x22,0xb6] +esp.vrelu.s16 q1, a1, a4 +# CHECK: esp.vrelu.s16 q1, a1, a4 # encoding: [0x5b,0x5c,0x33,0x86] +esp.vrelu.s8 q6, a1, a5 +# CHECK: esp.vrelu.s8 q6, a1, a5 # encoding: [0x5b,0xd8,0x33,0x9a] +esp.vsadds.s16 q5, q6, a4 +# CHECK: esp.vsadds.s16 q5, q6, a4 # encoding: [0x5f,0x02,0xd3,0xda] +esp.vsadds.s8 q6, q1, a4 +# CHECK: esp.vsadds.s8 q6, q1, a4 # encoding: [0x5f,0x02,0xe3,0x2a] +esp.vsadds.u16 q0, q6, a2 +# CHECK: esp.vsadds.u16 q0, q6, a2 # encoding: [0x5f,0x02,0x82,0xd2] +esp.vsadds.u8 q3, q3, a1 +# CHECK: esp.vsadds.u8 q3, q3, a1 # encoding: [0x5f,0x82,0xb1,0x62] +esp.vsat.s16 q3, q6, a0, a4 +# CHECK: esp.vsat.s16 q3, q6, a0, a4 # encoding: [0xbb,0x59,0x61,0xd8] +esp.vsat.s32 q6, q4, a3, a2 +# CHECK: esp.vsat.s32 q6, q4, a3, a2 # encoding: [0x3b,0xd7,0x42,0x98] +esp.vsat.s8 q3, q2, a2, a1 +# CHECK: esp.vsat.s8 q3, q2, a2, a1 # encoding: [0xbb,0x49,0x32,0x58] +esp.vsat.u16 q5, q5, a0, a1 +# CHECK: esp.vsat.u16 q5, q5, a0, a1 # encoding: [0xbb,0x52,0x31,0xb8] +esp.vsat.u32 q6, q5, a3, a3 +# CHECK: esp.vsat.u32 q6, q5, a3, a3 # encoding: [0x3b,0xc7,0x52,0xb8] +esp.vsat.u8 q4, q1, a0, a3 +# CHECK: esp.vsat.u8 q4, q1, a0, a3 # encoding: [0x3b,0x42,0x51,0x38] +esp.vssubs.s16 q1, q6, a1 +# CHECK: esp.vssubs.s16 q1, q6, a1 # encoding: [0x5f,0x82,0x91,0xde] +esp.vssubs.s8 q6, q0, a3 +# CHECK: esp.vssubs.s8 q6, q0, a3 # encoding: [0x5f,0x82,0xe2,0x0e] 
+esp.vssubs.u16 q6, q1, a2 +# CHECK: esp.vssubs.u16 q6, q1, a2 # encoding: [0x5f,0x02,0xe2,0x36] +esp.vssubs.u8 q2, q4, a1 +# CHECK: esp.vssubs.u8 q2, q4, a1 # encoding: [0x5f,0x82,0xa1,0x86] +esp.vsub.s16 q3, q1, q2 +# CHECK: esp.vsub.s16 q3, q1, q2 # encoding: [0xdf,0x06,0xb4,0x2a] +esp.vsub.s16.ld.incp q5, a2, q6, q3, q5 +# CHECK: esp.vsub.s16.ld.incp q5, a2, q6, q3, q5 # encoding: [0x3b,0x75,0xea,0x75] +esp.vsub.s16.st.incp q4, a0, q1, q1, q1 +# CHECK: esp.vsub.s16.st.incp q4, a0, q1, q1, q1 # encoding: [0x3b,0x71,0x99,0x27] +esp.vsub.s32 q4, q1, q0 +# CHECK: esp.vsub.s32 q4, q1, q0 # encoding: [0xdf,0x05,0xc4,0x22] +esp.vsub.s32.ld.incp q2, a2, q5, q0, q5 +# CHECK: esp.vsub.s32.ld.incp q2, a2, q5, q0, q5 # encoding: [0x3b,0x6b,0x52,0x15] +esp.vsub.s32.st.incp q5, a5, q4, q1, q6 +# CHECK: esp.vsub.s32.st.incp q5, a5, q4, q1, q6 # encoding: [0x3b,0xf7,0x43,0x3b] +esp.vsub.s8 q2, q1, q3 +# CHECK: esp.vsub.s8 q2, q1, q3 # encoding: [0xdf,0x06,0xa0,0x2e] +esp.vsub.s8.ld.incp q4, a4, q5, q4, q0 +# CHECK: esp.vsub.s8.ld.incp q4, a4, q5, q4, q0 # encoding: [0x3b,0x71,0xdb,0x80] +esp.vsub.s8.st.incp q1, a5, q0, q1, q1 +# CHECK: esp.vsub.s8.st.incp q1, a5, q0, q1, q1 # encoding: [0x3b,0xe5,0x8b,0x26] +esp.vsub.u16 q1, q2, q1 +# CHECK: esp.vsub.u16 q1, q2, q1 # encoding: [0xdf,0x04,0x94,0x46] +esp.vsub.u16.ld.incp q6, a4, q1, q6, q1 +# CHECK: esp.vsub.u16.ld.incp q6, a4, q1, q6, q1 # encoding: [0x3b,0x79,0x93,0xc5] +esp.vsub.u16.st.incp q1, a0, q0, q1, q6 +# CHECK: esp.vsub.u16.st.incp q1, a0, q0, q1, q6 # encoding: [0x3b,0x65,0x81,0x3b] +esp.vsub.u32 q2, q2, q5 +# CHECK: esp.vsub.u32 q2, q2, q5 # encoding: [0xdf,0x05,0xa0,0x56] +esp.vsub.u32.ld.incp q4, a5, q2, q4, q3 +# CHECK: esp.vsub.u32.ld.incp q4, a5, q2, q4, q3 # encoding: [0x3b,0xf3,0x23,0x8c] +esp.vsub.u32.st.incp q0, a0, q3, q3, q1 +# CHECK: esp.vsub.u32.st.incp q0, a0, q3, q3, q1 # encoding: [0x3b,0x63,0x31,0x66] +esp.vsub.u8 q1, q4, q1 +# CHECK: esp.vsub.u8 q1, q4, q1 # encoding: [0xdf,0x04,0x90,0x86] 
+esp.vsub.u8.ld.incp q4, a1, q4, q5, q2 +# CHECK: esp.vsub.u8.ld.incp q4, a1, q4, q5, q2 # encoding: [0x3b,0xf1,0xc1,0xa8] +esp.vsub.u8.st.incp q6, a4, q6, q5, q5 +# CHECK: esp.vsub.u8.st.incp q6, a4, q6, q5, q5 # encoding: [0x3b,0x79,0xe3,0xb6] +esp.addx2 a1, a4, a2 +# CHECK: esp.addx2 a1, a4, a2 # encoding: [0xb3,0x05,0xc7,0x04] +esp.addx4 a1, a3, a3 +# CHECK: esp.addx4 a1, a3, a3 # encoding: [0xb3,0x85,0xd6,0x08] +esp.sat a5, a1, a2 +# CHECK: esp.sat a5, a1, a2 # encoding: [0xb3,0x25,0xf6,0x40] +esp.subx2 a0, a3, a5 +# CHECK: esp.subx2 a0, a3, a5 # encoding: [0x33,0x85,0xf6,0x44] +esp.subx4 a5, a4, a0 +# CHECK: esp.subx4 a5, a4, a0 # encoding: [0xb3,0x07,0xa7,0x48] +esp.andq q4, q2, q2 +# CHECK: esp.andq q4, q2, q2 # encoding: [0x5f,0x22,0x04,0x48] +esp.notq q0, q1 +# CHECK: esp.notq q0, q1 # encoding: [0x5f,0x20,0x06,0x20] +esp.orq q4, q3, q1 +# CHECK: esp.orq q4, q3, q1 # encoding: [0x5f,0x22,0x00,0x64] +esp.xorq q2, q1, q1 +# CHECK: esp.xorq q2, q1, q1 # encoding: [0x5f,0x21,0x02,0x24] +esp.vcmp.eq.s16 q1, q5, q1 +# CHECK: esp.vcmp.eq.s16 q1, q5, q1 # encoding: [0xdf,0xb4,0x01,0xa4] +esp.vcmp.eq.s32 q3, q3, q2 +# CHECK: esp.vcmp.eq.s32 q3, q3, q2 # encoding: [0xdf,0x2d,0x01,0x68] +esp.vcmp.eq.s8 q3, q6, q6 +# CHECK: esp.vcmp.eq.s8 q3, q6, q6 # encoding: [0xdf,0xb5,0x00,0xd8] +esp.vcmp.eq.u16 q6, q2, q5 +# CHECK: esp.vcmp.eq.u16 q6, q2, q5 # encoding: [0x5f,0x37,0x01,0x54] +esp.vcmp.eq.u32 q0, q6, q6 +# CHECK: esp.vcmp.eq.u32 q0, q6, q6 # encoding: [0x5f,0x2c,0x00,0xd8] +esp.vcmp.eq.u8 q4, q2, q4 +# CHECK: esp.vcmp.eq.u8 q4, q2, q4 # encoding: [0x5f,0x36,0x00,0x50] +esp.vcmp.gt.s16 q4, q0, q2 +# CHECK: esp.vcmp.gt.s16 q4, q0, q2 # encoding: [0x5f,0xb6,0x05,0x08] +esp.vcmp.gt.s32 q1, q1, q0 +# CHECK: esp.vcmp.gt.s32 q1, q1, q0 # encoding: [0xdf,0x2c,0x05,0x20] +esp.vcmp.gt.s8 q4, q0, q2 +# CHECK: esp.vcmp.gt.s8 q4, q0, q2 # encoding: [0x5f,0xb6,0x04,0x08] +esp.vcmp.gt.u16 q5, q6, q2 +# CHECK: esp.vcmp.gt.u16 q5, q6, q2 # encoding: [0xdf,0x36,0x05,0xc8] 
+esp.vcmp.gt.u32 q1, q5, q2 +# CHECK: esp.vcmp.gt.u32 q1, q5, q2 # encoding: [0xdf,0x2c,0x04,0xa8] +esp.vcmp.gt.u8 q1, q4, q4 +# CHECK: esp.vcmp.gt.u8 q1, q4, q4 # encoding: [0xdf,0x34,0x04,0x90] +esp.vcmp.lt.s16 q6, q2, q5 +# CHECK: esp.vcmp.lt.s16 q6, q2, q5 # encoding: [0x5f,0xb7,0x03,0x54] +esp.vcmp.lt.s32 q2, q3, q2 +# CHECK: esp.vcmp.lt.s32 q2, q3, q2 # encoding: [0x5f,0x2d,0x03,0x68] +esp.vcmp.lt.s8 q0, q6, q2 +# CHECK: esp.vcmp.lt.s8 q0, q6, q2 # encoding: [0x5f,0xb4,0x02,0xc8] +esp.vcmp.lt.u16 q0, q2, q5 +# CHECK: esp.vcmp.lt.u16 q0, q2, q5 # encoding: [0x5f,0x34,0x03,0x54] +esp.vcmp.lt.u32 q1, q0, q3 +# CHECK: esp.vcmp.lt.u32 q1, q0, q3 # encoding: [0xdf,0x2c,0x02,0x0c] +esp.vcmp.lt.u8 q1, q1, q4 +# CHECK: esp.vcmp.lt.u8 q1, q1, q4 # encoding: [0xdf,0x34,0x02,0x30] +esp.mov.s16.qacc q5 +# CHECK: esp.mov.s16.qacc q5 # encoding: [0x5b,0x14,0x60,0x10] +esp.mov.s8.qacc q3 +# CHECK: esp.mov.s8.qacc q3 # encoding: [0x5b,0x0c,0x20,0x10] +esp.mov.u16.qacc q6 +# CHECK: esp.mov.u16.qacc q6 # encoding: [0x5b,0x18,0x40,0x10] +esp.mov.u8.qacc q3 +# CHECK: esp.mov.u8.qacc q3 # encoding: [0x5b,0x0c,0x00,0x10] +esp.movi.16.a q5, a4, 2 +# CHECK: esp.movi.16.a q5, a4, 2 # encoding: [0x5f,0x03,0xc1,0x14] +esp.movi.16.q q1, a3, 7 +# CHECK: esp.movi.16.q q1, a3, 7 # encoding: [0xdf,0x83,0xe2,0x84] +esp.movi.32.a q6, a0, 2 +# CHECK: esp.movi.32.a q6, a0, 2 # encoding: [0x5f,0x01,0xc0,0x98] +esp.movi.32.q q6, a4, 2 +# CHECK: esp.movi.32.q q6, a4, 2 # encoding: [0x5f,0x04,0x93,0x98] +esp.movi.8.a q5, a5, 6 +# CHECK: esp.movi.8.a q5, a5, 6 # encoding: [0xdf,0x03,0x83,0x14] +esp.movi.8.q q2, a1, 15 +# CHECK: esp.movi.8.q q2, a1, 15 # encoding: [0xdf,0x87,0xa1,0x88] +esp.movx.r.cfg a4 +# CHECK: esp.movx.r.cfg a4 # encoding: [0x5f,0x03,0xd0,0x80] +esp.movx.r.fft.bit.width a2 +# CHECK: esp.movx.r.fft.bit.width a2 # encoding: [0x5f,0x02,0xd0,0x84] +esp.movx.r.perf a4, a1 +# CHECK: esp.movx.r.perf a4, a1 # encoding: [0x5f,0x83,0xd1,0x8c] +esp.movx.r.sar a5 +# CHECK: esp.movx.r.sar a5 # 
encoding: [0xdf,0x03,0xb0,0x80] +esp.movx.r.sar.bytes a4 +# CHECK: esp.movx.r.sar.bytes a4 # encoding: [0x5f,0x03,0xb0,0x88] +esp.movx.r.xacc.h a5 +# CHECK: esp.movx.r.xacc.h a5 # encoding: [0xdf,0x03,0xb0,0x8c] +esp.movx.r.xacc.l a3 +# CHECK: esp.movx.r.xacc.l a3 # encoding: [0xdf,0x02,0xb0,0x84] +esp.movx.w.cfg a1 +# CHECK: esp.movx.w.cfg a1 # encoding: [0x5f,0x80,0xd1,0x90] +esp.movx.w.fft.bit.width a1 +# CHECK: esp.movx.w.fft.bit.width a1 # encoding: [0x5f,0x80,0xd1,0x94] +esp.movx.w.perf a1 +# CHECK: esp.movx.w.perf a1 # encoding: [0x5f,0x80,0xd1,0x9c] +esp.movx.w.sar a2 +# CHECK: esp.movx.w.sar a2 # encoding: [0x5f,0x00,0xb2,0x90] +esp.movx.w.sar.bytes a1 +# CHECK: esp.movx.w.sar.bytes a1 # encoding: [0x5f,0x80,0xb1,0x98] +esp.movx.w.xacc.h a5 +# CHECK: esp.movx.w.xacc.h a5 # encoding: [0x5f,0x80,0xb3,0x9c] +esp.movx.w.xacc.l a2 +# CHECK: esp.movx.w.xacc.l a2 # encoding: [0x5f,0x00,0xb2,0x94] +esp.vext.s16 q3, q1, q5 +# CHECK: esp.vext.s16 q3, q1, q5 # encoding: [0xdb,0x59,0x18,0x19] +esp.vext.s8 q0, q0, q6 +# CHECK: esp.vext.s8 q0, q0, q6 # encoding: [0x5b,0x58,0x08,0x0a] +esp.vext.u16 q4, q2, q6 +# CHECK: esp.vext.u16 q4, q2, q6 # encoding: [0x5b,0x5a,0x28,0x12] +esp.vext.u8 q1, q2, q5 +# CHECK: esp.vext.u8 q1, q2, q5 # encoding: [0xdb,0x58,0x28,0x01] +esp.vunzip.16 q2, q3 +# CHECK: esp.vunzip.16 q2, q3 # encoding: [0x5f,0x00,0x86,0x4e] +esp.vunzip.32 q2, q1 +# CHECK: esp.vunzip.32 q2, q1 # encoding: [0x5f,0x80,0x84,0x46] +esp.vunzip.8 q3, q5 +# CHECK: esp.vunzip.8 q3, q5 # encoding: [0x5f,0x00,0x84,0x76] +esp.vunzipt.16 q5, q4, q5 +# CHECK: esp.vunzipt.16 q5, q4, q5 # encoding: [0x5b,0x4c,0xc8,0xb1] +esp.vunzipt.8 q3, q6, q4 +# CHECK: esp.vunzipt.8 q3, q6, q4 # encoding: [0x5b,0x4c,0x88,0x78] +esp.vzip.16 q1, q0 +# CHECK: esp.vzip.16 q1, q0 # encoding: [0x5f,0x00,0x82,0x22] +esp.vzip.32 q1, q1 +# CHECK: esp.vzip.32 q1, q1 # encoding: [0x5f,0x80,0x80,0x26] +esp.vzip.8 q2, q2 +# CHECK: esp.vzip.8 q2, q2 # encoding: [0x5f,0x00,0x80,0x4a] +esp.vzipt.16 q1, q2, 
q1 +# CHECK: esp.vzipt.16 q1, q2, q1 # encoding: [0x5b,0x4c,0x40,0x29] +esp.vzipt.8 q3, q3, q5 +# CHECK: esp.vzipt.8 q3, q3, q5 # encoding: [0x5b,0x4c,0x08,0x6d] +esp.zero.q q0 +# CHECK: esp.zero.q q0 # encoding: [0x5b,0x00,0x40,0x00] +esp.zero.qacc +# CHECK: esp.zero.qacc # encoding: [0x5b,0x02,0x00,0x00] +esp.zero.xacc +# CHECK: esp.zero.xacc # encoding: [0x5b,0x00,0x00,0x00] +esp.fft.ams.s16.ld.incp q5, a4, q6, q2, q3, q3, q0, 1 +# CHECK: esp.fft.ams.s16.ld.incp q5, a4, q6, q2, q3, q3, q0, 1 # encoding: [0x7b,0x17,0xa3,0x63] +esp.fft.ams.s16.ld.incp.uaup q1, a5, q3, q5, q6, q2, q0, 0 +# CHECK: esp.fft.ams.s16.ld.incp.uaup q1, a5, q3, q5, q6, q2, q0, 0 # encoding: [0xfb,0xa5,0x53,0xc2] +esp.fft.ams.s16.ld.r32.decp q4, a5, q3, q6, q5, q6, q6, 1 +# CHECK: esp.fft.ams.s16.ld.r32.decp q4, a5, q3, q6, q5, q6, q6, 1 # encoding: [0xfb,0xf1,0xeb,0xba] +esp.fft.ams.s16.st.incp q5, q3, a3, a1, q6, q2, q3, 0 +# CHECK: esp.fft.ams.s16.st.incp q5, q3, a3, a1, q6, q2, q3, 0 # encoding: [0xbf,0xb5,0x51,0xce] +esp.fft.bitrev q3, a5 +# CHECK: esp.fft.bitrev q3, a5 # encoding: [0x5b,0x82,0x33,0x10] +esp.fft.cmul.s16.ld.xp q3, a3, a4, q1, q3, q2, 4 +# CHECK: esp.fft.cmul.s16.ld.xp q3, a3, a4, q1, q3, q2, 4 # encoding: [0xbf,0x8c,0x62,0x4e] +esp.fft.cmul.s16.st.xp q1, q2, q3, a4, a0, 7, 1, 1 +# CHECK: esp.fft.cmul.s16.st.xp q1, q2, q3, a4, a0, 7, 1, 1 # encoding: [0xff,0x4f,0x23,0x45] +esp.fft.r2bf.s16 q3, q2, q5, q1, 1 +# CHECK: esp.fft.r2bf.s16 q3, q2, q5, q1, 1 # encoding: [0xdf,0x05,0xa5,0xa6] +esp.fft.r2bf.s16.st.incp q5, q4, q4, a5, 1 +# CHECK: esp.fft.r2bf.s16.st.incp q5, q4, q4, a5, 1 # encoding: [0xdf,0xe2,0x43,0x92] +esp.fft.vst.r32.decp q1, a4, 1 +# CHECK: esp.fft.vst.r32.decp q1, a4, 1 # encoding: [0x3b,0x24,0x03,0x80] +esp.ld.128.usar.ip q5, a4, -896 +# CHECK: esp.ld.128.usar.ip q5, a4, -896 # encoding: [0x3b,0x34,0x43,0xc8] +esp.ld.128.usar.xp q5, a2, a5 +# CHECK: esp.ld.128.usar.xp q5, a2, a5 # encoding: [0x5f,0x54,0x72,0x80] +esp.ld.xacc.ip a1, 616 +# CHECK: 
esp.ld.xacc.ip a1, 616 # encoding: [0x3b,0xd7,0x91,0x20] +esp.ldqa.s16.128.ip a3, -672 +# CHECK: esp.ldqa.s16.128.ip a3, -672 # encoding: [0xbb,0xcc,0xd2,0xe0] +esp.ldqa.s16.128.xp a4, a1 +# CHECK: esp.ldqa.s16.128.xp a4, a1 # encoding: [0x5b,0x53,0x33,0x13] +esp.ldqa.s8.128.ip a4, 1808 +# CHECK: esp.ldqa.s8.128.ip a4, 1808 # encoding: [0xbb,0x42,0x73,0x60] +esp.ldqa.s8.128.xp a0, a4 +# CHECK: esp.ldqa.s8.128.xp a0, a4 # encoding: [0x5b,0x51,0x61,0x13] +esp.ldqa.u16.128.ip a3, -496 +# CHECK: esp.ldqa.u16.128.ip a3, -496 # encoding: [0xbb,0xc2,0xe2,0xa0] +esp.ldqa.u16.128.xp a2, a4 +# CHECK: esp.ldqa.u16.128.xp a2, a4 # encoding: [0x5b,0x52,0x62,0x13] +esp.ldqa.u8.128.ip a2, 1200 +# CHECK: esp.ldqa.u8.128.ip a2, 1200 # encoding: [0xbb,0x56,0x42,0x20] +esp.ldqa.u8.128.xp a2, a5 +# CHECK: esp.ldqa.u8.128.xp a2, a5 # encoding: [0x5b,0x50,0x72,0x13] +esp.vldbc.16.ip q4, a3, 408 +# CHECK: esp.vldbc.16.ip q4, a3, 408 # encoding: [0x3b,0xb0,0x32,0xb6] +esp.vldbc.16.xp q5, a2, a1 +# CHECK: esp.vldbc.16.xp q5, a2, a1 # encoding: [0x5f,0x54,0x32,0x96] +esp.vldbc.32.ip q6, a2, -176 +# CHECK: esp.vldbc.32.ip q6, a2, -176 # encoding: [0x3b,0x38,0xa2,0xce] +esp.vldbc.32.xp q0, a1, a2 +# CHECK: esp.vldbc.32.xp q0, a1, a2 # encoding: [0x5f,0xc0,0x41,0x8e] +esp.vldbc.8.ip q6, a0, 200 +# CHECK: esp.vldbc.8.ip q6, a0, 200 # encoding: [0x3b,0x38,0x91,0x16] +esp.vldbc.8.xp q5, a4, a2 +# CHECK: esp.vldbc.8.xp q5, a4, a2 # encoding: [0x5f,0x54,0x43,0x86] +esp.vldext.s16.ip q4, q1, a3, -112 +# CHECK: esp.vldext.s16.ip q4, q1, a3, -112 # encoding: [0xbb,0xd0,0x92,0xc8] +esp.vldext.s16.xp q2, q4, a1, a0 +# CHECK: esp.vldext.s16.xp q2, q4, a1, a0 # encoding: [0x5f,0xea,0x21,0xf0] +esp.vldext.s8.ip q3, q2, a4, 0 +# CHECK: esp.vldext.s8.ip q3, q2, a4, 0 # encoding: [0x3b,0x4d,0x03,0x48] +esp.vldext.s8.xp q3, q6, a4, a1 +# CHECK: esp.vldext.s8.xp q3, q6, a4, a1 # encoding: [0x5f,0x6f,0x33,0x70] +esp.vldext.u16.ip q2, q1, a1, 48 +# CHECK: esp.vldext.u16.ip q2, q1, a1, 48 # encoding: 
[0xbb,0xc8,0x31,0x88] +esp.vldext.u16.xp q1, q2, a3, a1 +# CHECK: esp.vldext.u16.xp q1, q2, a3, a1 # encoding: [0x5f,0xe5,0x32,0xb0] +esp.vldext.u8.ip q5, q6, a0, -48 +# CHECK: esp.vldext.u8.ip q5, q6, a0, -48 # encoding: [0x3b,0x57,0xd1,0x08] +esp.vldext.u8.xp q0, q6, a3, a1 +# CHECK: esp.vldext.u8.xp q0, q6, a3, a1 # encoding: [0x5f,0xe3,0x32,0x30] +esp.vldhbc.16.incp q4, q2, a2 +# CHECK: esp.vldhbc.16.incp q4, q2, a2 # encoding: [0x3b,0x51,0x02,0x28] +esp.ld.qacc.h.h.128.ip a0, 816 +# CHECK: esp.ld.qacc.h.h.128.ip a0, 816 # encoding: [0x3b,0x46,0x31,0x40] +esp.ld.qacc.h.l.128.ip a2, -496 +# CHECK: esp.ld.qacc.h.l.128.ip a2, -496 # encoding: [0x3b,0x42,0xe2,0x60] +esp.ld.qacc.l.h.128.ip a1, -432 +# CHECK: esp.ld.qacc.l.h.128.ip a1, -432 # encoding: [0x3b,0xca,0xe1,0x00] +esp.ld.qacc.l.l.128.ip a0, 1840 +# CHECK: esp.ld.qacc.l.l.128.ip a0, 1840 # encoding: [0x3b,0x46,0x71,0x20] +esp.ld.ua.state.ip a4, -1392 +# CHECK: esp.ld.ua.state.ip a4, -1392 # encoding: [0x3b,0x45,0x53,0x60] +esp.ldxq.32 q5, q4, a4, 3, 6 +# CHECK: esp.ldxq.32 q5, q4, a4, 3, 6 # encoding: [0x5f,0x34,0x8b,0x78] +esp.st.qacc.h.h.128.ip a4, 656 +# CHECK: esp.st.qacc.h.h.128.ip a4, 656 # encoding: [0x3b,0x52,0x23,0xc0] +esp.st.qacc.h.l.128.ip a3, -1072 +# CHECK: esp.st.qacc.h.l.128.ip a3, -1072 # encoding: [0x3b,0xda,0xb2,0xe0] +esp.st.qacc.l.h.128.ip a2, 784 +# CHECK: esp.st.qacc.l.h.128.ip a2, 784 # encoding: [0x3b,0x42,0x32,0x80] +esp.st.qacc.l.l.128.ip a3, -736 +# CHECK: esp.st.qacc.l.l.128.ip a3, -736 # encoding: [0x3b,0xc4,0xd2,0xa0] +esp.st.ua.state.ip a5, 1376 +# CHECK: esp.st.ua.state.ip a5, 1376 # encoding: [0x3b,0xd9,0xa3,0xa0] +esp.stxq.32 q6, q5, a5, 3, 4 +# CHECK: esp.stxq.32 q6, q5, a5, 3, 4 # encoding: [0x5f,0xb8,0x8b,0xf1] +esp.vld.128.ip q1, a3, 560 +# CHECK: esp.vld.128.ip q1, a3, 560 # encoding: [0x3b,0xa6,0x12,0x12] +esp.vld.128.xp q1, a5, a3 +# CHECK: esp.vld.128.xp q1, a5, a3 # encoding: [0x5f,0xc4,0x53,0x82] +esp.vld.h.64.ip q4, a5, -568 +# CHECK: esp.vld.h.64.ip q4, a5, 
-568 # encoding: [0x3b,0xb2,0xc3,0x6c] +esp.vld.h.64.xp q1, a1, a2 +# CHECK: esp.vld.h.64.xp q1, a1, a2 # encoding: [0x5f,0xc4,0x41,0x8c] +esp.vld.l.64.ip q2, a3, -696 +# CHECK: esp.vld.l.64.ip q2, a3, -696 # encoding: [0x3b,0xaa,0x42,0x2c] +esp.vld.l.64.xp q5, a4, a4 +# CHECK: esp.vld.l.64.xp q5, a4, a4 # encoding: [0x5f,0x54,0x63,0x84] +esp.vst.128.ip q3, a4, 1088 +# CHECK: esp.vst.128.ip q3, a4, 1088 # encoding: [0x3b,0x2c,0x23,0xa2] +esp.vst.128.xp q1, a4, a0 +# CHECK: esp.vst.128.xp q1, a4, a0 # encoding: [0x5f,0x44,0x23,0x92] +esp.vst.h.64.ip q5, a4, -136 +# CHECK: esp.vst.h.64.ip q5, a4, -136 # encoding: [0x3b,0x36,0x73,0xfc] +esp.vst.h.64.xp q1, a0, a2 +# CHECK: esp.vst.h.64.xp q1, a0, a2 # encoding: [0x5f,0x44,0x41,0x9c] +esp.vst.l.64.ip q5, a0, -440 +# CHECK: esp.vst.l.64.ip q5, a0, -440 # encoding: [0x3b,0x36,0x41,0xb4] +esp.vst.l.64.xp q2, a5, a3 +# CHECK: esp.vst.l.64.xp q2, a5, a3 # encoding: [0x5f,0xc8,0x53,0x94] +esp.slci.2q q2, q6, 10 +# CHECK: esp.slci.2q q2, q6, 10 # encoding: [0x5b,0x49,0x48,0x0a] +esp.slcxxp.2q q2, q1, a0, a3 +# CHECK: esp.slcxxp.2q q2, q1, a0, a3 # encoding: [0x5f,0x40,0x51,0x09] +esp.src.q q5, q6, q2 +# CHECK: esp.src.q q5, q6, q2 # encoding: [0xdb,0x02,0x2c,0x8a] +esp.src.q.ld.ip q3, a4, 48, q6, q6 +# CHECK: esp.src.q.ld.ip q3, a4, 48, q6, q6 # encoding: [0x3b,0x2f,0x1b,0x1a] +esp.src.q.ld.xp q6, a5, a0, q6, q6 +# CHECK: esp.src.q.ld.xp q6, a5, a0, q6, q6 # encoding: [0x3b,0x98,0x2b,0x1a] +esp.src.q.qup q1, q5, q0 +# CHECK: esp.src.q.qup q1, q5, q0 # encoding: [0xdb,0x10,0x2c,0x81] +esp.srci.2q q5, q5, 11 +# CHECK: esp.srci.2q q5, q5, 11 # encoding: [0xdb,0x49,0xc8,0x15] +esp.srcmb.s16.q.qacc q6, q2, 0 +# CHECK: esp.srcmb.s16.q.qacc q6, q2, 0 # encoding: [0x5b,0x18,0x64,0x9a] +esp.srcmb.s16.qacc q2, a2, 0 +# CHECK: esp.srcmb.s16.qacc q2, a2, 0 # encoding: [0x3b,0x28,0x02,0xd8] +esp.srcmb.s8.q.qacc q5, q3, 0 +# CHECK: esp.srcmb.s8.q.qacc q5, q3, 0 # encoding: [0x5b,0x14,0x64,0x8b] +esp.srcmb.s8.qacc q1, a3, 1 +# CHECK: 
esp.srcmb.s8.qacc q1, a3, 1 # encoding: [0x3b,0xa4,0x02,0x78] +esp.srcmb.u16.q.qacc q3, q4, 0 +# CHECK: esp.srcmb.u16.q.qacc q3, q4, 0 # encoding: [0x5b,0x0c,0x6c,0x90] +esp.srcmb.u16.qacc q1, a0, 1 +# CHECK: esp.srcmb.u16.qacc q1, a0, 1 # encoding: [0x3b,0x24,0x01,0xb8] +esp.srcmb.u8.q.qacc q0, q5, 1 +# CHECK: esp.srcmb.u8.q.qacc q0, q5, 1 # encoding: [0x5b,0x00,0x6c,0x85] +esp.srcmb.u8.qacc q1, a3, 1 +# CHECK: esp.srcmb.u8.qacc q1, a3, 1 # encoding: [0x3b,0xa4,0x02,0x38] +esp.srcq.128.st.incp q1, q2, a0 +# CHECK: esp.srcq.128.st.incp q1, q2, a0 # encoding: [0x5b,0x40,0x01,0x09] +esp.srcxxp.2q q3, q3, a4, a4 +# CHECK: esp.srcxxp.2q q3, q3, a4, a4 # encoding: [0x5f,0x44,0x63,0x0f] +esp.srs.s.xacc a0, a0 +# CHECK: esp.srs.s.xacc a0, a0 # encoding: [0x5f,0x01,0xf1,0x94] +esp.srs.u.xacc a0, a3 +# CHECK: esp.srs.u.xacc a0, a3 # encoding: [0x5f,0x81,0xf2,0x84] +esp.vsl.32 q6, q4 +# CHECK: esp.vsl.32 q6, q4 # encoding: [0x5b,0x18,0x04,0x90] +esp.vsld.16 q6, q5, q1 +# CHECK: esp.vsld.16 q6, q5, q1 # encoding: [0x5f,0x18,0x20,0x15] +esp.vsld.32 q2, q4, q3 +# CHECK: esp.vsld.32 q2, q4, q3 # encoding: [0x5f,0x08,0x10,0x13] +esp.vsld.8 q1, q1, q4 +# CHECK: esp.vsld.8 q1, q1, q4 # encoding: [0x5f,0x04,0x08,0x04] +esp.vsr.s32 q1, q3 +# CHECK: esp.vsr.s32 q1, q3 # encoding: [0x5b,0x07,0x04,0x8c] +esp.vsr.u32 q5, q2 +# CHECK: esp.vsr.u32 q5, q2 # encoding: [0x5b,0x15,0x04,0x88] +esp.vsrd.16 q1, q5, q5 +# CHECK: esp.vsrd.16 q1, q5, q5 # encoding: [0x5f,0x04,0x68,0x15] +esp.vsrd.32 q1, q6, q3 +# CHECK: esp.vsrd.32 q1, q6, q3 # encoding: [0x5f,0x04,0x50,0x1b] +esp.vsrd.8 q0, q3, q1 +# CHECK: esp.vsrd.8 q0, q3, q1 # encoding: [0x5f,0x00,0x40,0x0d] +esp.st.s.xacc.ip a1, 304 +# CHECK: esp.st.s.xacc.ip a1, 304 # encoding: [0xbb,0xd9,0x41,0xa0] +esp.st.u.xacc.ip a2, 976 +# CHECK: esp.st.u.xacc.ip a2, 976 # encoding: [0xbb,0x49,0xf2,0x20] diff --git a/llvm/test/MC/RISCV/rv64xtheadmemidx-invalid.s b/llvm/test/MC/RISCV/rv64xtheadmemidx-invalid.s index fe6d0de0a4b00..e45c43a50048a 100644 --- 
a/llvm/test/MC/RISCV/rv64xtheadmemidx-invalid.s +++ b/llvm/test/MC/RISCV/rv64xtheadmemidx-invalid.s @@ -1,7 +1,7 @@ # RUN: not llvm-mc -triple riscv32 -mattr=+xtheadmemidx < %s 2>&1 | FileCheck %s # RUN: not llvm-mc -triple riscv64 -mattr=+xtheadmemidx < %s 2>&1 | FileCheck %s -th.ldia 0(a0), (a1), 0, 0 # CHECK: :[[@LINE]]:23: error: invalid operand for instruction +th.ldia 0(a0), (a1), 0, 0 # CHECK: :[[@LINE]]:26: error: invalid operand for instruction th.ldib a0, 2(a1), 15, 1 # CHECK: :[[@LINE]]:14: error: invalid operand for instruction th.lwia a0, (a1), 30, 2 # CHECK: :[[@LINE]]:20: error: immediate must be an integer in the range [-16, 15] th.lwib a0, (a1), -16, 43 # CHECK: :[[@LINE]]:25: error: immediate must be an integer in the range [0, 3] diff --git a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp index bede4e64696c5..7b9446daec511 100644 --- a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp +++ b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp @@ -1002,6 +1002,7 @@ R"(All available -march extensions for RISC-V xcvmac 1.0 xcvmem 1.0 xcvsimd 1.0 + xesppie 1.0 xsfcease 1.0 xsfvcp 1.0 xsfvfnrclipxfqf 1.0 diff --git a/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/llvm/utils/TableGen/AsmMatcherEmitter.cpp index 5035ef52707f4..cbfdf08f0693b 100644 --- a/llvm/utils/TableGen/AsmMatcherEmitter.cpp +++ b/llvm/utils/TableGen/AsmMatcherEmitter.cpp @@ -1997,8 +1997,7 @@ emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName, << "convertToMCInst(unsigned Kind, MCInst &Inst, " << "unsigned Opcode,\n" << " const OperandVector &Operands,\n" - << " const SmallBitVector &OptionalOperandsMask,\n" - << " ArrayRef DefaultsOffset) {\n"; + << " const SmallBitVector &OptionalOperandsMask) {\n"; } else { CvtOS << "void " << Target.getName() << ClassName << "::\n" << "convertToMCInst(unsigned Kind, MCInst &Inst, " @@ -2007,6 +2006,22 @@ emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName, } CvtOS << " assert(Kind < 
CVT_NUM_SIGNATURES && \"Invalid signature!\");\n"; CvtOS << " const uint8_t *Converter = ConversionTable[Kind];\n"; + if (HasOptionalOperands) { + size_t MaxNumOperands = 0; + for (const auto &MI : Infos) { + MaxNumOperands = std::max(MaxNumOperands, MI->AsmOperands.size()); + } + CvtOS << " unsigned DefaultsOffset[" << (MaxNumOperands + 1) + << "] = { 0 };\n"; + CvtOS << " assert(OptionalOperandsMask.size() == " << (MaxNumOperands) + << ");\n"; + CvtOS << " for (unsigned i = 0, NumDefaults = 0; i < " << (MaxNumOperands) + << "; ++i) {\n"; + CvtOS << " DefaultsOffset[i + 1] = NumDefaults;\n"; + CvtOS << " NumDefaults += (OptionalOperandsMask[i] ? 1 : 0);\n"; + CvtOS << " }\n"; + } + CvtOS << " Inst.setOpcode(Opcode);\n"; CvtOS << " for (const uint8_t *p = Converter; *p; p += 2) {\n"; if (HasOptionalOperands) { @@ -3041,17 +3056,15 @@ emitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target, } static void emitAsmTiedOperandConstraints(CodeGenTarget &Target, - AsmMatcherInfo &Info, raw_ostream &OS, - bool HasOptionalOperands) { + AsmMatcherInfo &Info, + raw_ostream &OS) { std::string AsmParserName = std::string(Info.AsmParser->getValueAsString("AsmParserClassName")); OS << "static bool "; OS << "checkAsmTiedOperandConstraints(const " << Target.getName() << AsmParserName << "&AsmParser,\n"; - OS << " unsigned Kind, const OperandVector " - "&Operands,\n"; - if (HasOptionalOperands) - OS << " ArrayRef DefaultsOffset,\n"; + OS << " unsigned Kind,\n"; + OS << " const OperandVector &Operands,\n"; OS << " uint64_t &ErrorInfo) {\n"; OS << " assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n"; OS << " const uint8_t *Converter = ConversionTable[Kind];\n"; @@ -3064,13 +3077,6 @@ static void emitAsmTiedOperandConstraints(CodeGenTarget &Target, OS << " \"Tied operand not found\");\n"; OS << " unsigned OpndNum1 = TiedAsmOperandTable[OpIdx][1];\n"; OS << " unsigned OpndNum2 = TiedAsmOperandTable[OpIdx][2];\n"; - if (HasOptionalOperands) { - // When optional 
operands are involved, formal and actual operand indices - // may differ. Map the former to the latter by subtracting the number of - // absent optional operands. - OS << " OpndNum1 = OpndNum1 - DefaultsOffset[OpndNum1];\n"; - OS << " OpndNum2 = OpndNum2 - DefaultsOffset[OpndNum2];\n"; - } OS << " if (OpndNum1 != OpndNum2) {\n"; OS << " auto &SrcOp1 = Operands[OpndNum1];\n"; OS << " auto &SrcOp2 = Operands[OpndNum2];\n"; @@ -3313,8 +3319,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) { << "unsigned Opcode,\n" << " const OperandVector &Operands,\n" << " const SmallBitVector " - "&OptionalOperandsMask,\n" - << " ArrayRef DefaultsOffset);\n"; + "&OptionalOperandsMask);\n"; } else { OS << " void convertToMCInst(unsigned Kind, MCInst &Inst, " << "unsigned Opcode,\n" @@ -3428,7 +3433,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) { Info.SubtargetFeatures, OS); if (!ReportMultipleNearMisses) - emitAsmTiedOperandConstraints(Target, Info, OS, HasOptionalOperands); + emitAsmTiedOperandConstraints(Target, Info, OS); StringToOffsetTable StringTable; @@ -3952,39 +3957,11 @@ void AsmMatcherEmitter::run(raw_ostream &OS) { OS << " }\n\n"; } - // When converting parsed operands to MCInst we need to know whether optional - // operands were parsed or not so that we can choose the correct converter - // function. We also need to know this when checking tied operand constraints. - // DefaultsOffset is an array of deltas between the formal (MCInst) and the - // actual (parsed operand array) operand indices. When all optional operands - // are present, all elements of the array are zeros. If some of the optional - // operands are absent, the array might look like '0, 0, 1, 1, 1, 2, 2, 3', - // where each increment in value reflects the absence of an optional operand. 
- if (HasOptionalOperands) { - OS << " unsigned DefaultsOffset[" << (MaxNumOperands + 1) - << "] = { 0 };\n"; - OS << " assert(OptionalOperandsMask.size() == " << (MaxNumOperands) - << ");\n"; - OS << " for (unsigned i = 0, NumDefaults = 0; i < " << (MaxNumOperands) - << "; ++i) {\n"; - OS << " DefaultsOffset[i + 1] = NumDefaults;\n"; - OS << " NumDefaults += (OptionalOperandsMask[i] ? 1 : 0);\n"; - OS << " }\n\n"; - } - OS << " if (matchingInlineAsm) {\n"; OS << " convertToMapAndConstraints(it->ConvertFn, Operands);\n"; if (!ReportMultipleNearMisses) { - if (HasOptionalOperands) { - OS << " if (!checkAsmTiedOperandConstraints(*this, it->ConvertFn, " - "Operands,\n"; - OS << " DefaultsOffset, " - "ErrorInfo))\n"; - } else { - OS << " if (!checkAsmTiedOperandConstraints(*this, it->ConvertFn, " - "Operands,\n"; - OS << " ErrorInfo))\n"; - } + OS << " if (!checkAsmTiedOperandConstraints(*this, it->ConvertFn, " + "Operands, ErrorInfo))\n"; OS << " return Match_InvalidTiedOperand;\n"; OS << "\n"; } @@ -3994,7 +3971,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) { << " // operands into the appropriate MCInst.\n"; if (HasOptionalOperands) { OS << " convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands,\n" - << " OptionalOperandsMask, DefaultsOffset);\n"; + << " OptionalOperandsMask);\n"; } else { OS << " convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands);\n"; } @@ -4074,16 +4051,8 @@ void AsmMatcherEmitter::run(raw_ostream &OS) { } if (!ReportMultipleNearMisses) { - if (HasOptionalOperands) { - OS << " if (!checkAsmTiedOperandConstraints(*this, it->ConvertFn, " - "Operands,\n"; - OS << " DefaultsOffset, " - "ErrorInfo))\n"; - } else { - OS << " if (!checkAsmTiedOperandConstraints(*this, it->ConvertFn, " - "Operands,\n"; - OS << " ErrorInfo))\n"; - } + OS << " if (!checkAsmTiedOperandConstraints(*this, it->ConvertFn, " + "Operands, ErrorInfo))\n"; OS << " return Match_InvalidTiedOperand;\n"; OS << "\n"; } From 031fa7b91900cb2e49c7a2c8e31093305d414833 Mon Sep 
17 00:00:00 2001 From: Andrei Safronov Date: Sun, 15 Dec 2024 02:55:53 +0300 Subject: [PATCH 247/289] [Xtensa] Fix lowering BRCOND. --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 3 ++ llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 32 +++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 3abef2fc97710..3241b665ec87e 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -371,6 +371,9 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, setTargetDAGCombine(ISD::BR_CC); } + // make BRCOND legal, its actually only legal for a subset of conds + setOperationAction(ISD::BRCOND, MVT::Other, Legal); + // Needed so that we don't try to implement f128 constant loads using // a load-and-extend of a f80 constant (in cases where the constant // would fit in an f80). diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index c79d9fab1a3a8..98cd7fd291e5b 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -514,6 +514,38 @@ def : InstAlias<"bbsi.l\t$s, $imm, $target", (BBSI AR:$s, uimm5:$imm, brtarget:$ def : InstAlias<"_bbsi.l\t$s, $imm, $target", (BBSI AR:$s, uimm5:$imm, brtarget:$target)>; +def : Pat<(brcc SETGT, AR:$s, AR:$t, bb:$target), + (BLT AR:$t, AR:$s, bb:$target)>; +def : Pat<(brcc SETUGT, AR:$s, AR:$t, bb:$target), + (BLTU AR:$t, AR:$s, bb:$target)>; +def : Pat<(brcc SETLE, AR:$s, AR:$t, bb:$target), + (BGE AR:$t, AR:$s, bb:$target)>; +def : Pat<(brcc SETULE, AR:$s, AR:$t, bb:$target), + (BGEU AR:$t, AR:$s, bb:$target)>; + +def : Pat<(brcond (i32 (seteq AR:$s, AR:$t)), bb:$target), + (BEQ AR:$s, AR:$t, bb:$target)>; +def : Pat<(brcond (i32 (setne AR:$s, AR:$t)), bb:$target), + (BNE AR:$s, AR:$t, bb:$target)>; +def : Pat<(brcond (i32 (setge AR:$s, AR:$t)), bb:$target), + (BGE AR:$s, 
AR:$t, bb:$target)>; +def : Pat<(brcond (i32 (setle AR:$s, AR:$t)), bb:$target), + (BLT AR:$s, AR:$t, bb:$target)>; +def : Pat<(brcond (i32 (setuge AR:$s, AR:$t)), bb:$target), + (BGEU AR:$s, AR:$t, bb:$target)>; +def : Pat<(brcond (i32 (setult AR:$s, AR:$t)), bb:$target), + (BLTU AR:$s, AR:$t, bb:$target)>; +def : Pat<(brcond (i32 (setgt AR:$s, AR:$t)), bb:$target), + (BLT AR:$t, AR:$s, bb:$target)>; +def : Pat<(brcond (i32 (setugt AR:$s, AR:$t)), bb:$target), + (BLTU AR:$t, AR:$s, bb:$target)>; +def : Pat<(brcond (i32 (setle AR:$s, AR:$t)), bb:$target), + (BGE AR:$t, AR:$s, bb:$target)>; +def : Pat<(brcond (i32 (setule AR:$s, AR:$t)), bb:$target), + (BGEU AR:$t, AR:$s, bb:$target)>; + +def : Pat<(brcond AR:$s, bb:$target), (BNEZ AR:$s, bb:$target)>; + //===----------------------------------------------------------------------===// // Call and jump instructions //===----------------------------------------------------------------------===// From 296f18662e2acae87e1ee97f20052d24c6831e10 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Sun, 15 Dec 2024 02:58:36 +0300 Subject: [PATCH 248/289] [Xtensa] Fix lowering SELECT_CC and SETCC. --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 21 ++++++++----------- 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 3241b665ec87e..47a0df90b5ea9 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -1546,15 +1546,13 @@ SDValue XtensaTargetLowering::LowerSELECT_CC(SDValue Op, SDValue FalseValue = Op.getOperand(3); ISD::CondCode CC = cast(Op->getOperand(4))->get(); - unsigned BrOpcode = getBranchOpcode(CC); - SDValue TargetCC = DAG.getConstant(BrOpcode, DL, MVT::i32); - SDValue TargetCC_FP = DAG.getConstant(CC, DL, MVT::i32); + SDValue TargetCC = DAG.getConstant( + (LHS.getValueType() == MVT::f32) ? 
CC : getBranchOpcode(CC), DL, + MVT::i32); if (LHS.getValueType() == MVT::f32 || TrueValue.getValueType() == MVT::f32) return DAG.getNode(XtensaISD::SELECT_CC_FP, DL, TrueValue.getValueType(), - LHS, RHS, TrueValue, FalseValue, - (LHS.getValueType() == MVT::f32) ? TargetCC_FP - : TargetCC); + LHS, RHS, TrueValue, FalseValue, TargetCC); else if (TrueValue.getValueType().isVector()) return Op; @@ -1568,9 +1566,9 @@ SDValue XtensaTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { SDValue RHS = Op.getOperand(1); ISD::CondCode CC = cast(Op.getOperand(2))->get(); - unsigned BrOpcode = getBranchOpcode(CC); - SDValue TargetCC = DAG.getConstant(BrOpcode, DL, MVT::i32); - SDValue TargetCC_FP = DAG.getConstant(CC, DL, MVT::i32); + SDValue TargetCC = DAG.getConstant( + (LHS.getValueType() == MVT::f32) ? CC : getBranchOpcode(CC), DL, + MVT::i32); // Check Op SDNode users // If there are only CALL/CALLW nodes, don't expand Global Address @@ -1595,9 +1593,8 @@ SDValue XtensaTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { SDValue FalseV = DAG.getConstant(0, DL, Op.getValueType()); if (LHS.getValueType() == MVT::f32 || TrueV.getValueType() == MVT::f32) - return DAG.getNode( - XtensaISD::SELECT_CC_FP, DL, TrueV.getValueType(), LHS, RHS, TrueV, - FalseV, (LHS.getValueType() == MVT::f32) ? TargetCC_FP : TargetCC); + return DAG.getNode(XtensaISD::SELECT_CC_FP, DL, TrueV.getValueType(), LHS, + RHS, TrueV, FalseV, TargetCC); else if (TrueV.getValueType().isVector()) return SDValue(); else From 1ba66c25a1e3fd5d69fa49e1b987926081e62bf8 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Sun, 15 Dec 2024 03:03:31 +0300 Subject: [PATCH 249/289] [Xtensa] Fix lowering ConstantPool. 
--- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 26 +++++++++++++++---- .../Target/Xtensa/XtensaMachineFunctionInfo.h | 3 +++ 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 47a0df90b5ea9..91c6ff8e75ab1 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -1776,15 +1776,31 @@ SDValue XtensaTargetLowering::getAddrPCRel(SDValue Op, SDValue XtensaTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { - EVT PtrVT = Op.getValueType(); ConstantPoolSDNode *CP = cast(Op); + EVT PtrVT = Op.getValueType(); + auto C = const_cast(CP->getConstVal()); + auto T = const_cast(CP->getType()); SDValue Result; - if (!CP->isMachineConstantPoolEntry()) { - Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign(), - CP->getOffset()); + // Do not use constant pool for aggregate or vector constant types, + // in such cases create global variable + if (T->isAggregateType() || T->isVectorTy()) { + auto AFI = DAG.getMachineFunction().getInfo(); + auto M = const_cast( + DAG.getMachineFunction().getFunction().getParent()); + auto GV = new GlobalVariable( + *M, T, /*isConstant=*/true, GlobalVariable::InternalLinkage, C, + Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" + + Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" + + Twine(AFI->createLabelUId())); + Result = DAG.getTargetConstantPool(GV, PtrVT, Align(4)); } else { - report_fatal_error("This constantpool type is not supported yet"); + if (CP->isMachineConstantPoolEntry()) + Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, + CP->getAlign()); + else + Result = + DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), CP->getOffset()); } return getAddrPCRel(Result, DAG); diff --git a/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h b/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h index 
10d11fcb21bf1..86ee81128c34c 100644 --- a/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h +++ b/llvm/lib/Target/Xtensa/XtensaMachineFunctionInfo.h @@ -26,6 +26,7 @@ class XtensaFunctionInfo : public MachineFunctionInfo { int VarArgsStackOffset; unsigned VarArgsFrameIndex; bool SaveFrameRegister = false; + unsigned LabelUId = 0; public: explicit XtensaFunctionInfo(const Function &F, const TargetSubtargetInfo *STI) @@ -43,6 +44,8 @@ class XtensaFunctionInfo : public MachineFunctionInfo { bool isSaveFrameRegister() const { return SaveFrameRegister; } void setSaveFrameRegister() { SaveFrameRegister = true; } + + unsigned createLabelUId() { return LabelUId++; } }; } // namespace llvm From caf476f6f2c7e935ebaa86ceabbdb86c28fed011 Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Sun, 15 Dec 2024 03:23:52 +0300 Subject: [PATCH 250/289] [Xtensa] Fix lowering immediate. --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index 91c6ff8e75ab1..becb746f8b94f 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -1631,11 +1631,6 @@ SDValue XtensaTargetLowering::LowerImmediate(SDValue Op, // Check if use node maybe lowered to the MOVI instruction if (Value > -2048 && Value <= 2047) return Op; - // Check if use node maybe lowered to the ADDMI instruction - SDNode &OpNode = *Op.getNode(); - if ((OpNode.hasOneUse() && OpNode.use_begin()->getOpcode() == ISD::ADD) && - isShiftedInt<16, 8>(Value)) - return Op; Type *Ty = Type::getInt32Ty(*DAG.getContext()); Constant *CV = ConstantInt::get(Ty, Value); SDValue CP = DAG.getConstantPool(CV, MVT::i32); From aa12798cf1b2247dc830e614f07991a934d580fe Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Wed, 18 Dec 2024 00:38:05 +0300 Subject: [PATCH 251/289] [Xtensa] Fix emitting literals section. 
--- llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.cpp | 4 ++++ llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.cpp index 9b3906a5cb7ce..b096009a8956e 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.cpp @@ -102,6 +102,8 @@ void XtensaTargetELFStreamer::emitLiteral(MCSymbol *LblSym, const MCExpr *Value, OutStreamer.switchSection(ConstSection); } + OutStreamer.emitCodeAlignment(Align(4), + OutStreamer.getContext().getSubtargetInfo()); OutStreamer.emitLabel(LblSym, L); OutStreamer.emitValue(Value, 4, L); @@ -128,6 +130,8 @@ void XtensaTargetELFStreamer::startLiteralSection(MCSection *BaseSection) { SectionName, ELF::SHT_PROGBITS, ELF::SHF_EXECINSTR | ELF::SHF_ALLOC); ConstSection->setAlignment(Align(4)); + MCStreamer &OutStreamer = getStreamer(); + OutStreamer.switchSection(ConstSection); } MCELFStreamer &XtensaTargetELFStreamer::getStreamer() { diff --git a/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp index 9c4542001bd93..f1638d39bf258 100644 --- a/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp +++ b/llvm/lib/Target/Xtensa/XtensaAsmPrinter.cpp @@ -182,7 +182,7 @@ void XtensaAsmPrinter::emitConstantPool() { auto *TS = static_cast(OutStreamer->getTargetStreamer()); MCSection *CS = getObjFileLowering().SectionForGlobal(&F, TM); - TS->setTextSectionLiterals(); + // TS->setTextSectionLiterals(); TS->startLiteralSection(CS); int CPIdx = 0; From e5e3894968225449ae4bffbe288110a84e7da12e Mon Sep 17 00:00:00 2001 From: Andrei Safronov Date: Tue, 24 Dec 2024 09:52:48 +0300 Subject: [PATCH 252/289] [Xtensa][Test] Fix compiler-rt test config. 
--- compiler-rt/test/builtins/Unit/lit.cfg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/compiler-rt/test/builtins/Unit/lit.cfg.py b/compiler-rt/test/builtins/Unit/lit.cfg.py index a020850fddf65..bfb4b9cacaecb 100644 --- a/compiler-rt/test/builtins/Unit/lit.cfg.py +++ b/compiler-rt/test/builtins/Unit/lit.cfg.py @@ -104,7 +104,7 @@ def get_libgcc_file_name(): if sys.platform in ["win32"] and execute_external: # Don't pass dosish path separator to msys bash.exe. base_lib = base_lib.replace("\\", "/") - if config.target_triple in ['xtensa-esp-elf', 'riscv32-esp-elf']: + if config.target_triple in ['xtensa-esp-elf', 'xtensa-esp-unknown-elf', 'riscv32-esp-elf', 'riscv32-esp-unknown-elf']: config.substitutions.append( ("%librt ", "-Wl,--start-group," + base_lib + ',-lc,-lm,--end-group ') ) else: config.substitutions.append( ("%librt ", base_lib + ' -lc -lm ') ) From a01719f5a41eeed2d1df0c9847410e3fb833e425 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Wed, 13 Nov 2024 21:00:02 +0300 Subject: [PATCH 253/289] esp/ci: Fix git dubious ownership error --- .gitlab-ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b28cf30282426..78e550c7503f2 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -63,6 +63,7 @@ before_script: script: - *get_toolchain_build_scripts - LLVM_PROJECT_PATH=$PWD + - git config --global --add safe.directory ${LLVM_PROJECT_PATH} - BUILD_PATH=$PWD/${BUILD_DIR} - mkdir -p ${BUILD_PATH} - BUILD_HOST=$(gcc -dumpmachine) @@ -187,6 +188,8 @@ build_and_test: fi script: - BUILD_PATH=$PWD/${BUILD_DIR} + - LLVM_PROJECT_PATH=$PWD + - git config --global --add safe.directory ${LLVM_PROJECT_PATH} - mkdir -p ${BUILD_PATH} - cmake -G Ninja -S llvm From 54186c43a9f6cb81be304daff2fca2a7147e7f59 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Thu, 14 Nov 2024 11:22:48 +0300 Subject: [PATCH 254/289] esp/ci: Use build script repo LLVM 19 release branch --- .gitlab-ci.yml | 2 +- 1 file changed, 
1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 78e550c7503f2..53a52b9c47943 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -11,7 +11,7 @@ stages: image: ${CI_DOCKER_REGISTRY}/llvm-build:4 variables: - ESP_LLVM_EMBEDDED_TOOLCHAIN_REF: "master" + ESP_LLVM_EMBEDDED_TOOLCHAIN_REF: "llvm_release_19" ESP_GNU_TOOLCHAIN_VER: "13.2.0_20240305" CROSS_ARM_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-arm:1 CROSS_WIN_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-win:2 From 46050f371f703756933954f8d4b333b5e522aecc Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Thu, 26 Dec 2024 19:02:48 +0300 Subject: [PATCH 255/289] esp/ci: Update docker images for LLVM 19 release --- .gitlab-ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 53a52b9c47943..fd1c95cca8671 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -8,12 +8,12 @@ stages: - public_deploy - update_idf_tools -image: ${CI_DOCKER_REGISTRY}/llvm-build:4 +image: ${CI_DOCKER_REGISTRY}/llvm-build:5 variables: ESP_LLVM_EMBEDDED_TOOLCHAIN_REF: "llvm_release_19" ESP_GNU_TOOLCHAIN_VER: "13.2.0_20240305" - CROSS_ARM_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-arm:1 + CROSS_ARM_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-arm:2 CROSS_WIN_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-win:2 DIST_DIR: "dist" BUILD_DIR: "build" From 12e6f5d29bd767c9f9ad5bc18b5db1e2e09f5930 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Thu, 22 Aug 2024 15:48:47 +0300 Subject: [PATCH 256/289] esp/ci: Add esp-dsp build test job --- .gitlab-ci.yml | 81 ++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 72 insertions(+), 9 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index fd1c95cca8671..79f6d6073d588 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -268,6 +268,17 @@ build_aarch64-apple-darwin: variables: CONF_HOST: "aarch64-apple-darwin21.1" +.unpack_distro: &unpack_distro | + pushd ${DIST_DIR} + ls -l + 
DISTRO_PACK_FILE=$(cat dist_name_${CONF_HOST}) + echo "DISTRO_PACK_FILE=${DISTRO_PACK_FILE}" + ${UNPACK_TOOL} ${DISTRO_PACK_FILE} + DISTRO_PACK_DIR=$(tar tJf ${DISTRO_PACK_FILE} | sed -e 's@/.*@@' | uniq) + ls -l $PWD/${DISTRO_PACK_DIR}/lib/clang-runtimes/ + echo "DISTRO_PACK_DIR=${DISTRO_PACK_DIR}" + rm -f ${DISTRO_PACK_FILE} + .pack_template: stage: pack tags: [ "amd64", "build" ] @@ -282,15 +293,7 @@ build_aarch64-apple-darwin: script: - *get_toolchain_build_scripts # update distro - - pushd ${DIST_DIR} - - ls -l - - DISTRO_PACK_FILE=$(cat dist_name_${CONF_HOST}) - - echo "DISTRO_PACK_FILE=${DISTRO_PACK_FILE}" - - ${UNPACK_TOOL} ${DISTRO_PACK_FILE} - - DISTRO_PACK_DIR=$(tar tJf ${DISTRO_PACK_FILE} | sed -e 's@/.*@@' | uniq) - - ls -l $PWD/${DISTRO_PACK_DIR}/lib/clang-runtimes/ - - echo "DISTRO_PACK_DIR=${DISTRO_PACK_DIR}" - - rm -f ${DISTRO_PACK_FILE} + - *unpack_distro - TARGET_LIBS_PACK_FILE=$(cat target_libs_arch_name) - rm -f target_libs_arch_name - echo "TARGET_LIBS_PACK_FILE=${TARGET_LIBS_PACK_FILE}" @@ -391,6 +394,66 @@ sign_aarch64-apple-darwin: needs: - pack_aarch64-apple-darwin +.prepare_test_app_build: &prepare_test_app_build | + if [ -z "${TEST_APP_IDF_CUSTOM_BRANCH:-}" ]; then + # Use the same idf branch name if exists + git ls-remote https://gitlab-ci-token:${BOT_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/espressif/esp-idf.git | grep "refs/heads/$CI_COMMIT_REF_NAME" + test $? 
-eq 0 && echo "Use IDF branch \"$CI_COMMIT_REF_NAME\"" && TEST_APP_IDF_CUSTOM_BRANCH=$CI_COMMIT_REF_NAME + fi + + # Use custom idf in case custom branch is present + if [ -n "${TEST_APP_IDF_CUSTOM_BRANCH:-}" ]; then + echo "TEST_APP_IDF_CUSTOM_BRANCH=$TEST_APP_IDF_CUSTOM_BRANCH" + #pushd $BUILD_TEST_APP_DIR + # Clone esp-idf + git clone --shallow-submodules --recursive --single-branch --branch $TEST_APP_IDF_CUSTOM_BRANCH -- https://gitlab-ci-token:${BOT_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/espressif/esp-idf.git esp-idf + export IDF_PATH=$PWD/esp-idf + # Activate pyenv + if [ $(command -v pyenv) ]; then + source /opt/pyenv/activate + pyenv global $(pyenv versions --bare) + fi + # cannot exec '. ${IDF_PATH}/export.sh' here because not all tools distros are presented + # in the image and `export.sh` fails w/o adding tools to $PATH + idf_exports=$(${IDF_PATH}/tools/idf_tools.py export) || true + eval "${idf_exports}" + #popd + fi + idf.py --version || true + pushd $IDF_PATH/components + git clone --shallow-submodules --recursive --single-branch --branch $TEST_APP_ESP_DSP_CUSTOM_BRANCH -- https://gitlab-ci-token:${BOT_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/idf/esp-dsp.git esp-dsp + pushd $PWD/esp-dsp/test_app + +test_esp_dsp: + image: espressif/idf:latest + tags: [ "amd64", "build" ] + allow_failure: true + artifacts: + paths: + - ${BUILD_DIR}/*.log + when: always + expire_in: 1 day + parallel: + matrix: + - CHIP: esp32p4 + needs: + - job: "pack_x86_64-linux-gnu" + variables: + TEST_APP_IDF_CUSTOM_BRANCH: "master" + TEST_APP_ESP_DSP_CUSTOM_BRANCH: "master" + CONF_HOST: "x86_64-linux-gnu" + UNPACK_TOOL: "tar xJf" + script: + - mkdir -p $PWD/${BUILD_DIR} + - export BUILD_LOG=$PWD/${BUILD_DIR}/build.log + - *unpack_distro + - export PATH=$PWD/${DISTRO_PACK_DIR}/bin:${PATH} + - which clang + - *prepare_test_app_build + - export IDF_TOOLCHAIN=clang + - idf.py set-target ${CHIP} 2>&1 | tee ${BUILD_LOG} + - idf.py build 2>&1 | tee -a ${BUILD_LOG} + upload_to_http: 
stage: private_deploy when: manual From 22b58192706380d4cdece2def5dd55bf8412bda3 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Wed, 11 Sep 2024 16:19:02 +0300 Subject: [PATCH 257/289] esp/ci: Use IDF from docker image for esp-dsp test build --- .gitlab-ci.yml | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 79f6d6073d588..152f63ecc29bf 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -395,16 +395,9 @@ sign_aarch64-apple-darwin: - pack_aarch64-apple-darwin .prepare_test_app_build: &prepare_test_app_build | - if [ -z "${TEST_APP_IDF_CUSTOM_BRANCH:-}" ]; then - # Use the same idf branch name if exists - git ls-remote https://gitlab-ci-token:${BOT_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/espressif/esp-idf.git | grep "refs/heads/$CI_COMMIT_REF_NAME" - test $? -eq 0 && echo "Use IDF branch \"$CI_COMMIT_REF_NAME\"" && TEST_APP_IDF_CUSTOM_BRANCH=$CI_COMMIT_REF_NAME - fi - # Use custom idf in case custom branch is present if [ -n "${TEST_APP_IDF_CUSTOM_BRANCH:-}" ]; then echo "TEST_APP_IDF_CUSTOM_BRANCH=$TEST_APP_IDF_CUSTOM_BRANCH" - #pushd $BUILD_TEST_APP_DIR # Clone esp-idf git clone --shallow-submodules --recursive --single-branch --branch $TEST_APP_IDF_CUSTOM_BRANCH -- https://gitlab-ci-token:${BOT_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/espressif/esp-idf.git esp-idf export IDF_PATH=$PWD/esp-idf @@ -417,7 +410,6 @@ sign_aarch64-apple-darwin: # in the image and `export.sh` fails w/o adding tools to $PATH idf_exports=$(${IDF_PATH}/tools/idf_tools.py export) || true eval "${idf_exports}" - #popd fi idf.py --version || true pushd $IDF_PATH/components @@ -439,7 +431,8 @@ test_esp_dsp: needs: - job: "pack_x86_64-linux-gnu" variables: - TEST_APP_IDF_CUSTOM_BRANCH: "master" + # use IDF 'master' from docker image + TEST_APP_IDF_CUSTOM_BRANCH: "" TEST_APP_ESP_DSP_CUSTOM_BRANCH: "master" CONF_HOST: "x86_64-linux-gnu" UNPACK_TOOL: "tar xJf" From 4620b945fdfa105091ad332d64a5f0afd0aeb96e Mon Sep 17 
00:00:00 2001 From: Stefan Stipanovic Date: Wed, 18 Sep 2024 12:20:28 +0200 Subject: [PATCH 258/289] [Xtensa] ESP32S3 fix verifier issues in instructions with INOUT operands --- .../lib/Target/Xtensa/XtensaS3DSPInstrInfo.td | 154 +++- .../Target/Xtensa/XtensaS3ISelLowering.cpp | 775 +++++++++--------- llvm/test/CodeGen/Xtensa/xtensa-s3-dsp.ll | 2 +- 3 files changed, 542 insertions(+), 389 deletions(-) diff --git a/llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td index afb0abe37ecfb..c884ffd238a37 100644 --- a/llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td @@ -52,6 +52,7 @@ def EE_BITREV: EE_Inst24<(outs QR:$qa, AR:$axr), (ins AR:$ax), bits<4> ax; + let Constraints = "$axr = $ax"; let Inst{23-22} = 0x3; let Inst{21-20} = qa{2-1}; let Inst{19-16} = 0xd; @@ -105,6 +106,7 @@ def EE_CMUL_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$as, let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qz{2-0}; @@ -113,8 +115,6 @@ def EE_CMUL_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$as, let Inst{10-6} = 0x3; let Inst{5-4} = sel4{1-0}; let Inst{3-0} = as{3-0}; - - let Constraints = "$asr = $as"; } let usesCustomInserter = 1 in @@ -134,6 +134,7 @@ def EE_CMUL_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qz), (ins QR:$qv, AR:$as, let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-20} = 0x1c8; let Inst{19-17} = qz{2-0}; let Inst{16-14} = qx{2-0}; @@ -142,8 +143,6 @@ def EE_CMUL_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qz), (ins QR:$qv, AR:$as, let Inst{7-6} = 0x0; let Inst{5-4} = sel4{1-0}; let Inst{3-0} = as{3-0}; - - let Constraints = "$asr = $as"; } let usesCustomInserter = 1 in @@ -165,6 +164,7 @@ def EE_FFT_AMS_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz, QR:$qz1), let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x34; let Inst{22} = 
sel2{0}; let Inst{21-20} = qz1{2-1}; @@ -196,6 +196,7 @@ def EE_FFT_AMS_S16_LD_INCP_UAUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz, QR:$q let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x35; let Inst{22} = sel2{0}; let Inst{21-20} = qz1{2-1}; @@ -227,6 +228,7 @@ def EE_FFT_AMS_S16_LD_R32_DECP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz, QR:$qz let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x36; let Inst{22} = sel2{0}; let Inst{21-20} = qz1{2-1}; @@ -258,6 +260,7 @@ def EE_FFT_AMS_S16_ST_INCP: EE_Inst32<(outs QR:$qz1, AR:$as0r, AR:$asr), (ins Q let mayStore = 1; + let Constraints = "$as0r = $as0, $asr = $as"; let Inst{28-24} = 0x14; let Inst{23} = sel2{0}; let Inst{22-20} = qz1{2-0}; @@ -287,6 +290,7 @@ def EE_FFT_CMUL_S16_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$a let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x37; let Inst{22-20} = sel8{2-0}; let Inst{19-17} = qz{2-0}; @@ -316,6 +320,7 @@ def EE_FFT_CMUL_S16_ST_XP: EE_Inst32<(outs AR:$asr), (ins QR:$qx, QR:$qy, QR:$q let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-24} = 0x15; let Inst{23-22} = sar4{1-0}; let Inst{21-20} = upd4{1-0}; @@ -372,6 +377,7 @@ def EE_FFT_R2BF_S16_ST_INCP: EE_Inst32<(outs QR:$qa0, AR:$asr), (ins QR:$qx, QR let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-20} = 0x1d1; let Inst{19-17} = qa0{2-0}; let Inst{16-14} = qx{2-0}; @@ -396,6 +402,7 @@ def EE_FFT_VST_R32_DECP: EE_Inst24<(outs AR:$asr), (ins QR:$qv, AR:$as, select_ let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{23-22} = 0x3; let Inst{21-20} = qv{2-1}; let Inst{19-16} = 0xd; @@ -424,6 +431,7 @@ def EE_LDF_128_IP: EE_Inst32<(outs FPR:$fu3, FPR:$fu2, FPR:$fu1, FPR:$fu0, AR:$ let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-24} = 0x10; let Inst{23-20} = fu3{3-0}; let Inst{19-16} = fu2{3-0}; @@ -450,6 +458,7 @@ def EE_LDF_128_XP: EE_Inst32<(outs FPR:$fu3, FPR:$fu2, FPR:$fu1, FPR:$fu0, AR:$ let mayLoad = 
1; + let Constraints = "$asr = $as"; let Inst{28-24} = 0x11; let Inst{23-20} = fu3{3-0}; let Inst{19-16} = fu2{3-0}; @@ -474,6 +483,7 @@ def EE_LDF_64_IP: EE_Inst32<(outs FPR:$fu1, FPR:$fu0, AR:$asr), (ins AR:$as, of let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x38; let Inst{22-16} = imm8{7-1}; let Inst{15-12} = fu1{3-0}; @@ -498,6 +508,7 @@ def EE_LDF_64_XP: EE_Inst24<(outs FPR:$fu1, FPR:$fu0, AR:$asr), (ins AR:$as, AR let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23-20} = fu0{3-0}; let Inst{19-16} = 0x6; let Inst{15-12} = fu1{3-0}; @@ -519,6 +530,7 @@ def EE_LDQA_S16_128_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_16:$i let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x0; let Inst{22} = imm16{7}; let Inst{21-15} = 0x2; @@ -540,6 +552,7 @@ def EE_LDQA_S16_128_XP: EE_Inst24<(outs AR:$asr), (ins AR:$as, AR:$ad), let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23-12} = 0x7e4; let Inst{11-8} = ad{3-0}; let Inst{7-4} = as{3-0}; @@ -559,6 +572,7 @@ def EE_LDQA_S8_128_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_16:$im let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x0; let Inst{22} = imm16{7}; let Inst{21-15} = 0x22; @@ -580,6 +594,7 @@ def EE_LDQA_S8_128_XP: EE_Inst24<(outs AR:$asr), (ins AR:$as, AR:$ad), let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23-12} = 0x714; let Inst{11-8} = ad{3-0}; let Inst{7-4} = as{3-0}; @@ -599,6 +614,7 @@ def EE_LDQA_U16_128_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_16:$i let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x0; let Inst{22} = imm16{7}; let Inst{21-15} = 0xa; @@ -620,6 +636,7 @@ def EE_LDQA_U16_128_XP: EE_Inst24<(outs AR:$asr), (ins AR:$as, AR:$ad), let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23-12} = 0x7a4; let Inst{11-8} = ad{3-0}; let Inst{7-4} = as{3-0}; @@ -639,6 +656,7 @@ def EE_LDQA_U8_128_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_16:$im let 
mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x0; let Inst{22} = imm16{7}; let Inst{21-15} = 0x2a; @@ -660,6 +678,7 @@ def EE_LDQA_U8_128_XP: EE_Inst24<(outs AR:$asr), (ins AR:$as, AR:$ad), let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23-12} = 0x704; let Inst{11-8} = ad{3-0}; let Inst{7-4} = as{3-0}; @@ -705,6 +724,7 @@ def EE_LD_128_USAR_IP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_25 let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x1; let Inst{22} = imm16{7}; let Inst{21-20} = qu{2-1}; @@ -729,6 +749,7 @@ def EE_LD_128_USAR_XP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, AR:$ad), let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23-22} = 0x2; let Inst{21-20} = qu{2-1}; let Inst{19-16} = 0xd; @@ -752,6 +773,7 @@ def EE_LD_ACCX_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_8:$imm8), let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x0; let Inst{22} = imm8{7}; let Inst{21-15} = 0x1c; @@ -773,6 +795,7 @@ def EE_LD_QACC_H_H_32_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_4:$ let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x0; let Inst{22} = imm4{7}; let Inst{21-15} = 0x3c; @@ -794,6 +817,7 @@ def EE_LD_QACC_H_L_128_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_16 let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x0; let Inst{22} = imm16{7}; let Inst{21-15} = 0xc; @@ -815,6 +839,7 @@ def EE_LD_QACC_L_H_32_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_4:$ let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x0; let Inst{22} = imm4{7}; let Inst{21-15} = 0x2c; @@ -836,6 +861,7 @@ def EE_LD_QACC_L_L_128_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_16 let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x0; let Inst{22} = imm16{7}; let Inst{21-15} = 0x0; @@ -857,6 +883,7 @@ def EE_LD_UA_STATE_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_16:$im let mayLoad = 1; + let 
Constraints = "$asr = $as"; let Inst{23} = 0x0; let Inst{22} = imm16{7}; let Inst{21-15} = 0x20; @@ -1047,6 +1074,7 @@ def EE_SLCI_2Q: EE_Inst24<(outs QR:$qs1r, QR:$qs0r), (ins QR:$qs1, QR:$qs0, sel bits<4> sar16; + let Constraints = "$qs1r = $qs1, $qs0r = $qs0"; let Inst{23-22} = 0x3; let Inst{21-20} = qs1{2-1}; let Inst{19-16} = 0xc; @@ -1071,6 +1099,7 @@ def EE_SLCXXP_2Q: EE_Inst24<(outs QR:$qs1r, QR:$qs0r, AR:$asr), (ins QR:$qs1, Q bits<4> ad; + let Constraints = "$qs1r = $qs1, $qs0r = $qs0, $asr = $as"; let Inst{23-22} = 0x2; let Inst{21-20} = qs1{2-1}; let Inst{19-16} = 0x6; @@ -1094,6 +1123,7 @@ def EE_SRCI_2Q: EE_Inst24<(outs QR:$qs1r, QR:$qs0r), (ins QR:$qs1, QR:$qs0, sel bits<4> sar16; + let Constraints = "$qs1r = $qs1, $qs0r = $qs0"; let Inst{23-22} = 0x3; let Inst{21-20} = qs1{2-1}; let Inst{19-16} = 0xc; @@ -1166,6 +1196,7 @@ def EE_SRCQ_128_ST_INCP: EE_Inst24<(outs AR:$asr), (ins QR:$qs0, QR:$qs1, AR:$a let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{23-22} = 0x3; let Inst{21-20} = qs1{2-1}; let Inst{19-16} = 0xc; @@ -1190,6 +1221,7 @@ def EE_SRCXXP_2Q: EE_Inst24<(outs QR:$qs1r, QR:$qs0r, AR:$asr), (ins QR:$qs1, Q bits<4> ad; + let Constraints = "$qs1r = $qs1, $qs0r = $qs0, $asr = $as"; let Inst{23-22} = 0x3; let Inst{21-20} = qs1{2-1}; let Inst{19-16} = 0x6; @@ -1239,6 +1271,7 @@ def EE_SRC_Q_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, of let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-23} = 0x38; let Inst{22-20} = imm16{7-5}; let Inst{19-17} = qu{2-0}; @@ -1266,6 +1299,7 @@ def EE_SRC_Q_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, AR let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-20} = 0x1d0; let Inst{19-17} = qu{2-0}; let Inst{16-14} = qs0{2-0}; @@ -1288,6 +1322,7 @@ def EE_SRC_Q_QUP: EE_Inst24<(outs QR:$qa, QR:$qs0r), (ins QR:$qs0, QR:$qs1), bits<3> qs1; + let Constraints = "$qs0r = $qs0"; let Inst{23-22} = 0x3; let Inst{21-20} = 
qs1{2-1}; let Inst{19-16} = 0xc; @@ -1336,6 +1371,7 @@ def EE_STF_128_IP: EE_Inst32<(outs AR:$asr), (ins FPR:$fv3, FPR:$fv2, FPR:$fv1, let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-24} = 0x12; let Inst{23-20} = fv3{3-0}; let Inst{19-16} = fv2{3-0}; @@ -1362,6 +1398,7 @@ def EE_STF_128_XP: EE_Inst32<(outs AR:$asr), (ins FPR:$fv3, FPR:$fv2, FPR:$fv1, let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-24} = 0x13; let Inst{23-20} = fv3{3-0}; let Inst{19-16} = fv2{3-0}; @@ -1386,6 +1423,7 @@ def EE_STF_64_IP: EE_Inst32<(outs AR:$asr), (ins FPR:$fv1, FPR:$fv0, AR:$as, of let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x38; let Inst{22-16} = imm8{7-1}; let Inst{15-12} = fv1{3-0}; @@ -1410,6 +1448,7 @@ def EE_STF_64_XP: EE_Inst24<(outs AR:$asr), (ins FPR:$fv1, FPR:$fv0, AR:$as, AR let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{23-20} = fv0{3-0}; let Inst{19-16} = 0x7; let Inst{15-12} = fv1{3-0}; @@ -1457,6 +1496,7 @@ def EE_ST_ACCX_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_8:$imm8), let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x0; let Inst{22} = imm8{7}; let Inst{21-15} = 0x4; @@ -1478,6 +1518,7 @@ def EE_ST_QACC_H_H_32_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_4:$ let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x0; let Inst{22} = imm4{7}; let Inst{21-15} = 0x24; @@ -1499,6 +1540,7 @@ def EE_ST_QACC_H_L_128_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_16 let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x0; let Inst{22} = imm16{7}; let Inst{21-15} = 0x1a; @@ -1520,6 +1562,7 @@ def EE_ST_QACC_L_H_32_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_4:$ let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x0; let Inst{22} = imm4{7}; let Inst{21-15} = 0x3a; @@ -1541,6 +1584,7 @@ def EE_ST_QACC_L_L_128_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_16 let mayStore = 1; + let Constraints = 
"$asr = $as"; let Inst{23} = 0x0; let Inst{22} = imm16{7}; let Inst{21-15} = 0x18; @@ -1562,6 +1606,7 @@ def EE_ST_UA_STATE_IP: EE_Inst24<(outs AR:$asr), (ins AR:$as, offset_256_16:$im let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x0; let Inst{22} = imm16{7}; let Inst{21-15} = 0x38; @@ -1610,6 +1655,7 @@ def EE_VADDS_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qa{2-0}; @@ -1635,6 +1681,7 @@ def EE_VADDS_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-20} = 0x1c9; let Inst{19-17} = qa{2-0}; let Inst{16-14} = qx{2-0}; @@ -1684,6 +1731,7 @@ def EE_VADDS_S32_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qa{2-0}; @@ -1709,6 +1757,7 @@ def EE_VADDS_S32_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-20} = 0x1c9; let Inst{19-17} = qa{2-0}; let Inst{16-14} = qx{2-0}; @@ -1758,6 +1807,7 @@ def EE_VADDS_S8_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qa{2-0}; @@ -1783,6 +1833,7 @@ def EE_VADDS_S8_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-20} = 0x1c9; let Inst{19-17} = qa{2-0}; let Inst{16-14} = qx{2-0}; @@ -2044,6 +2095,7 @@ def EE_VLDBC_16_IP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_128_2 let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23-22} = 0x2; let Inst{21-20} = qu{2-1}; let Inst{19-16} = 0x5; @@ -2067,6 +2119,7 @@ def EE_VLDBC_16_XP: EE_Inst24<(outs QR:$qu, AR:$asr), 
(ins AR:$as, AR:$ad), let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23-22} = 0x2; let Inst{21-20} = qu{2-1}; let Inst{19-16} = 0xd; @@ -2113,6 +2166,7 @@ def EE_VLDBC_32_IP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_256_4 let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x1; let Inst{22} = imm4{7}; let Inst{21-20} = qu{2-1}; @@ -2137,6 +2191,7 @@ def EE_VLDBC_32_XP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, AR:$ad), let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23-22} = 0x2; let Inst{21-20} = qu{2-1}; let Inst{19-16} = 0xd; @@ -2183,6 +2238,7 @@ def EE_VLDBC_8_IP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_128_1: let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23-22} = 0x3; let Inst{21-20} = qu{2-1}; let Inst{19-16} = 0x5; @@ -2206,6 +2262,7 @@ def EE_VLDBC_8_XP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, AR:$ad), let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23-22} = 0x2; let Inst{21-20} = qu{2-1}; let Inst{19-16} = 0xd; @@ -2230,6 +2287,7 @@ def EE_VLDHBC_16_INCP: EE_Inst24<(outs QR:$qu, QR:$qu1, AR:$asr), (ins AR:$as), let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23-22} = 0x3; let Inst{21-20} = qu{2-1}; let Inst{19-16} = 0xc; @@ -2254,6 +2312,7 @@ def EE_VLD_128_IP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_256_16 let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x1; let Inst{22} = imm16{7}; let Inst{21-20} = qu{2-1}; @@ -2278,6 +2337,7 @@ def EE_VLD_128_XP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, AR:$ad), let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23-22} = 0x2; let Inst{21-20} = qu{2-1}; let Inst{19-16} = 0xd; @@ -2302,6 +2362,7 @@ def EE_VLD_H_64_IP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_256_8 let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x1; let Inst{22} = imm8{7}; let Inst{21-20} = qu{2-1}; @@ -2326,6 +2387,7 @@ def EE_VLD_H_64_XP: EE_Inst24<(outs 
QR:$qu, AR:$asr), (ins AR:$as, AR:$ad), let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23-22} = 0x2; let Inst{21-20} = qu{2-1}; let Inst{19-16} = 0xd; @@ -2350,6 +2412,7 @@ def EE_VLD_L_64_IP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, offset_256_8 let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x1; let Inst{22} = imm8{7}; let Inst{21-20} = qu{2-1}; @@ -2374,6 +2437,7 @@ def EE_VLD_L_64_XP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, AR:$ad), let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23-22} = 0x2; let Inst{21-20} = qu{2-1}; let Inst{19-16} = 0xd; @@ -2424,6 +2488,7 @@ def EE_VMAX_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qa{2-0}; @@ -2449,6 +2514,7 @@ def EE_VMAX_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-20} = 0x1c9; let Inst{19-17} = qa{2-0}; let Inst{16-14} = qx{2-0}; @@ -2498,6 +2564,7 @@ def EE_VMAX_S32_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qa{2-0}; @@ -2523,6 +2590,7 @@ def EE_VMAX_S32_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-20} = 0x1ca; let Inst{19-17} = qa{2-0}; let Inst{16-14} = qx{2-0}; @@ -2572,6 +2640,7 @@ def EE_VMAX_S8_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qa{2-0}; @@ -2597,6 +2666,7 @@ def EE_VMAX_S8_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-20} = 0x1cb; let Inst{19-17} = qa{2-0}; let Inst{16-14} = qx{2-0}; 
@@ -2646,6 +2716,7 @@ def EE_VMIN_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qa{2-0}; @@ -2671,6 +2742,7 @@ def EE_VMIN_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-20} = 0x1ca; let Inst{19-17} = qa{2-0}; let Inst{16-14} = qx{2-0}; @@ -2720,6 +2792,7 @@ def EE_VMIN_S32_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qa{2-0}; @@ -2745,6 +2818,7 @@ def EE_VMIN_S32_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-20} = 0x1cb; let Inst{19-17} = qa{2-0}; let Inst{16-14} = qx{2-0}; @@ -2794,6 +2868,7 @@ def EE_VMIN_S8_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qa{2-0}; @@ -2819,6 +2894,7 @@ def EE_VMIN_S8_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-20} = 0x1ca; let Inst{19-17} = qa{2-0}; let Inst{16-14} = qx{2-0}; @@ -2864,6 +2940,7 @@ def EE_VMULAS_S16_ACCX_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, of let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-25} = 0xf; let Inst{24-23} = imm16{5-4}; let Inst{22-20} = qu{2-0}; @@ -2893,6 +2970,7 @@ def EE_VMULAS_S16_ACCX_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), ( let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-25} = 0x0; let Inst{24-23} = imm16{5-4}; let Inst{22-20} = qu{2-0}; @@ -2920,6 +2998,7 @@ def EE_VMULAS_S16_ACCX_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR let mayLoad = 1; + let 
Constraints = "$asr = $as"; let Inst{28-23} = 0x3c; let Inst{22-20} = qu{2-0}; let Inst{19-17} = 0x0; @@ -2948,6 +3027,7 @@ def EE_VMULAS_S16_ACCX_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), ( let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-23} = 0x2c; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qs1{2-0}; @@ -2993,6 +3073,7 @@ def EE_VMULAS_S16_QACC_LDBC_INCP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23-21} = 0x4; let Inst{20} = qu{2}; let Inst{19-16} = 0x7; @@ -3022,6 +3103,7 @@ def EE_VMULAS_S16_QACC_LDBC_INCP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qs1{2-0}; @@ -3048,6 +3130,7 @@ def EE_VMULAS_S16_QACC_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, of let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-25} = 0xf; let Inst{24-23} = imm16{5-4}; let Inst{22-20} = qu{2-0}; @@ -3077,6 +3160,7 @@ def EE_VMULAS_S16_QACC_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), ( let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-25} = 0x1; let Inst{24-23} = imm16{5-4}; let Inst{22-20} = qu{2-0}; @@ -3104,6 +3188,7 @@ def EE_VMULAS_S16_QACC_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x3c; let Inst{22-20} = qu{2-0}; let Inst{19-17} = 0x1; @@ -3132,6 +3217,7 @@ def EE_VMULAS_S16_QACC_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), ( let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-23} = 0x2d; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qs1{2-0}; @@ -3178,6 +3264,7 @@ def EE_VMULAS_S8_ACCX_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, off let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-25} = 0xf; let Inst{24-23} = imm16{5-4}; let Inst{22-20} = 
qu{2-0}; @@ -3207,6 +3294,7 @@ def EE_VMULAS_S8_ACCX_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (i let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-25} = 0x2; let Inst{24-23} = imm16{5-4}; let Inst{22-20} = qu{2-0}; @@ -3234,6 +3322,7 @@ def EE_VMULAS_S8_ACCX_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR: let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x3c; let Inst{22-20} = qu{2-0}; let Inst{19-17} = 0x2; @@ -3262,6 +3351,7 @@ def EE_VMULAS_S8_ACCX_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (i let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-23} = 0x2e; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qs1{2-0}; @@ -3307,6 +3397,7 @@ def EE_VMULAS_S8_QACC_LDBC_INCP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23-21} = 0x5; let Inst{20} = qu{2}; let Inst{19-16} = 0x7; @@ -3336,6 +3427,7 @@ def EE_VMULAS_S8_QACC_LDBC_INCP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r) let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qs1{2-0}; @@ -3362,6 +3454,7 @@ def EE_VMULAS_S8_QACC_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, off let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-25} = 0xf; let Inst{24-23} = imm16{5-4}; let Inst{22-20} = qu{2-0}; @@ -3391,6 +3484,7 @@ def EE_VMULAS_S8_QACC_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (i let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-25} = 0x3; let Inst{24-23} = imm16{5-4}; let Inst{22-20} = qu{2-0}; @@ -3418,6 +3512,7 @@ def EE_VMULAS_S8_QACC_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR: let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x3c; let Inst{22-20} = qu{2-0}; let Inst{19-17} = 0x3; @@ -3446,6 +3541,7 @@ def EE_VMULAS_S8_QACC_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, 
QR:$qs0r), (i let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-23} = 0x2f; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qs1{2-0}; @@ -3492,6 +3588,7 @@ def EE_VMULAS_U16_ACCX_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, of let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-25} = 0xf; let Inst{24-23} = imm16{5-4}; let Inst{22-20} = qu{2-0}; @@ -3521,6 +3618,7 @@ def EE_VMULAS_U16_ACCX_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), ( let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-25} = 0x4; let Inst{24-23} = imm16{5-4}; let Inst{22-20} = qu{2-0}; @@ -3548,6 +3646,7 @@ def EE_VMULAS_U16_ACCX_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x3c; let Inst{22-20} = qu{2-0}; let Inst{19-17} = 0x4; @@ -3576,6 +3675,7 @@ def EE_VMULAS_U16_ACCX_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), ( let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-23} = 0x30; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qs1{2-0}; @@ -3621,6 +3721,7 @@ def EE_VMULAS_U16_QACC_LDBC_INCP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23-21} = 0x6; let Inst{20} = qu{2}; let Inst{19-16} = 0x7; @@ -3650,6 +3751,7 @@ def EE_VMULAS_U16_QACC_LDBC_INCP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qs1{2-0}; @@ -3676,6 +3778,7 @@ def EE_VMULAS_U16_QACC_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, of let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-25} = 0xf; let Inst{24-23} = imm16{5-4}; let Inst{22-20} = qu{2-0}; @@ -3705,6 +3808,7 @@ def EE_VMULAS_U16_QACC_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), ( let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-25} = 
0x5; let Inst{24-23} = imm16{5-4}; let Inst{22-20} = qu{2-0}; @@ -3732,6 +3836,7 @@ def EE_VMULAS_U16_QACC_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x3c; let Inst{22-20} = qu{2-0}; let Inst{19-17} = 0x5; @@ -3760,6 +3865,7 @@ def EE_VMULAS_U16_QACC_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), ( let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-23} = 0x31; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qs1{2-0}; @@ -3806,6 +3912,7 @@ def EE_VMULAS_U8_ACCX_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, off let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-25} = 0xf; let Inst{24-23} = imm16{5-4}; let Inst{22-20} = qu{2-0}; @@ -3835,6 +3942,7 @@ def EE_VMULAS_U8_ACCX_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (i let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-25} = 0x6; let Inst{24-23} = imm16{5-4}; let Inst{22-20} = qu{2-0}; @@ -3862,6 +3970,7 @@ def EE_VMULAS_U8_ACCX_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR: let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x3c; let Inst{22-20} = qu{2-0}; let Inst{19-17} = 0x6; @@ -3890,6 +3999,7 @@ def EE_VMULAS_U8_ACCX_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (i let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-23} = 0x32; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qs1{2-0}; @@ -3935,6 +4045,7 @@ def EE_VMULAS_U8_QACC_LDBC_INCP: EE_Inst24<(outs QR:$qu, AR:$asr), (ins AR:$as, let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{23-21} = 0x7; let Inst{20} = qu{2}; let Inst{19-16} = 0x7; @@ -3964,6 +4075,7 @@ def EE_VMULAS_U8_QACC_LDBC_INCP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r) let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qs1{2-0}; @@ -3990,6 +4102,7 @@ def 
EE_VMULAS_U8_QACC_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, off let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-25} = 0xf; let Inst{24-23} = imm16{5-4}; let Inst{22-20} = qu{2-0}; @@ -4019,6 +4132,7 @@ def EE_VMULAS_U8_QACC_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (i let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-25} = 0x7; let Inst{24-23} = imm16{5-4}; let Inst{22-20} = qu{2-0}; @@ -4046,6 +4160,7 @@ def EE_VMULAS_U8_QACC_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR: let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x3c; let Inst{22-20} = qu{2-0}; let Inst{19-17} = 0x7; @@ -4074,6 +4189,7 @@ def EE_VMULAS_U8_QACC_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (i let mayLoad = 1; + let Constraints = "$asr = $as, $qs0r = $qs0"; let Inst{28-23} = 0x33; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qs1{2-0}; @@ -4124,6 +4240,7 @@ def EE_VMUL_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$as, let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qz{2-0}; @@ -4149,6 +4266,7 @@ def EE_VMUL_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qz), (ins QR:$qv, AR:$as, let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-20} = 0x1cb; let Inst{19-17} = qz{2-0}; let Inst{16-14} = qx{2-0}; @@ -4198,6 +4316,7 @@ def EE_VMUL_S8_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$as, let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qz{2-0}; @@ -4223,6 +4342,7 @@ def EE_VMUL_S8_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qz), (ins QR:$qv, AR:$as, let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-20} = 0x1ca; let Inst{19-17} = qz{2-0}; let Inst{16-14} = qx{2-0}; @@ -4272,6 +4392,7 @@ def EE_VMUL_U16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$as, let mayLoad = 1; + let Constraints = "$asr = 
$as"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qz{2-0}; @@ -4297,6 +4418,7 @@ def EE_VMUL_U16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qz), (ins QR:$qv, AR:$as, let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-20} = 0x1cb; let Inst{19-17} = qz{2-0}; let Inst{16-14} = qx{2-0}; @@ -4346,6 +4468,7 @@ def EE_VMUL_U8_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$as, let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qz{2-0}; @@ -4371,6 +4494,7 @@ def EE_VMUL_U8_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qz), (ins QR:$qv, AR:$as, let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-20} = 0x1d1; let Inst{19-17} = qz{2-0}; let Inst{16-14} = qx{2-0}; @@ -4445,6 +4569,7 @@ def EE_VRELU_S16: EE_Inst24<(outs QR:$qsr), (ins QR:$qs, AR:$ax, AR:$ay), bits<4> ay; + let Constraints = "$qsr = $qs"; let Inst{23-22} = 0x3; let Inst{21-20} = qs{2-1}; let Inst{19-16} = 0xd; @@ -4468,6 +4593,7 @@ def EE_VRELU_S8: EE_Inst24<(outs QR:$qsr), (ins QR:$qs, AR:$ax, AR:$ay), bits<4> ay; + let Constraints = "$qsr = $qs"; let Inst{23-22} = 0x3; let Inst{21-20} = qs{2-1}; let Inst{19-16} = 0xd; @@ -4539,6 +4665,7 @@ def EE_VSMULAS_S16_QACC_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = sel8{2-0}; @@ -4590,6 +4717,7 @@ def EE_VSMULAS_S8_QACC_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = sel16{3-1}; @@ -4636,6 +4764,7 @@ def EE_VST_128_IP: EE_Inst24<(outs AR:$asr), (ins QR:$qv, AR:$as, offset_256_16 let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x1; let Inst{22} = imm16{7}; let Inst{21-20} = qv{2-1}; @@ -4660,6 +4789,7 @@ def EE_VST_128_XP: EE_Inst24<(outs AR:$asr), (ins QR:$qv, AR:$as, AR:$ad), 
let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{23-22} = 0x2; let Inst{21-20} = qv{2-1}; let Inst{19-16} = 0xd; @@ -4684,6 +4814,7 @@ def EE_VST_H_64_IP: EE_Inst24<(outs AR:$asr), (ins QR:$qv, AR:$as, offset_256_8 let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x1; let Inst{22} = imm8{7}; let Inst{21-20} = qv{2-1}; @@ -4708,6 +4839,7 @@ def EE_VST_H_64_XP: EE_Inst24<(outs AR:$asr), (ins QR:$qv, AR:$as, AR:$ad), let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{23-22} = 0x3; let Inst{21-20} = qv{2-1}; let Inst{19-16} = 0xd; @@ -4732,6 +4864,7 @@ def EE_VST_L_64_IP: EE_Inst24<(outs AR:$asr), (ins QR:$qv, AR:$as, offset_256_8 let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{23} = 0x1; let Inst{22} = imm8{7}; let Inst{21-20} = qv{2-1}; @@ -4756,6 +4889,7 @@ def EE_VST_L_64_XP: EE_Inst24<(outs AR:$asr), (ins QR:$qv, AR:$as, AR:$ad), let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{23-22} = 0x3; let Inst{21-20} = qv{2-1}; let Inst{19-16} = 0xd; @@ -4806,6 +4940,7 @@ def EE_VSUBS_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qa{2-0}; @@ -4831,6 +4966,7 @@ def EE_VSUBS_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-20} = 0x1d1; let Inst{19-17} = qa{2-0}; let Inst{16-14} = qx{2-0}; @@ -4880,6 +5016,7 @@ def EE_VSUBS_S32_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qa{2-0}; @@ -4905,6 +5042,7 @@ def EE_VSUBS_S32_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-20} = 0x1d1; let Inst{19-17} = qa{2-0}; let Inst{16-14} = qx{2-0}; @@ -4954,6 +5092,7 @@ def EE_VSUBS_S8_LD_INCP: 
EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, let mayLoad = 1; + let Constraints = "$asr = $as"; let Inst{28-23} = 0x38; let Inst{22-20} = qu{2-0}; let Inst{19-17} = qa{2-0}; @@ -4979,6 +5118,7 @@ def EE_VSUBS_S8_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, let mayStore = 1; + let Constraints = "$asr = $as"; let Inst{28-20} = 0x1d1; let Inst{19-17} = qa{2-0}; let Inst{16-14} = qx{2-0}; @@ -5000,6 +5140,7 @@ def EE_VUNZIP_16: EE_Inst24<(outs QR:$qs0r, QR:$qs1r), (ins QR:$qs0, QR:$qs1), bits<3> qs1; + let Constraints = "$qs0r = $qs0, $qs1r = $qs1"; let Inst{23-22} = 0x3; let Inst{21-20} = qs1{2-1}; let Inst{19-16} = 0xc; @@ -5020,6 +5161,7 @@ def EE_VUNZIP_32: EE_Inst24<(outs QR:$qs0r, QR:$qs1r), (ins QR:$qs0, QR:$qs1), bits<3> qs1; + let Constraints = "$qs0r = $qs0, $qs1r = $qs1"; let Inst{23-22} = 0x3; let Inst{21-20} = qs1{2-1}; let Inst{19-16} = 0xc; @@ -5040,6 +5182,7 @@ def EE_VUNZIP_8: EE_Inst24<(outs QR:$qs0r, QR:$qs1r), (ins QR:$qs0, QR:$qs1), bits<3> qs1; + let Constraints = "$qs0r = $qs0, $qs1r = $qs1"; let Inst{23-22} = 0x3; let Inst{21-20} = qs1{2-1}; let Inst{19-16} = 0xc; @@ -5060,6 +5203,7 @@ def EE_VZIP_16: EE_Inst24<(outs QR:$qs0r, QR:$qs1r), (ins QR:$qs0, QR:$qs1), bits<3> qs1; + let Constraints = "$qs0r = $qs0, $qs1r = $qs1"; let Inst{23-22} = 0x3; let Inst{21-20} = qs1{2-1}; let Inst{19-16} = 0xc; @@ -5080,6 +5224,7 @@ def EE_VZIP_32: EE_Inst24<(outs QR:$qs0r, QR:$qs1r), (ins QR:$qs0, QR:$qs1), bits<3> qs1; + let Constraints = "$qs0r = $qs0, $qs1r = $qs1"; let Inst{23-22} = 0x3; let Inst{21-20} = qs1{2-1}; let Inst{19-16} = 0xc; @@ -5100,6 +5245,7 @@ def EE_VZIP_8: EE_Inst24<(outs QR:$qs0r, QR:$qs1r), (ins QR:$qs0, QR:$qs1), bits<3> qs1; + let Constraints = "$qs0r = $qs0, $qs1r = $qs1"; let Inst{23-22} = 0x3; let Inst{21-20} = qs1{2-1}; let Inst{19-16} = 0xc; diff --git a/llvm/lib/Target/Xtensa/XtensaS3ISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaS3ISelLowering.cpp index 181a5ae59ee31..1cf1b80a3826a 100644 --- 
a/llvm/lib/Target/Xtensa/XtensaS3ISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaS3ISelLowering.cpp @@ -186,10 +186,10 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "argument, it must be in range [0,7]"); MachineOperand &SEL2 = MI.getOperand(7); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QZVal) - .addReg(Xtensa::Q0 + QZ1Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) + .addReg(Xtensa::Q0 + QZ1Val, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal) @@ -230,10 +230,10 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "argument, it must be in range [0,7]"); MachineOperand &SEL2 = MI.getOperand(7); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QZVal) - .addReg(Xtensa::Q0 + QZ1Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) + .addReg(Xtensa::Q0 + QZ1Val, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal) @@ -274,10 +274,10 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "argument, it must be in range [0,7]"); MachineOperand &SEL2 = MI.getOperand(7); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QZVal) - .addReg(Xtensa::Q0 + QZ1Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) + .addReg(Xtensa::Q0 + QZ1Val, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal) @@ -316,9 +316,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "argument, it must be in range 
[0,7]"); MachineOperand &SEL2 = MI.getOperand(7); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QZ1Val) - .addReg(R1, RegState::Undef) - .addReg(R2, RegState::Undef) + .addReg(Xtensa::Q0 + QZ1Val, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(R2, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS0.getReg()) .addReg(AS.getReg()) @@ -354,9 +354,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "argument, it must be in range [0,7]"); MachineOperand &SEL8 = MI.getOperand(6); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -388,7 +388,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( MachineOperand &UPD4 = MI.getOperand(6); MachineOperand &SAR4 = MI.getOperand(7); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal) .addReg(Xtensa::Q0 + QVVal) @@ -421,8 +421,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "it must be in range [0,7]"); MachineOperand &SEL2 = MI.getOperand(4); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QA0Val) - .addReg(Xtensa::Q0 + QA1Val) + .addReg(Xtensa::Q0 + QA0Val, RegState::Define) + .addReg(Xtensa::Q0 + QA1Val, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal) .addImm(SEL2.getImm()); @@ -449,8 +449,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &SAR4 = MI.getOperand(4); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QA0Val) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QA0Val, RegState::Define) + .addReg(R1, 
RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal) .addReg(AS.getReg()) @@ -470,7 +470,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &SAR2 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addImm(SAR2.getImm()); @@ -480,20 +480,21 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( } case Xtensa::EE_LDF_128_IP_P: { unsigned Opc = Xtensa::EE_LDF_128_IP; - MachineOperand &FU3 = MI.getOperand(0); - MachineOperand &FU2 = MI.getOperand(1); - MachineOperand &FU1 = MI.getOperand(2); - MachineOperand &FU0 = MI.getOperand(3); - MachineOperand &AS = MI.getOperand(4); const TargetRegisterClass *RC = getRegClassFor(MVT::i32); - unsigned R1 = MRI.createVirtualRegister(RC); + const TargetRegisterClass *RCFR = &Xtensa::FPRRegClass; + unsigned R1 = MRI.createVirtualRegister(RCFR); + unsigned R2 = MRI.createVirtualRegister(RCFR); + unsigned R3 = MRI.createVirtualRegister(RCFR); + unsigned R4 = MRI.createVirtualRegister(RCFR); + MachineOperand &AS = MI.getOperand(4); + unsigned R5 = MRI.createVirtualRegister(RC); MachineOperand &IMM16F = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(FU3.getReg()) - .addReg(FU2.getReg()) - .addReg(FU1.getReg()) - .addReg(FU0.getReg()) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) + .addReg(R2, RegState::Define) + .addReg(R3, RegState::Define) + .addReg(R4, RegState::Define) + .addReg(R5, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16F.getImm()); @@ -502,20 +503,21 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( } case Xtensa::EE_LDF_128_XP_P: { unsigned Opc = Xtensa::EE_LDF_128_XP; - MachineOperand &FU3 = MI.getOperand(0); - MachineOperand &FU2 = MI.getOperand(1); - MachineOperand &FU1 = MI.getOperand(2); - MachineOperand &FU0 = 
MI.getOperand(3); - MachineOperand &AS = MI.getOperand(4); const TargetRegisterClass *RC = getRegClassFor(MVT::i32); - unsigned R1 = MRI.createVirtualRegister(RC); + const TargetRegisterClass *RCFR = &Xtensa::FPRRegClass; + unsigned R1 = MRI.createVirtualRegister(RCFR); + unsigned R2 = MRI.createVirtualRegister(RCFR); + unsigned R3 = MRI.createVirtualRegister(RCFR); + unsigned R4 = MRI.createVirtualRegister(RCFR); + MachineOperand &AS = MI.getOperand(4); + unsigned R5 = MRI.createVirtualRegister(RC); MachineOperand &AD = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(FU3.getReg()) - .addReg(FU2.getReg()) - .addReg(FU1.getReg()) - .addReg(FU0.getReg()) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) + .addReg(R2, RegState::Define) + .addReg(R3, RegState::Define) + .addReg(R4, RegState::Define) + .addReg(R5, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()); @@ -524,16 +526,17 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( } case Xtensa::EE_LDF_64_IP_P: { unsigned Opc = Xtensa::EE_LDF_64_IP; - MachineOperand &FU1 = MI.getOperand(0); - MachineOperand &FU0 = MI.getOperand(1); - MachineOperand &AS = MI.getOperand(2); const TargetRegisterClass *RC = getRegClassFor(MVT::i32); - unsigned R1 = MRI.createVirtualRegister(RC); + const TargetRegisterClass *RCFR = getRegClassFor(MVT::f32); + unsigned R1 = MRI.createVirtualRegister(RCFR); + unsigned R2 = MRI.createVirtualRegister(RCFR); + MachineOperand &AS = MI.getOperand(2); + unsigned R3 = MRI.createVirtualRegister(RC); MachineOperand &IMM8 = MI.getOperand(3); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(FU1.getReg()) - .addReg(FU0.getReg()) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) + .addReg(R2, RegState::Define) + .addReg(R3, RegState::Define) .addReg(AS.getReg()) .addImm(IMM8.getImm()); @@ -542,16 +545,17 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( } case Xtensa::EE_LDF_64_XP_P: { unsigned Opc = 
Xtensa::EE_LDF_64_XP; - MachineOperand &FU1 = MI.getOperand(0); - MachineOperand &FU0 = MI.getOperand(1); - MachineOperand &AS = MI.getOperand(2); const TargetRegisterClass *RC = getRegClassFor(MVT::i32); - unsigned R1 = MRI.createVirtualRegister(RC); + const TargetRegisterClass *RCFR = &Xtensa::FPRRegClass; + unsigned R1 = MRI.createVirtualRegister(RCFR); + unsigned R2 = MRI.createVirtualRegister(RCFR); + MachineOperand &AS = MI.getOperand(2); + unsigned R3 = MRI.createVirtualRegister(RC); MachineOperand &AD = MI.getOperand(3); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(FU1.getReg()) - .addReg(FU0.getReg()) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) + .addReg(R2, RegState::Define) + .addReg(R3, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()); @@ -565,7 +569,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM16 = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()); @@ -579,7 +583,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &AD = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()); @@ -593,7 +597,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM16 = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()); @@ -607,7 +611,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &AD = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, 
RegState::Undef) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()); @@ -621,7 +625,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM16 = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()); @@ -635,7 +639,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &AD = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()); @@ -649,7 +653,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM16 = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()); @@ -663,7 +667,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &AD = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()); @@ -684,7 +688,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( MachineOperand &SEL4 = MI.getOperand(3); MachineOperand &SEL8 = MI.getOperand(4); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) .addReg(Xtensa::Q0 + QSVal) .addReg(AS.getReg()) .addImm(SEL4.getImm()) @@ -704,8 +708,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM16 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - 
.addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()); @@ -723,8 +727,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &AD = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()); @@ -738,7 +742,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM8 = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM8.getImm()); @@ -752,7 +756,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM4 = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM4.getImm()); @@ -766,7 +770,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM16 = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()); @@ -780,7 +784,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM4 = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM4.getImm()); @@ -794,7 +798,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand 
&IMM16 = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()); @@ -808,7 +812,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM16 = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()); @@ -821,10 +825,11 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned QSVal = QS.getImm(); assert(QSVal < 8 && "Unexpected value of ee_movi_32_a first argument, it " "must be in range [0,7]"); - MachineOperand &AU = MI.getOperand(1); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &SEL4 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(AU.getReg()) + .addReg(R1, RegState::Define) .addReg(Xtensa::Q0 + QSVal) .addImm(SEL4.getImm()); @@ -840,7 +845,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( MachineOperand &AS = MI.getOperand(1); MachineOperand &SEL4 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) .addReg(AS.getReg()) .addImm(SEL4.getImm()); @@ -902,7 +907,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QXVal < 8 && "Unexpected value of ee_notq first argument, it must " "be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal); MI.eraseFromParent(); @@ -926,7 +931,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( QYVal < 8 && "Unexpected value of ee_orq first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + 
.addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -945,8 +950,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "must be in range [0,7]"); MachineOperand &SAR16 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QS1Val) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(Xtensa::Q0 + QS1Val) .addReg(Xtensa::Q0 + QS0Val) .addImm(SAR16.getImm()); @@ -969,9 +974,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &AD = MI.getOperand(3); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QS1Val) - .addReg(Xtensa::Q0 + QS0Val) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS1Val, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) + .addReg(R1, RegState::Define) .addReg(Xtensa::Q0 + QS1Val) .addReg(Xtensa::Q0 + QS0Val) .addReg(AS.getReg()) @@ -992,8 +997,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "must be in range [0,7]"); MachineOperand &SAR16 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QS1Val) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QS1Val, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(Xtensa::Q0 + QS1Val) .addReg(Xtensa::Q0 + QS0Val) .addImm(SAR16.getImm()); @@ -1010,7 +1015,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( MachineOperand &AS = MI.getOperand(1); MachineOperand &SEL2 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) .addReg(AS.getReg()) .addImm(SEL2.getImm()); @@ -1026,7 +1031,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( MachineOperand &AS = MI.getOperand(1); MachineOperand &SEL2 = 
MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) .addReg(AS.getReg()) .addImm(SEL2.getImm()); @@ -1047,7 +1052,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( const TargetRegisterClass *RC = getRegClassFor(MVT::i32); unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(Xtensa::Q0 + QS0Val) .addReg(Xtensa::Q0 + QS1Val) .addReg(AS.getReg()); @@ -1070,9 +1075,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &AD = MI.getOperand(3); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QS1Val) - .addReg(Xtensa::Q0 + QS0Val) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QS1Val, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) + .addReg(R1, RegState::Define) .addReg(Xtensa::Q0 + QS1Val) .addReg(Xtensa::Q0 + QS0Val) .addReg(AS.getReg()) @@ -1096,7 +1101,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_src_q first argument, it must " "be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QS0Val) .addReg(Xtensa::Q0 + QS1Val); @@ -1122,9 +1127,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_src_q_ld_ip first argument, " "it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()) .addReg(Xtensa::Q0 + QS0Val) @@ -1152,9 +1157,9 @@ 
MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_src_q_ld_xp first argument, " "it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()) .addReg(Xtensa::Q0 + QS0Val) @@ -1178,8 +1183,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_src_q_qup first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(Xtensa::Q0 + QS0Val) .addReg(Xtensa::Q0 + QS1Val); @@ -1188,11 +1193,12 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( } case Xtensa::EE_SRS_ACCX_P: { unsigned Opc = Xtensa::EE_SRS_ACCX; - MachineOperand &AU = MI.getOperand(0); + const TargetRegisterClass *RC = getRegClassFor(MVT::i32); + unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &AS = MI.getOperand(1); MachineOperand &SEL2 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(AU.getReg()) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(SEL2.getImm()); @@ -1210,7 +1216,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM16F = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(FV3.getReg()) .addReg(FV2.getReg()) .addReg(FV1.getReg()) @@ -1232,7 +1238,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &AD = 
MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(FV3.getReg()) .addReg(FV2.getReg()) .addReg(FV1.getReg()) @@ -1252,7 +1258,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM8 = MI.getOperand(3); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(FV1.getReg()) .addReg(FV0.getReg()) .addReg(AS.getReg()) @@ -1270,7 +1276,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &AD = MI.getOperand(3); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(FV1.getReg()) .addReg(FV0.getReg()) .addReg(AS.getReg()) @@ -1309,7 +1315,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM8 = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM8.getImm()); @@ -1323,7 +1329,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM4 = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM4.getImm()); @@ -1337,7 +1343,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM16 = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()); @@ -1351,7 +1357,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); 
MachineOperand &IMM4 = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM4.getImm()); @@ -1365,7 +1371,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM16 = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()); @@ -1379,7 +1385,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM16 = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()); @@ -1401,7 +1407,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vadds_s16 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -1430,9 +1436,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vadds_s16_ld_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -1462,8 +1468,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vadds_s16_st_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, 
RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -1487,7 +1493,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vadds_s32 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -1516,9 +1522,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vadds_s32_ld_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -1548,8 +1554,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vadds_s32_st_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -1573,7 +1579,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vadds_s8 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -1602,9 +1608,9 @@ MachineBasicBlock 
*XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vadds_s8_ld_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -1634,8 +1640,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vadds_s8_st_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -1659,7 +1665,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vcmp_eq_s16 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -1681,7 +1687,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vcmp_eq_s32 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -1703,7 +1709,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vcmp_eq_s8 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, 
RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -1725,7 +1731,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vcmp_gt_s16 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -1747,7 +1753,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vcmp_gt_s32 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -1769,7 +1775,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vcmp_gt_s8 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -1791,7 +1797,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vcmp_lt_s16 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -1813,7 +1819,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vcmp_lt_s32 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -1835,7 +1841,7 @@ MachineBasicBlock 
*XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vcmp_lt_s8 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -1850,7 +1856,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "must be in range [0,7]"); MachineOperand &AS = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) .addReg(AS.getReg()); MI.eraseFromParent(); @@ -1867,8 +1873,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM2 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM2.getImm()); @@ -1886,8 +1892,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &AD = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()); @@ -1902,7 +1908,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "must be in range [0,7]"); MachineOperand &AS = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) .addReg(AS.getReg()); MI.eraseFromParent(); @@ -1919,8 +1925,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM4 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - 
.addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM4.getImm()); @@ -1938,8 +1944,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &AD = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()); @@ -1954,7 +1960,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "must be in range [0,7]"); MachineOperand &AS = MI.getOperand(1); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) .addReg(AS.getReg()); MI.eraseFromParent(); @@ -1971,8 +1977,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM1 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM1.getImm()); @@ -1990,8 +1996,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &AD = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()); @@ -2012,9 +2018,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( const TargetRegisterClass *RC = getRegClassFor(MVT::i32); unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(Xtensa::Q0 + QU1Val) 
- .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(Xtensa::Q0 + QU1Val, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()); MI.eraseFromParent(); @@ -2031,8 +2037,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM16 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()); @@ -2050,8 +2056,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &AD = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()); @@ -2069,8 +2075,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM8 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM8.getImm()); @@ -2088,8 +2094,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &AD = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()); @@ -2107,8 +2113,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM8 = MI.getOperand(2); 
BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM8.getImm()); @@ -2126,8 +2132,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &AD = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()); @@ -2149,7 +2155,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmax_s16 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -2178,9 +2184,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmax_s16_ld_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -2210,8 +2216,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmax_s16_st_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -2235,7 
+2241,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmax_s32 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -2264,9 +2270,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmax_s32_ld_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -2296,8 +2302,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmax_s32_st_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -2321,7 +2327,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmax_s8 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -2350,9 +2356,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmax_s8_ld_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, 
RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -2382,8 +2388,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmax_s8_st_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -2407,7 +2413,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmin_s16 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -2436,9 +2442,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmin_s16_ld_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -2468,8 +2474,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmin_s16_st_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) 
.addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -2493,7 +2499,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmin_s32 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -2522,9 +2528,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmin_s32_ld_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -2554,8 +2560,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmin_s32_st_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -2579,7 +2585,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmin_s8 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -2608,9 +2614,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmin_s8_ld_incp first " "argument, it must be in range 
[0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -2640,8 +2646,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmin_s8_st_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -2686,8 +2692,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmulas_s16_accx_ld_ip first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()) .addReg(Xtensa::Q0 + QXVal) @@ -2723,9 +2729,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vmulas_s16_accx_ld_ip_qup " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()) .addReg(Xtensa::Q0 + QXVal) @@ -2755,8 +2761,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmulas_s16_accx_ld_xp first " "argument, it must be in 
range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -2792,9 +2798,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vmulas_s16_accx_ld_xp_qup " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -2840,8 +2846,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ldbc_incp " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -2875,9 +2881,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vmulas_s16_qacc_ldbc_incp_qup " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal) @@ -2906,8 +2912,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ld_ip first " 
"argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()) .addReg(Xtensa::Q0 + QXVal) @@ -2943,9 +2949,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vmulas_s16_qacc_ld_ip_qup " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()) .addReg(Xtensa::Q0 + QXVal) @@ -2975,8 +2981,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmulas_s16_qacc_ld_xp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -3012,9 +3018,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vmulas_s16_qacc_ld_xp_qup " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -3061,8 +3067,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmulas_s8_accx_ld_ip first " 
"argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()) .addReg(Xtensa::Q0 + QXVal) @@ -3098,9 +3104,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vmulas_s8_accx_ld_ip_qup " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()) .addReg(Xtensa::Q0 + QXVal) @@ -3130,8 +3136,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmulas_s8_accx_ld_xp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -3167,9 +3173,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vmulas_s8_accx_ld_xp_qup " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -3215,8 +3221,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmulas_s8_qacc_ldbc_incp first " 
"argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -3250,9 +3256,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vmulas_s8_qacc_ldbc_incp_qup " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal) @@ -3281,8 +3287,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmulas_s8_qacc_ld_ip first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()) .addReg(Xtensa::Q0 + QXVal) @@ -3318,9 +3324,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vmulas_s8_qacc_ld_ip_qup " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()) .addReg(Xtensa::Q0 + QXVal) @@ -3350,8 +3356,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of 
ee_vmulas_s8_qacc_ld_xp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -3387,9 +3393,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vmulas_s8_qacc_ld_xp_qup " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -3436,8 +3442,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmulas_u16_accx_ld_ip first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()) .addReg(Xtensa::Q0 + QXVal) @@ -3473,9 +3479,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vmulas_u16_accx_ld_ip_qup " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()) .addReg(Xtensa::Q0 + QXVal) @@ -3505,8 +3511,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of 
ee_vmulas_u16_accx_ld_xp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -3542,9 +3548,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vmulas_u16_accx_ld_xp_qup " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -3590,8 +3596,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmulas_u16_qacc_ldbc_incp " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -3625,9 +3631,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vmulas_u16_qacc_ldbc_incp_qup " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal) @@ -3656,8 +3662,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && 
"Unexpected value of ee_vmulas_u16_qacc_ld_ip first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()) .addReg(Xtensa::Q0 + QXVal) @@ -3693,9 +3699,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vmulas_u16_qacc_ld_ip_qup " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()) .addReg(Xtensa::Q0 + QXVal) @@ -3725,8 +3731,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmulas_u16_qacc_ld_xp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -3762,9 +3768,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vmulas_u16_qacc_ld_xp_qup " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -3811,8 +3817,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && 
"Unexpected value of ee_vmulas_u8_accx_ld_ip first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()) .addReg(Xtensa::Q0 + QXVal) @@ -3848,9 +3854,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vmulas_u8_accx_ld_ip_qup " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()) .addReg(Xtensa::Q0 + QXVal) @@ -3880,8 +3886,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmulas_u8_accx_ld_xp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -3917,9 +3923,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vmulas_u8_accx_ld_xp_qup " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -3965,8 +3971,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && 
"Unexpected value of ee_vmulas_u8_qacc_ldbc_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -4000,9 +4006,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vmulas_u8_qacc_ldbc_incp_qup " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal) @@ -4031,8 +4037,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmulas_u8_qacc_ld_ip first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()) .addReg(Xtensa::Q0 + QXVal) @@ -4068,9 +4074,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vmulas_u8_qacc_ld_ip_qup " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addImm(IMM16.getImm()) .addReg(Xtensa::Q0 + QXVal) @@ -4100,8 +4106,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( 
assert(QYVal < 8 && "Unexpected value of ee_vmulas_u8_qacc_ld_xp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -4137,9 +4143,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vmulas_u8_qacc_ld_xp_qup " "first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QS0Val) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) .addReg(AS.getReg()) .addReg(AD.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -4165,7 +4171,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmul_s16 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -4194,9 +4200,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmul_s16_ld_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -4226,8 +4232,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmul_s16_st_incp first " "argument, it must be in range 
[0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QZVal) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -4251,7 +4257,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmul_s8 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -4280,9 +4286,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmul_s8_ld_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -4312,8 +4318,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmul_s8_st_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QZVal) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -4337,7 +4343,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmul_u16 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -4366,9 
+4372,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmul_u16_ld_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -4398,8 +4404,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmul_u16_st_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QZVal) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -4423,7 +4429,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmul_u8 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -4452,9 +4458,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vmul_u8_ld_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -4484,8 +4490,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 
&& "Unexpected value of ee_vmul_u8_st_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QZVal) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -4510,7 +4516,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "must be in range [0,7]"); MachineOperand &AY = MI.getOperand(3); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal) .addReg(AY.getReg()); @@ -4534,7 +4540,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "must be in range [0,7]"); MachineOperand &AY = MI.getOperand(3); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QZVal) + .addReg(Xtensa::Q0 + QZVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal) .addReg(AY.getReg()); @@ -4551,7 +4557,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( MachineOperand &AX = MI.getOperand(1); MachineOperand &AY = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QSVal) + .addReg(Xtensa::Q0 + QSVal, RegState::Define) .addReg(Xtensa::Q0 + QSVal) .addReg(AX.getReg()) .addReg(AY.getReg()); @@ -4568,7 +4574,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( MachineOperand &AX = MI.getOperand(1); MachineOperand &AY = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QSVal) + .addReg(Xtensa::Q0 + QSVal, RegState::Define) .addReg(Xtensa::Q0 + QSVal) .addReg(AX.getReg()) .addReg(AY.getReg()); @@ -4587,7 +4593,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QSVal < 8 && "Unexpected value of ee_vsl_32 first argument, it must " "be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - 
.addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QSVal); MI.eraseFromParent(); @@ -4631,8 +4637,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "argument, it must be in range [0,7]"); MachineOperand &SEL8 = MI.getOperand(4); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal) @@ -4679,8 +4685,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( "argument, it must be in range [0,7]"); MachineOperand &SEL16 = MI.getOperand(4); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal) @@ -4700,7 +4706,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QSVal < 8 && "Unexpected value of ee_vsr_32 first argument, it must " "be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QSVal); MI.eraseFromParent(); @@ -4717,7 +4723,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM16 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addImm(IMM16.getImm()); @@ -4736,7 +4742,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &AD = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(Xtensa::Q0 
+ QVVal) .addReg(AS.getReg()) .addReg(AD.getReg()); @@ -4755,7 +4761,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM8 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addImm(IMM8.getImm()); @@ -4774,7 +4780,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &AD = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addReg(AD.getReg()); @@ -4793,7 +4799,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &IMM8 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addImm(IMM8.getImm()); @@ -4812,7 +4818,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned R1 = MRI.createVirtualRegister(RC); MachineOperand &AD = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) + .addReg(R1, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addReg(AD.getReg()); @@ -4835,7 +4841,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vsubs_s16 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -4864,9 +4870,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vsubs_s16_ld_incp first " "argument, 
it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -4896,8 +4902,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vsubs_s16_st_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -4921,7 +4927,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vsubs_s32 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -4950,9 +4956,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vsubs_s32_ld_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -4982,8 +4988,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vsubs_s32_st_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) 
- .addReg(Xtensa::Q0 + QAVal) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -5007,7 +5013,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vsubs_s8 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -5036,9 +5042,9 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vsubs_s8_ld_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QUVal) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -5068,8 +5074,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_vsubs_s8_st_incp first " "argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Undef) - .addReg(Xtensa::Q0 + QAVal) + .addReg(R1, RegState::Define) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QVVal) .addReg(AS.getReg()) .addReg(Xtensa::Q0 + QXVal) @@ -5089,8 +5095,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vunzip_16 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QS0Val) - .addReg(Xtensa::Q0 + QS1Val) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) + .addReg(Xtensa::Q0 + QS1Val, RegState::Define) .addReg(Xtensa::Q0 + QS0Val) .addReg(Xtensa::Q0 + 
QS1Val); @@ -5108,8 +5114,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vunzip_32 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QS0Val) - .addReg(Xtensa::Q0 + QS1Val) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) + .addReg(Xtensa::Q0 + QS1Val, RegState::Define) .addReg(Xtensa::Q0 + QS0Val) .addReg(Xtensa::Q0 + QS1Val); @@ -5127,8 +5133,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vunzip_8 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QS0Val) - .addReg(Xtensa::Q0 + QS1Val) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) + .addReg(Xtensa::Q0 + QS1Val, RegState::Define) .addReg(Xtensa::Q0 + QS0Val) .addReg(Xtensa::Q0 + QS1Val); @@ -5146,8 +5152,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vzip_16 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QS0Val) - .addReg(Xtensa::Q0 + QS1Val) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) + .addReg(Xtensa::Q0 + QS1Val, RegState::Define) .addReg(Xtensa::Q0 + QS0Val) .addReg(Xtensa::Q0 + QS1Val); @@ -5165,8 +5171,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vzip_32 first argument, it " "must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QS0Val) - .addReg(Xtensa::Q0 + QS1Val) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) + .addReg(Xtensa::Q0 + QS1Val, RegState::Define) .addReg(Xtensa::Q0 + QS0Val) .addReg(Xtensa::Q0 + QS1Val); @@ -5184,8 +5190,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QS1Val < 8 && "Unexpected value of ee_vzip_8 first argument, it " "must be in 
range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QS0Val) - .addReg(Xtensa::Q0 + QS1Val) + .addReg(Xtensa::Q0 + QS0Val, RegState::Define) + .addReg(Xtensa::Q0 + QS1Val, RegState::Define) .addReg(Xtensa::Q0 + QS0Val) .addReg(Xtensa::Q0 + QS1Val); @@ -5207,7 +5213,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( assert(QYVal < 8 && "Unexpected value of ee_xorq first argument, it must " "be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + QAVal) + .addReg(Xtensa::Q0 + QAVal, RegState::Define) .addReg(Xtensa::Q0 + QXVal) .addReg(Xtensa::Q0 + QYVal); @@ -5227,7 +5233,8 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( unsigned QAVal = QA.getImm(); assert(QAVal < 8 && "Unexpected value of ee_zero_q first argument, it must " "be in range [0,7]"); - BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(Xtensa::Q0 + QAVal); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(Xtensa::Q0 + QAVal, RegState::Define); MI.eraseFromParent(); return MBB; @@ -5252,7 +5259,7 @@ MachineBasicBlock *XtensaTargetLowering::EmitDSPInstrWithCustomInserter( BVal < 8 && "Unexpected value of mv_qr first argument, it must be in range [0,7]"); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(Xtensa::Q0 + AVal) + .addReg(Xtensa::Q0 + AVal, RegState::Define) .addReg(Xtensa::Q0 + BVal); MI.eraseFromParent(); diff --git a/llvm/test/CodeGen/Xtensa/xtensa-s3-dsp.ll b/llvm/test/CodeGen/Xtensa/xtensa-s3-dsp.ll index b1782c92dff0b..0ae541f6d6880 100644 --- a/llvm/test/CodeGen/Xtensa/xtensa-s3-dsp.ll +++ b/llvm/test/CodeGen/Xtensa/xtensa-s3-dsp.ll @@ -1,4 +1,4 @@ -; RUN: llc -O1 -mtriple=xtensa -mcpu=esp32s3 %s -o - | FileCheck %s +; RUN: llc -O1 -mtriple=xtensa -mcpu=esp32s3 -verify-machineinstrs %s -o - | FileCheck %s ; CHECK: @test define void @test(){ From d3dbef733a9e75820f2f4bb881d5a9a96b61e171 Mon Sep 17 00:00:00 2001 From: Stefan Stipanovic Date: Thu, 19 Sep 2024 09:21:20 +0200 Subject: [PATCH 259/289] 
[Xtensa] ESP32S3 TIE fix format_32 encoding and disassembler ambiguities between x24 and format_32 encoding. --- .../Disassembler/XtensaDisassembler.cpp | 22 +- .../lib/Target/Xtensa/XtensaS3DSPInstrInfo.td | 2434 ++++++++++++----- .../test/CodeGen/Xtensa/ee-intrinsics-loop.ll | 113 +- llvm/test/MC/Xtensa/xtensa-esp32s3-valid.s | 200 +- 4 files changed, 1916 insertions(+), 853 deletions(-) diff --git a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp index e613c56e146d0..2048422b27d87 100644 --- a/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp +++ b/llvm/lib/Target/Xtensa/Disassembler/XtensaDisassembler.cpp @@ -865,7 +865,7 @@ static DecodeStatus readInstruction16(ArrayRef Bytes, uint64_t Address, /// Read three bytes from the ArrayRef and return 24 bit data static DecodeStatus readInstruction24(ArrayRef Bytes, uint64_t Address, uint64_t &Size, uint64_t &Insn, - bool IsLittleEndian) { + bool IsLittleEndian, bool CheckTIE = false) { // We want to read exactly 3 Bytes of data. 
if (Bytes.size() < 3) { Size = 0; @@ -875,6 +875,8 @@ static DecodeStatus readInstruction24(ArrayRef Bytes, uint64_t Address, if (!IsLittleEndian) { report_fatal_error("Big-endian mode currently is not supported!"); } else { + if (CheckTIE && (Bytes[0] & 0x8) != 0) + return MCDisassembler::Fail; Insn = (Bytes[2] << 16) | (Bytes[1] << 8) | (Bytes[0] << 0); } @@ -894,6 +896,8 @@ static DecodeStatus readInstruction32(ArrayRef Bytes, uint64_t Address, if (!IsLittleEndian) { report_fatal_error("Big-endian mode currently is not supported!"); } else { + if ((Bytes[0] & 0x8) == 0) + return MCDisassembler::Fail; Insn = (Bytes[3] << 24) | (Bytes[2] << 16) | (Bytes[1] << 8) | (Bytes[0] << 0); } @@ -959,15 +963,15 @@ DecodeStatus XtensaDisassembler::getInstruction(MCInst &MI, uint64_t &Size, if (hasESP32S3Ops()) { // Parse ESP32S3 24-bit instructions - Result = readInstruction24(Bytes, Address, Size, Insn, IsLittleEndian); - if (Result == MCDisassembler::Fail) - return MCDisassembler::Fail; - LLVM_DEBUG(dbgs() << "Trying ESP32S3 table (24-bit opcodes):\n"); - Result = decodeInstruction(DecoderTableESP32S324, MI, Insn, - Address, this, STI); + Result = readInstruction24(Bytes, Address, Size, Insn, IsLittleEndian, true); if (Result != MCDisassembler::Fail) { - Size = 3; - return Result; + LLVM_DEBUG(dbgs() << "Trying ESP32S3 table (24-bit opcodes):\n"); + Result = decodeInstruction(DecoderTableESP32S324, MI, Insn, Address, this, + STI); + if (Result != MCDisassembler::Fail) { + Size = 3; + return Result; + } } // Parse ESP32S3 32-bit instructions diff --git a/llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td index c884ffd238a37..d1f1b9ecee179 100644 --- a/llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaS3DSPInstrInfo.td @@ -107,14 +107,27 @@ def EE_CMUL_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$as, let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let 
Inst{22-20} = qu{2-0}; - let Inst{19-17} = qz{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-6} = 0x3; - let Inst{5-4} = sel4{1-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{23} = qy{0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qz{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{13-12} = qy{2-1}; + let Inst{9-8} = sel4{1-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -135,14 +148,26 @@ def EE_CMUL_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qz), (ins QR:$qv, AR:$as, let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-20} = 0x1c8; - let Inst{19-17} = qz{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qv{2-0}; - let Inst{7-6} = 0x0; - let Inst{5-4} = sel4{1-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = qy{0}; + let Inst{19} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{18-16} = qz{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{13-12} = qy{2-1}; + let Inst{9-8} = sel4{1-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -165,16 +190,25 @@ def EE_FFT_AMS_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz, QR:$qz1), let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x34; - let Inst{22} = sel2{0}; - let Inst{21-20} = qz1{2-1}; - let Inst{19-17} = qm{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qz{2-0}; - let Inst{10-8} = qy{2-0}; - let Inst{7} = qz1{0}; - let Inst{6-4} = qu{2-0}; - let Inst{3-0} = 
as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = sel2{0}; + let Inst{24} = qz1{2}; + let Inst{23} = qz{0}; + let Inst{22-20} = qy{2-0}; + let Inst{19} = qz1{1}; + let Inst{18-16} = qm{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qz{2-1}; + let Inst{11} = qz1{0}; + let Inst{10-8} = qu{2-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -197,16 +231,25 @@ def EE_FFT_AMS_S16_LD_INCP_UAUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz, QR:$q let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x35; - let Inst{22} = sel2{0}; - let Inst{21-20} = qz1{2-1}; - let Inst{19-17} = qm{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qz{2-0}; - let Inst{10-8} = qy{2-0}; - let Inst{7} = qz1{0}; - let Inst{6-4} = qu{2-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = sel2{0}; + let Inst{24} = qz1{2}; + let Inst{23} = qz{0}; + let Inst{22-20} = qy{2-0}; + let Inst{19} = qz1{1}; + let Inst{18-16} = qm{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qz{2-1}; + let Inst{11} = qz1{0}; + let Inst{10-8} = qu{2-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -229,16 +272,25 @@ def EE_FFT_AMS_S16_LD_R32_DECP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz, QR:$qz let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x36; - let Inst{22} = sel2{0}; - let Inst{21-20} = qz1{2-1}; - let Inst{19-17} = qm{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qz{2-0}; - let Inst{10-8} = qy{2-0}; - let Inst{7} = qz1{0}; - let Inst{6-4} = qu{2-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = sel2{0}; 
+ let Inst{24} = qz1{2}; + let Inst{23} = qz{0}; + let Inst{22-20} = qy{2-0}; + let Inst{19} = qz1{1}; + let Inst{18-16} = qm{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qz{2-1}; + let Inst{11} = qz1{0}; + let Inst{10-8} = qu{2-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -261,15 +313,23 @@ def EE_FFT_AMS_S16_ST_INCP: EE_Inst32<(outs QR:$qz1, AR:$as0r, AR:$asr), (ins Q let mayStore = 1; let Constraints = "$as0r = $as0, $asr = $as"; - let Inst{28-24} = 0x14; - let Inst{23} = sel2{0}; - let Inst{22-20} = qz1{2-0}; - let Inst{19-17} = qx{2-0}; - let Inst{16-14} = qy{2-0}; - let Inst{13-11} = qm{2-0}; - let Inst{10-8} = qv{2-0}; - let Inst{7-4} = as{3-0}; - let Inst{3-0} = as0{3-0}; + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = sel2{0}; + let Inst{25-24} = qz1{2-1}; + let Inst{23} = qm{0}; + let Inst{22-20} = qv{2-0}; + let Inst{19} = qz1{0}; + let Inst{18-16} = qx{2-0}; + let Inst{15-14} = qy{1-0}; + let Inst{13-12} = qm{2-1}; + let Inst{11-8} = as{3-0}; + let Inst{7-4} = as0{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qy{2}; } let usesCustomInserter = 1 in @@ -291,14 +351,23 @@ def EE_FFT_CMUL_S16_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$a let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x37; - let Inst{22-20} = sel8{2-0}; - let Inst{19-17} = qz{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qu{2-0}; - let Inst{10-8} = qy{2-0}; - let Inst{7-4} = ad{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 1; + let Inst{25-24} = sel8{2-1}; + let Inst{23} = qu{0}; + let Inst{22-20} = qy{2-0}; + let Inst{19} = sel8{0}; + let Inst{18-16} = qz{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qu{2-1}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; 
+ let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -321,15 +390,23 @@ def EE_FFT_CMUL_S16_ST_XP: EE_Inst32<(outs AR:$asr), (ins QR:$qx, QR:$qy, QR:$q let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-24} = 0x15; - let Inst{23-22} = sar4{1-0}; - let Inst{21-20} = upd4{1-0}; - let Inst{19-17} = qv{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = sel8{2-0}; - let Inst{10-8} = qy{2-0}; - let Inst{7-4} = ad{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26-25} = sar4{1-0}; + let Inst{24} = upd4{1}; + let Inst{23} = sel8{0}; + let Inst{22-20} = qy{2-0}; + let Inst{19} = upd4{0}; + let Inst{18-16} = qv{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = sel8{2-1}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -378,14 +455,28 @@ def EE_FFT_R2BF_S16_ST_INCP: EE_Inst32<(outs QR:$qa0, AR:$asr), (ins QR:$qx, QR let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-20} = 0x1d1; - let Inst{19-17} = qa0{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13} = 0x0; - let Inst{12-11} = sar4{1-0}; - let Inst{10-8} = qy{2-0}; - let Inst{7-4} = 0x4; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = sar4{0}; + let Inst{19} = 1; + let Inst{22-20} = qy{2-0}; + let Inst{18-16} = qa0{2-0}; + let Inst{13} = 0; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{12} = sar4{1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -432,13 +523,20 @@ def EE_LDF_128_IP: EE_Inst32<(outs FPR:$fu3, FPR:$fu2, FPR:$fu1, FPR:$fu0, AR:$ let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-24} = 0x10; 
- let Inst{23-20} = fu3{3-0}; - let Inst{19-16} = fu2{3-0}; + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26-24} = fu3{3-1}; + let Inst{23-20} = fu0{3-0}; + let Inst{19} = fu3{0}; + let Inst{18-16} = fu2{3-1}; let Inst{15-12} = fu1{3-0}; - let Inst{11-8} = fu0{3-0}; - let Inst{7-4} = imm16f{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{11-8} = imm16f{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = fu2{0}; } let usesCustomInserter = 1 in @@ -459,13 +557,20 @@ def EE_LDF_128_XP: EE_Inst32<(outs FPR:$fu3, FPR:$fu2, FPR:$fu1, FPR:$fu0, AR:$ let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-24} = 0x11; - let Inst{23-20} = fu3{3-0}; - let Inst{19-16} = fu2{3-0}; + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26-24} = fu3{3-1}; + let Inst{23-20} = fu0{3-0}; + let Inst{19} = fu3{0}; + let Inst{18-16} = fu2{3-1}; let Inst{15-12} = fu1{3-0}; - let Inst{11-8} = fu0{3-0}; - let Inst{7-4} = ad{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = fu2{0}; } let usesCustomInserter = 1 in @@ -484,13 +589,23 @@ def EE_LDF_64_IP: EE_Inst32<(outs FPR:$fu1, FPR:$fu0, AR:$asr), (ins AR:$as, of let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let Inst{22-16} = imm8{7-1}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = imm8{7-6}; + let Inst{23-20} = fu0{3-0}; + let Inst{19-16} = imm8{5-2}; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 0; let Inst{15-12} = fu1{3-0}; - let Inst{11-8} = fu0{3-0}; - let Inst{7-5} = 0x2; - let Inst{4} = imm8{0}; - let Inst{3-0} = as{3-0}; + let Inst{8} = imm8{0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = imm8{1}; } let usesCustomInserter = 1 in @@ -701,13 +816,29 @@ def 
EE_LDXQ_32: EE_Inst32<(outs QR:$qu), (ins QR:$qs, AR:$as, select_4:$sel4, s let mayLoad = 1; - let Inst{28-22} = 0x70; - let Inst{21-20} = sel4{1-0}; - let Inst{19-17} = qu{2-0}; - let Inst{16-14} = qs{2-0}; - let Inst{13-11} = sel8{2-0}; - let Inst{10-4} = 0x7d; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = sel4{1}; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{23} = sel8{0}; + let Inst{19} = sel4{0}; + let Inst{18-16} = qu{2-0}; + let Inst{15-14} = qs{1-0}; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{13-12} = sel8{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qs{2}; } let usesCustomInserter = 1 in @@ -1272,15 +1403,25 @@ def EE_SRC_Q_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, of let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-23} = 0x38; - let Inst{22-20} = imm16{7-5}; - let Inst{19-17} = qu{2-0}; - let Inst{16-14} = qs0{2-0}; - let Inst{13-11} = imm16{4-2}; - let Inst{10-8} = qs1{2-0}; - let Inst{7-6} = 0x0; - let Inst{5-4} = imm16{1-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = imm16{7-6}; + let Inst{23} = imm16{2}; + let Inst{22-20} = qs1{2-0}; + let Inst{19} = imm16{5}; + let Inst{18-16} = qu{2-0}; + let Inst{15-14} = qs0{1-0}; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{13-12} = imm16{4-3}; + let Inst{9-8} = imm16{1-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qs0{2}; } let usesCustomInserter = 1 in @@ -1300,13 +1441,25 @@ def EE_SRC_Q_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (ins AR:$as, AR let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-20} = 0x1d0; - let Inst{19-17} = qu{2-0}; - 
let Inst{16-14} = qs0{2-0}; - let Inst{13-11} = 0x0; - let Inst{10-8} = qs1{2-0}; - let Inst{7-4} = ad{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{19} = 0; + let Inst{22-20} = qs1{2-0}; + let Inst{18-16} = qu{2-0}; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{15-14} = qs0{1-0}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qs0{2}; } let usesCustomInserter = 1 in @@ -1372,13 +1525,20 @@ def EE_STF_128_IP: EE_Inst32<(outs AR:$asr), (ins FPR:$fv3, FPR:$fv2, FPR:$fv1, let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-24} = 0x12; - let Inst{23-20} = fv3{3-0}; - let Inst{19-16} = fv2{3-0}; + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26-24} = fv3{3-1}; + let Inst{23-20} = fv0{3-0}; + let Inst{19} = fv3{0}; + let Inst{18-16} = fv2{3-1}; let Inst{15-12} = fv1{3-0}; - let Inst{11-8} = fv0{3-0}; - let Inst{7-4} = imm16f{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{11-8} = imm16f{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = fv2{0}; } let usesCustomInserter = 1 in @@ -1399,13 +1559,20 @@ def EE_STF_128_XP: EE_Inst32<(outs AR:$asr), (ins FPR:$fv3, FPR:$fv2, FPR:$fv1, let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-24} = 0x13; - let Inst{23-20} = fv3{3-0}; - let Inst{19-16} = fv2{3-0}; + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26-24} = fv3{3-1}; + let Inst{23-20} = fv0{3-0}; + let Inst{19} = fv3{0}; + let Inst{18-16} = fv2{3-1}; let Inst{15-12} = fv1{3-0}; - let Inst{11-8} = fv0{3-0}; - let Inst{7-4} = ad{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = fv2{0}; } let 
usesCustomInserter = 1 in @@ -1424,13 +1591,23 @@ def EE_STF_64_IP: EE_Inst32<(outs AR:$asr), (ins FPR:$fv1, FPR:$fv0, AR:$as, of let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let Inst{22-16} = imm8{7-1}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = imm8{7-6}; + let Inst{23-20} = fv0{3-0}; + let Inst{19-16} = imm8{5-2}; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 1; let Inst{15-12} = fv1{3-0}; - let Inst{11-8} = fv0{3-0}; - let Inst{7-5} = 0x3; - let Inst{4} = imm8{0}; - let Inst{3-0} = as{3-0}; + let Inst{8} = imm8{0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = imm8{1}; } let usesCustomInserter = 1 in @@ -1473,14 +1650,29 @@ def EE_STXQ_32: EE_Inst32<(outs), (ins QR:$qv, QR:$qs, AR:$as, select_4:$sel4, let mayStore = 1; - let Inst{28-22} = 0x73; - let Inst{21-20} = sel4{1-0}; - let Inst{19-17} = 0x0; - let Inst{16-14} = qs{2-0}; - let Inst{13-11} = sel8{2-0}; - let Inst{10-8} = qv{2-0}; - let Inst{7-4} = 0x0; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 1; + let Inst{24} = sel4{1}; + let Inst{23} = sel8{0}; + let Inst{22-20} = qv{2-0}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{19} = sel4{0}; + let Inst{15-14} = qs{1-0}; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{13-12} = sel8{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qs{2}; } let usesCustomInserter = 1 in @@ -1656,13 +1848,28 @@ def EE_VADDS_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-4} = 0x2d; - let 
Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{23} = qy{0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -1682,13 +1889,27 @@ def EE_VADDS_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-20} = 0x1c9; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qv{2-0}; - let Inst{7-4} = 0x0; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = qy{0}; + let Inst{19} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -1732,13 +1953,28 @@ def EE_VADDS_S32_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-4} = 0x3d; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{23} = qy{0}; + let Inst{19} = 
qu{0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -1758,13 +1994,27 @@ def EE_VADDS_S32_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-20} = 0x1c9; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qv{2-0}; - let Inst{7-4} = 0x1; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = qy{0}; + let Inst{19} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -1808,13 +2058,28 @@ def EE_VADDS_S8_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-4} = 0x1c; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{23} = qy{0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter 
= 1 in @@ -1834,13 +2099,27 @@ def EE_VADDS_S8_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-20} = 0x1c9; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qv{2-0}; - let Inst{7-4} = 0x2; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = qy{0}; + let Inst{19} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -2489,13 +2768,28 @@ def EE_VMAX_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-4} = 0x1d; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{23} = qy{0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -2515,13 +2809,27 @@ def EE_VMAX_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-20} = 0x1c9; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = 
qy{2-0}; - let Inst{10-8} = qv{2-0}; - let Inst{7-4} = 0x3; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = qy{0}; + let Inst{19} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -2565,13 +2873,28 @@ def EE_VMAX_S32_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-4} = 0x1e; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{23} = qy{0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -2591,13 +2914,27 @@ def EE_VMAX_S32_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-20} = 0x1ca; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qv{2-0}; - let Inst{7-4} = 0x0; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = qy{0}; + 
let Inst{19} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -2641,13 +2978,28 @@ def EE_VMAX_S8_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-4} = 0x1f; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{23} = qy{0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -2667,13 +3019,27 @@ def EE_VMAX_S8_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-20} = 0x1cb; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qv{2-0}; - let Inst{7-4} = 0x0; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = qy{0}; + let Inst{19} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let 
Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -2717,13 +3083,28 @@ def EE_VMIN_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-4} = 0x2e; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{23} = qy{0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -2743,13 +3124,27 @@ def EE_VMIN_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-20} = 0x1ca; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qv{2-0}; - let Inst{7-4} = 0x1; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = qy{0}; + let Inst{19} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -2793,13 +3188,28 @@ def EE_VMIN_S32_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let 
Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-4} = 0x3e; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{23} = qy{0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -2819,13 +3229,27 @@ def EE_VMIN_S32_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-20} = 0x1cb; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qv{2-0}; - let Inst{7-4} = 0x1; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = qy{0}; + let Inst{19} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -2869,13 +3293,28 @@ def EE_VMIN_S8_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-4} = 0x2f; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = 
qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{23} = qy{0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -2895,13 +3334,27 @@ def EE_VMIN_S8_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-20} = 0x1ca; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qv{2-0}; - let Inst{7-4} = 0x2; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = qy{0}; + let Inst{19} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -2941,15 +3394,26 @@ def EE_VMULAS_S16_ACCX_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, of let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-25} = 0xf; - let Inst{24-23} = imm16{5-4}; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = 0x0; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = 0x0; - let Inst{7-4} = imm16{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27-26} = imm16{5-4}; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{23} = qy{0}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{19} = qu{0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = 
qy{2-1}; + let Inst{11-8} = imm16{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -2971,15 +3435,22 @@ def EE_VMULAS_S16_ACCX_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), ( let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-25} = 0x0; - let Inst{24-23} = imm16{5-4}; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qs1{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qs0{2-0}; - let Inst{7-4} = imm16{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27-26} = imm16{5-4}; + let Inst{25-24} = qu{2-1}; + let Inst{23} = qy{0}; + let Inst{22-20} = qs0{2-0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qs1{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = imm16{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -2999,14 +3470,27 @@ def EE_VMULAS_S16_ACCX_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x3c; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = 0x0; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = 0x1; - let Inst{7-4} = ad{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{23} = qy{0}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{19} = qu{0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3028,14 +3512,23 @@ def EE_VMULAS_S16_ACCX_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), 
( let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-23} = 0x2c; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qs1{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qs0{2-0}; - let Inst{7-4} = ad{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{23} = qy{0}; + let Inst{22-20} = qs0{2-0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qs1{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3104,14 +3597,26 @@ def EE_VMULAS_S16_QACC_LDBC_INCP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qs1{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qs0{2-0}; - let Inst{7-4} = 0x8; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{23} = qy{0}; + let Inst{22-20} = qs0{2-0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qs1{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3131,15 +3636,26 @@ def EE_VMULAS_S16_QACC_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, of let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-25} = 0xf; - let Inst{24-23} = imm16{5-4}; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = 0x1; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = 0x0; - let Inst{7-4} = 
imm16{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27-26} = imm16{5-4}; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{23} = qy{0}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{19} = qu{0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = imm16{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3161,15 +3677,22 @@ def EE_VMULAS_S16_QACC_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), ( let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-25} = 0x1; - let Inst{24-23} = imm16{5-4}; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qs1{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qs0{2-0}; - let Inst{7-4} = imm16{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27-26} = imm16{5-4}; + let Inst{25-24} = qu{2-1}; + let Inst{23} = qy{0}; + let Inst{22-20} = qs0{2-0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qs1{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = imm16{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3189,14 +3712,27 @@ def EE_VMULAS_S16_QACC_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x3c; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = 0x1; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = 0x1; - let Inst{7-4} = ad{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let 
Inst{23} = qy{0}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{19} = qu{0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3218,14 +3754,23 @@ def EE_VMULAS_S16_QACC_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), ( let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-23} = 0x2d; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qs1{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qs0{2-0}; - let Inst{7-4} = ad{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25-24} = qu{2-1}; + let Inst{23} = qy{0}; + let Inst{22-20} = qs0{2-0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qs1{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3265,15 +3810,26 @@ def EE_VMULAS_S8_ACCX_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, off let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-25} = 0xf; - let Inst{24-23} = imm16{5-4}; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = 0x2; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = 0x0; - let Inst{7-4} = imm16{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27-26} = imm16{5-4}; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{23} = qy{0}; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{19} = qu{0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = imm16{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let 
Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3295,15 +3851,22 @@ def EE_VMULAS_S8_ACCX_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (i let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-25} = 0x2; - let Inst{24-23} = imm16{5-4}; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qs1{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qs0{2-0}; - let Inst{7-4} = imm16{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27-26} = imm16{5-4}; + let Inst{25-24} = qu{2-1}; + let Inst{23} = qy{0}; + let Inst{22-20} = qs0{2-0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qs1{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = imm16{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3323,14 +3886,27 @@ def EE_VMULAS_S8_ACCX_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR: let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x3c; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = 0x2; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = 0x1; - let Inst{7-4} = ad{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{23} = qy{0}; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{19} = qu{0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3352,14 +3928,23 @@ def EE_VMULAS_S8_ACCX_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (i let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-23} = 0x2e; - let 
Inst{22-20} = qu{2-0}; - let Inst{19-17} = qs1{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qs0{2-0}; - let Inst{7-4} = ad{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{23} = qy{0}; + let Inst{22-20} = qs0{2-0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qs1{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3428,14 +4013,26 @@ def EE_VMULAS_S8_QACC_LDBC_INCP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r) let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qs1{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qs0{2-0}; - let Inst{7-4} = 0x9; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{23} = qy{0}; + let Inst{22-20} = qs0{2-0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qs1{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3455,15 +4052,26 @@ def EE_VMULAS_S8_QACC_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, off let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-25} = 0xf; - let Inst{24-23} = imm16{5-4}; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = 0x3; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = 0x0; - let Inst{7-4} = imm16{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + 
let Inst{28} = 1; + let Inst{27-26} = imm16{5-4}; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{23} = qy{0}; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 1; + let Inst{19} = qu{0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = imm16{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3485,15 +4093,22 @@ def EE_VMULAS_S8_QACC_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (i let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-25} = 0x3; - let Inst{24-23} = imm16{5-4}; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qs1{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qs0{2-0}; - let Inst{7-4} = imm16{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27-26} = imm16{5-4}; + let Inst{25-24} = qu{2-1}; + let Inst{23} = qy{0}; + let Inst{22-20} = qs0{2-0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qs1{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = imm16{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3513,14 +4128,27 @@ def EE_VMULAS_S8_QACC_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR: let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x3c; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = 0x3; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = 0x1; - let Inst{7-4} = ad{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{23} = qy{0}; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 1; + let Inst{19} = 
qu{0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3542,14 +4170,23 @@ def EE_VMULAS_S8_QACC_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (i let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-23} = 0x2f; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qs1{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qs0{2-0}; - let Inst{7-4} = ad{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 1; + let Inst{25-24} = qu{2-1}; + let Inst{23} = qy{0}; + let Inst{22-20} = qs0{2-0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qs1{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3589,15 +4226,26 @@ def EE_VMULAS_U16_ACCX_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, of let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-25} = 0xf; - let Inst{24-23} = imm16{5-4}; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = 0x4; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = 0x0; - let Inst{7-4} = imm16{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27-26} = imm16{5-4}; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{23} = qy{0}; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{19} = qu{0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = imm16{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3619,15 +4267,22 @@ def 
EE_VMULAS_U16_ACCX_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), ( let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-25} = 0x4; - let Inst{24-23} = imm16{5-4}; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qs1{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qs0{2-0}; - let Inst{7-4} = imm16{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 0; + let Inst{30} = 1; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27-26} = imm16{5-4}; + let Inst{25-24} = qu{2-1}; + let Inst{23} = qy{0}; + let Inst{22-20} = qs0{2-0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qs1{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = imm16{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3647,14 +4302,27 @@ def EE_VMULAS_U16_ACCX_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x3c; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = 0x4; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = 0x1; - let Inst{7-4} = ad{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{23} = qy{0}; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{19} = qu{0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3676,14 +4344,23 @@ def EE_VMULAS_U16_ACCX_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), ( let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-23} = 0x30; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qs1{2-0}; - let Inst{16-14} = 
qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qs0{2-0}; - let Inst{7-4} = ad{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{23} = qy{0}; + let Inst{22-20} = qs0{2-0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qs1{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3752,14 +4429,26 @@ def EE_VMULAS_U16_QACC_LDBC_INCP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qs1{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qs0{2-0}; - let Inst{7-4} = 0xa; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{23} = qy{0}; + let Inst{22-20} = qs0{2-0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qs1{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3779,15 +4468,26 @@ def EE_VMULAS_U16_QACC_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, of let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-25} = 0xf; - let Inst{24-23} = imm16{5-4}; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = 0x5; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = 0x0; - let Inst{7-4} = imm16{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27-26} = imm16{5-4}; + let Inst{25-24} = 
qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{23} = qy{0}; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{19} = qu{0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = imm16{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3809,15 +4509,22 @@ def EE_VMULAS_U16_QACC_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), ( let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-25} = 0x5; - let Inst{24-23} = imm16{5-4}; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qs1{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qs0{2-0}; - let Inst{7-4} = imm16{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 0; + let Inst{30} = 1; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27-26} = imm16{5-4}; + let Inst{25-24} = qu{2-1}; + let Inst{23} = qy{0}; + let Inst{22-20} = qs0{2-0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qs1{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = imm16{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3837,14 +4544,27 @@ def EE_VMULAS_U16_QACC_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x3c; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = 0x5; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = 0x1; - let Inst{7-4} = ad{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{23} = qy{0}; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{19} = qu{0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let 
Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3866,14 +4586,23 @@ def EE_VMULAS_U16_QACC_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), ( let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-23} = 0x31; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qs1{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qs0{2-0}; - let Inst{7-4} = ad{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25-24} = qu{2-1}; + let Inst{23} = qy{0}; + let Inst{22-20} = qs0{2-0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qs1{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3913,15 +4642,26 @@ def EE_VMULAS_U8_ACCX_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, off let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-25} = 0xf; - let Inst{24-23} = imm16{5-4}; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = 0x6; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = 0x0; - let Inst{7-4} = imm16{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27-26} = imm16{5-4}; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{23} = qy{0}; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{19} = qu{0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = imm16{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3943,15 +4683,22 @@ def EE_VMULAS_U8_ACCX_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (i let mayLoad = 
1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-25} = 0x6; - let Inst{24-23} = imm16{5-4}; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qs1{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qs0{2-0}; - let Inst{7-4} = imm16{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 0; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27-26} = imm16{5-4}; + let Inst{25-24} = qu{2-1}; + let Inst{23} = qy{0}; + let Inst{22-20} = qs0{2-0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qs1{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = imm16{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -3971,14 +4718,27 @@ def EE_VMULAS_U8_ACCX_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR: let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x3c; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = 0x6; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = 0x1; - let Inst{7-4} = ad{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{23} = qy{0}; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{19} = qu{0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -4000,14 +4760,23 @@ def EE_VMULAS_U8_ACCX_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (i let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-23} = 0x32; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qs1{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qs0{2-0}; - let Inst{7-4} = ad{3-0}; 
- let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{23} = qy{0}; + let Inst{22-20} = qs0{2-0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qs1{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -4076,14 +4845,26 @@ def EE_VMULAS_U8_QACC_LDBC_INCP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r) let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qs1{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qs0{2-0}; - let Inst{7-4} = 0xb; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{23} = qy{0}; + let Inst{22-20} = qs0{2-0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qs1{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -4103,15 +4884,26 @@ def EE_VMULAS_U8_QACC_LD_IP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, off let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-25} = 0xf; - let Inst{24-23} = imm16{5-4}; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = 0x7; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = 0x0; - let Inst{7-4} = imm16{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27-26} = imm16{5-4}; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{23} = qy{0}; + let 
Inst{18} = 1; + let Inst{17} = 1; + let Inst{16} = 1; + let Inst{19} = qu{0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = imm16{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -4133,15 +4925,22 @@ def EE_VMULAS_U8_QACC_LD_IP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (i let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-25} = 0x7; - let Inst{24-23} = imm16{5-4}; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qs1{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qs0{2-0}; - let Inst{7-4} = imm16{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 0; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27-26} = imm16{5-4}; + let Inst{25-24} = qu{2-1}; + let Inst{23} = qy{0}; + let Inst{22-20} = qs0{2-0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qs1{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = imm16{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -4161,14 +4960,27 @@ def EE_VMULAS_U8_QACC_LD_XP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, AR: let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x3c; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = 0x7; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = 0x1; - let Inst{7-4} = ad{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{23} = qy{0}; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16} = 1; + let Inst{19} = qu{0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let 
usesCustomInserter = 1 in @@ -4190,14 +5002,23 @@ def EE_VMULAS_U8_QACC_LD_XP_QUP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qs0r), (i let mayLoad = 1; let Constraints = "$asr = $as, $qs0r = $qs0"; - let Inst{28-23} = 0x33; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qs1{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qs0{2-0}; - let Inst{7-4} = ad{3-0}; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 1; + let Inst{25-24} = qu{2-1}; + let Inst{23} = qy{0}; + let Inst{22-20} = qs0{2-0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qs1{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{13-12} = qy{2-1}; + let Inst{11-8} = ad{3-0}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -4241,13 +5062,28 @@ def EE_VMUL_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$as, let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qz{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-4} = 0x3f; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{23} = qy{0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qz{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -4267,13 +5103,27 @@ def EE_VMUL_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qz), (ins QR:$qv, AR:$as, let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-20} = 0x1cb; - let Inst{19-17} = qz{2-0}; - let Inst{16-14} = qx{2-0}; - let 
Inst{13-11} = qy{2-0}; - let Inst{10-8} = qv{2-0}; - let Inst{7-4} = 0x2; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = qy{0}; + let Inst{19} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{18-16} = qz{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -4317,13 +5167,28 @@ def EE_VMUL_S8_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$as, let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qz{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-4} = 0x4c; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{23} = qy{0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qz{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -4343,13 +5208,27 @@ def EE_VMUL_S8_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qz), (ins QR:$qv, AR:$as, let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-20} = 0x1ca; - let Inst{19-17} = qz{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qv{2-0}; - let Inst{7-4} = 0x3; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 1; + let 
Inst{23} = qy{0}; + let Inst{19} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{18-16} = qz{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -4393,13 +5272,28 @@ def EE_VMUL_U16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$as, let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qz{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-4} = 0x5c; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{23} = qy{0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qz{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -4419,13 +5313,27 @@ def EE_VMUL_U16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qz), (ins QR:$qv, AR:$as, let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-20} = 0x1cb; - let Inst{19-17} = qz{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qv{2-0}; - let Inst{7-4} = 0x3; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = qy{0}; + let Inst{19} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{18-16} = qz{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let 
Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -4469,13 +5377,28 @@ def EE_VMUL_U8_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qz), (ins AR:$as, let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qz{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-4} = 0x6c; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{23} = qy{0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qz{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -4495,13 +5418,27 @@ def EE_VMUL_U8_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qz), (ins QR:$qv, AR:$as, let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-20} = 0x1d1; - let Inst{19-17} = qz{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qv{2-0}; - let Inst{7-4} = 0x0; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = qy{0}; + let Inst{19} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{18-16} = qz{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -4666,13 +5603,28 @@ def EE_VSMULAS_S16_QACC_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let Inst{22-20} = 
qu{2-0}; - let Inst{19-17} = sel8{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-4} = 0x7c; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{23} = qy{0}; + let Inst{19} = qu{0}; + let Inst{18-16} = sel8{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -4718,15 +5670,28 @@ def EE_VSMULAS_S8_QACC_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr), (ins AR:$as, let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = sel16{3-1}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-9} = 0x1; - let Inst{8} = sel16{0}; - let Inst{7-4} = 0xc; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{23} = qy{0}; + let Inst{20} = sel16{0}; + let Inst{19} = qu{0}; + let Inst{18-16} = sel16{3-1}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -4941,13 +5906,28 @@ def EE_VSUBS_S16_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-4} = 0x4d; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} 
= 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{23} = qy{0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -4967,13 +5947,27 @@ def EE_VSUBS_S16_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-20} = 0x1d1; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qv{2-0}; - let Inst{7-4} = 0x1; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = qy{0}; + let Inst{19} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -5017,13 +6011,28 @@ def EE_VSUBS_S32_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-4} = 0x5d; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{23} = qy{0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = 
qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -5043,13 +6052,27 @@ def EE_VSUBS_S32_ST_INCP: EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-20} = 0x1d1; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qv{2-0}; - let Inst{7-4} = 0x2; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = qy{0}; + let Inst{19} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -5093,13 +6116,28 @@ def EE_VSUBS_S8_LD_INCP: EE_Inst32<(outs QR:$qu, AR:$asr, QR:$qa), (ins AR:$as, let mayLoad = 1; let Constraints = "$asr = $as"; - let Inst{28-23} = 0x38; - let Inst{22-20} = qu{2-0}; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-4} = 0x6d; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qu{2-1}; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{23} = qy{0}; + let Inst{19} = qu{0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in @@ -5119,13 +6157,27 @@ def EE_VSUBS_S8_ST_INCP: 
EE_Inst32<(outs AR:$asr, QR:$qa), (ins QR:$qv, AR:$as, let mayStore = 1; let Constraints = "$asr = $as"; - let Inst{28-20} = 0x1d1; - let Inst{19-17} = qa{2-0}; - let Inst{16-14} = qx{2-0}; - let Inst{13-11} = qy{2-0}; - let Inst{10-8} = qv{2-0}; - let Inst{7-4} = 0x3; - let Inst{3-0} = as{3-0}; + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = qy{0}; + let Inst{19} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{18-16} = qa{2-0}; + let Inst{15-14} = qx{1-0}; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{13-12} = qy{2-1}; + let Inst{7-4} = as{3-0}; + let Inst{3-1} = 0b111; + let Inst{0} = qx{2}; } let usesCustomInserter = 1 in diff --git a/llvm/test/CodeGen/Xtensa/ee-intrinsics-loop.ll b/llvm/test/CodeGen/Xtensa/ee-intrinsics-loop.ll index ff5c5389d83e0..d1b8a77ca4f4d 100644 --- a/llvm/test/CodeGen/Xtensa/ee-intrinsics-loop.ll +++ b/llvm/test/CodeGen/Xtensa/ee-intrinsics-loop.ll @@ -1,7 +1,71 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 ; RUN: llc -mtriple=xtensa -mcpu=esp32s3 < %s | FileCheck %s ; Function Attrs: nounwind define dso_local void @test(i32 noundef %ptr.coerce, i32 noundef %ptr2.coerce) local_unnamed_addr #0 { +; CHECK-LABEL: test: +; CHECK: entry a1, 32 +; CHECK-NEXT: l32i.n a8, a2, 0 +; CHECK-NEXT: movi.n a9, 16 +; CHECK-NEXT: mov.n a10, a9 +; CHECK-NEXT: ee.vld.128.ip q1, a10, 0 +; CHECK-NEXT: mov.n a10, a9 +; CHECK-NEXT: ee.vld.128.ip q1, a10, 0 +; CHECK-NEXT: mov.n a10, a9 +; CHECK-NEXT: ee.vld.128.ip q1, a10, 0 +; CHECK-NEXT: mov.n a10, a8 +; CHECK-NEXT: ee.vld.128.ip q1, a10, 0 +; CHECK-NEXT: mov.n a10, a8 +; CHECK-NEXT: ee.vld.128.ip q2, a10, 0 +; CHECK-NEXT: mov.n a10, a8 +; CHECK-NEXT: ee.vld.128.ip q3, a10, 0 +; CHECK-NEXT: mov.n a10, a8 +; CHECK-NEXT: ee.vld.128.ip q3, a10, 0 +; CHECK-NEXT: mov.n a10, a8 +; 
CHECK-NEXT: ee.vld.128.ip q3, a10, 0 +; CHECK-NEXT: mov.n a10, a8 +; CHECK-NEXT: ee.vld.128.ip q3, a10, 0 +; CHECK-NEXT: mov.n a10, a8 +; CHECK-NEXT: ee.vld.128.ip q3, a10, 0 +; CHECK-NEXT: ee.vmax.s8 q1, q2, q3 +; CHECK-NEXT: ee.vmax.s16 q4, q5, q7 +; CHECK-NEXT: l32r a10, .LCPI0_0 +; CHECK-NEXT: wfr f8, a10 +; CHECK-NEXT: l32r a10, .LCPI0_1 +; CHECK-NEXT: wfr f9, a10 +; CHECK-NEXT: l32r a10, .LCPI0_2 +; CHECK-NEXT: wfr f10, a10 +; CHECK-NEXT: l32r a10, .LCPI0_3 +; CHECK-NEXT: wfr f11, a10 +; CHECK-NEXT: mov.n a10, a8 +; CHECK-NEXT: ee.stf.128.ip f11, f10, f9, f8, a10, 16 +; CHECK-NEXT: mov.n a10, a8 +; CHECK-NEXT: ee.stf.128.ip f8, f8, f8, f8, a10, 16 +; CHECK-NEXT: mov.n a10, a9 +; CHECK-NEXT: ee.stf.128.xp f8, f8, f8, f8, a10, a8 +; CHECK-NEXT: movi.n a10, 32 +; CHECK-NEXT: .LBB0_1: # %for.body +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: mov.n a11, a8 +; CHECK-NEXT: ee.ld.128.usar.ip q4, a11, 16 +; CHECK-NEXT: mov.n a11, a8 +; CHECK-NEXT: ee.src.q.ld.ip q3, a11, 16, q4, q2 +; CHECK-NEXT: movi.n a11, 10 +; CHECK-NEXT: ee.vmulas.s16.accx.ld.ip q0, a11, 16, q4, q6 +; CHECK-NEXT: mov.n a11, a8 +; CHECK-NEXT: ee.vmulas.s16.accx.ld.xp.qup q4, a11, a9, q0, q4, q2, q3 +; CHECK-NEXT: mov.n a11, a8 +; CHECK-NEXT: ee.ld.128.usar.xp q4, a11, a9 +; CHECK-NEXT: mov.n a11, a8 +; CHECK-NEXT: ee.vmulas.s16.accx.ld.ip.qup q3, a11, 16, q0, q3, q4, q2 +; CHECK-NEXT: addi.n a10, a10, -1 +; CHECK-NEXT: bnez a10, .LBB0_1 +; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: movi.n a8, 0 +; CHECK-NEXT: wur.sar_byte a8 +; CHECK-NEXT: wur.accx_0 a8 +; CHECK-NEXT: wur.accx_1 a8 +; CHECK-NEXT: retw.n entry: %coerce.val.ip = inttoptr i32 %ptr.coerce to ptr %0 = load i32, ptr %coerce.val.ip, align 4, !tbaa !2 @@ -41,55 +105,6 @@ for.body: ; preds = %entry, %for.body br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !6 } -; CHECK-LABEL: test: # @test -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: entry a1, 32 -; CHECK-NEXT: l32i.n a8, a2, 0 -; 
CHECK-NEXT: movi.n a9, 16 -; CHECK-NEXT: ee.vld.128.ip q1, a9, 0 -; CHECK-NEXT: ee.vld.128.ip q1, a9, 0 -; CHECK-NEXT: ee.vld.128.ip q1, a9, 0 -; CHECK-NEXT: ee.vld.128.ip q1, a8, 0 -; CHECK-NEXT: ee.vld.128.ip q2, a8, 0 -; CHECK-NEXT: ee.vld.128.ip q3, a8, 0 -; CHECK-NEXT: ee.vld.128.ip q3, a8, 0 -; CHECK-NEXT: ee.vld.128.ip q3, a8, 0 -; CHECK-NEXT: ee.vld.128.ip q3, a8, 0 -; CHECK-NEXT: ee.vld.128.ip q3, a8, 0 -; CHECK-NEXT: ee.vmax.s8 q1, q2, q3 -; CHECK-NEXT: ee.vmax.s16 q4, q5, q7 -; CHECK-NEXT: l32r a10, .LCPI0_0 -; CHECK-NEXT: wfr f8, a10 -; CHECK-NEXT: l32r a10, .LCPI0_1 -; CHECK-NEXT: wfr f9, a10 -; CHECK-NEXT: l32r a10, .LCPI0_2 -; CHECK-NEXT: wfr f10, a10 -; CHECK-NEXT: l32r a10, .LCPI0_3 -; CHECK-NEXT: wfr f11, a10 -; CHECK-NEXT: ee.stf.128.ip f11, f10, f9, f8, a8, 16 -; CHECK-NEXT: ee.stf.128.ip f8, f8, f8, f8, a8, 16 -; CHECK-NEXT: ee.stf.128.xp f8, f8, f8, f8, a9, a8 -; CHECK-NEXT: movi.n a10, 32 -; CHECK-NEXT: movi.n a11, 10 -; CHECK-NEXT: .LBB0_1: # %for.body -; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: ee.ld.128.usar.ip q4, a8, 16 -; CHECK-NEXT: ee.src.q.ld.ip q3, a8, 16, q4, q2 -; CHECK-NEXT: ee.vmulas.s16.accx.ld.ip q0, a11, 16, q4, q6 -; CHECK-NEXT: ee.vmulas.s16.accx.ld.xp.qup q4, a8, a9, q0, q4, q2, q3 -; CHECK-NEXT: ee.ld.128.usar.xp q4, a8, a9 -; CHECK-NEXT: ee.vmulas.s16.accx.ld.ip.qup q3, a8, 16, q0, q3, q4, q2 -; CHECK-NEXT: addi.n a10, a10, -1 -; CHECK-NEXT: bnez a10, .LBB0_1 -; CHECK-NEXT: # %bb.2: # %for.cond.cleanup -; CHECK-NEXT: movi.n a8, 0 -; CHECK-NEXT: wur.sar_byte a8 -; CHECK-NEXT: wur.accx_0 a8 -; CHECK-NEXT: wur.accx_1 a8 -; CHECK-NEXT: retw.n - - - ; Function Attrs: nounwind declare void @llvm.xtensa.ee.vld.128.ip(i32 immarg, i32, i32 immarg) #1 diff --git a/llvm/test/MC/Xtensa/xtensa-esp32s3-valid.s b/llvm/test/MC/Xtensa/xtensa-esp32s3-valid.s index c5ed22386ec0c..5c55229199d16 100644 --- a/llvm/test/MC/Xtensa/xtensa-esp32s3-valid.s +++ b/llvm/test/MC/Xtensa/xtensa-esp32s3-valid.s @@ -1,25 +1,18 @@ # 
RUN: llvm-mc %s -triple=xtensa -mcpu=esp32s3 -show-encoding \ -# RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s +# RUN: | FileCheck %s .align 4 -LBL0: -# CHECK-INST: ee.clr_bit_gpio_out 52 -# CHECK: encoding: [0x44,0x43,0x76] +LBL0: +# CHECK: LBL0: ee.clr_bit_gpio_out 52 - -# CHECK-INST: ee.get_gpio_in a2 -# CHECK: encoding: [0x24,0x08,0x65] +# CHECK: ee.clr_bit_gpio_out 52 # encoding: [0x44,0x43,0x76] ee.get_gpio_in a2 - -# CHECK-INST: ee.set_bit_gpio_out 18 -# CHECK: encoding: [0x24,0x41,0x75] +# CHECK: ee.get_gpio_in a2 # encoding: [0x24,0x08,0x65] ee.set_bit_gpio_out 18 - -# CHECK-INST: ee.wr_mask_gpio_out a3, a2 -# CHECK: encoding: [0x34,0x42,0x72] +# CHECK: ee.set_bit_gpio_out 18 # encoding: [0x24,0x41,0x75] ee.wr_mask_gpio_out a3, a2 - +# CHECK: ee.wr_mask_gpio_out a3, a2 # encoding: [0x34,0x42,0x72] ee.andq q5, q6, q4 # CHECK: ee.andq q5, q6, q4 # encoding: [0xc4,0xb8,0xed] ee.bitrev q2, a6 @@ -27,33 +20,33 @@ ee.bitrev q2, a6 ee.cmul.s16 q3, q6, q2, 3 # CHECK: ee.cmul.s16 q3, q6, q2, 3 # encoding: [0x34,0x96,0x9e] ee.cmul.s16.ld.incp q2, a7, q5, q1, q4, 2 -# CHECK: ee.cmul.s16.ld.incp q2, a7, q5, q1, q4, 2 # encoding: [0xe7,0x60,0x2a,0x1c] +# CHECK: ee.cmul.s16.ld.incp q2, a7, q5, q1, q4, 2 # encoding: [0x7e,0x6e,0x05,0xe1] ee.cmul.s16.st.incp q7, a11, q1, q5, q2, 3 -# CHECK: ee.cmul.s16.st.incp q7, a11, q1, q5, q2, 3 # encoding: [0x3b,0x57,0x83,0x1c] +# CHECK: ee.cmul.s16.st.incp q7, a11, q1, q5, q2, 3 # encoding: [0xbf,0x53,0x71,0xe4] ee.fft.ams.s16.ld.incp q5, a5, q3, q1, q1, q2, q5, 1 -# CHECK: ee.fft.ams.s16.ld.incp q5, a5, q3, q1, q1, q2, q5, 1 # encoding: [0xd5,0x5a,0x4a,0x1a] +# CHECK: ee.fft.ams.s16.ld.incp q5, a5, q3, q1, q1, q2, q5, 1 # encoding: [0x5e,0x5d,0xa5,0xd2] ee.fft.ams.s16.ld.incp.uaup q7, a12, q4, q1, q5, q6, q3, 0 -# CHECK: ee.fft.ams.s16.ld.incp.uaup q7, a12, q4, q1, q5, q6, q3, 0 # encoding: [0xfc,0x66,0x87,0x1a] +# CHECK: ee.fft.ams.s16.ld.incp.uaup q7, a12, q4, q1, q5, q6, q3, 0 # encoding: [0xcf,0x6f,0x63,0xd4] 
ee.fft.ams.s16.ld.r32.decp q6, a5, q0, q2, q7, q2, q0, 0 -# CHECK: ee.fft.ams.s16.ld.r32.decp q6, a5, q0, q2, q7, q2, q0, 0 # encoding: [0x65,0xc2,0x11,0x1b] +# CHECK: ee.fft.ams.s16.ld.r32.decp q6, a5, q0, q2, q7, q2, q0, 0 # encoding: [0x5f,0xc6,0x28,0xd8] ee.fft.ams.s16.st.incp q3, q6, a7, a6, q5, q5, q1, 1 -# CHECK: ee.fft.ams.s16.st.incp q3, q6, a7, a6, q5, q5, q1, 1 # encoding: [0x67,0x4b,0xeb,0x14] +# CHECK: ee.fft.ams.s16.st.incp q3, q6, a7, a6, q5, q5, q1, 1 # encoding: [0x7f,0x46,0xb5,0xa7] ee.fft.cmul.s16.ld.xp q3, a12, a6, q7, q0, q7, 2 -# CHECK: ee.fft.cmul.s16.ld.xp q3, a12, a6, q7, q0, q7, 2 # encoding: [0x6c,0x1f,0xae,0x1b] +# CHECK: ee.fft.cmul.s16.ld.xp q3, a12, a6, q7, q0, q7, 2 # encoding: [0xce,0x16,0xf7,0xdd] ee.fft.cmul.s16.st.xp q4, q0, q0, a2, a8, 6, 1, 1 -# CHECK: ee.fft.cmul.s16.st.xp q4, q0, q0, a2, a8, 6, 1, 1 # encoding: [0x82,0x30,0x51,0x15] +# CHECK: ee.fft.cmul.s16.st.xp q4, q0, q0, a2, a8, 6, 1, 1 # encoding: [0x2f,0x38,0x08,0xaa] ee.fft.r2bf.s16 q7, q1, q3, q6, 1 # CHECK: ee.fft.r2bf.s16 q7, q1, q3, q6, 1 # encoding: [0x54,0x9d,0xfc] ee.fft.r2bf.s16.st.incp q7, q3, q7, a2, 2 -# CHECK: ee.fft.r2bf.s16.st.incp q7, q3, q7, a2, 2 # encoding: [0x42,0xd7,0x1e,0x1d] +# CHECK: ee.fft.r2bf.s16.st.incp q7, q3, q7, a2, 2 # encoding: [0x2e,0xd4,0x7f,0xe8] ee.fft.vst.r32.decp q3, a14, 0 # CHECK: ee.fft.vst.r32.decp q3, a14, 0 # encoding: [0xe4,0xb3,0xdd] ee.ldf.128.ip f3, f5, f8, f0, a13, 64 -# CHECK: ee.ldf.128.ip f3, f5, f8, f0, a13, 64 # encoding: [0x4d,0x80,0x35,0x10] +# CHECK: ee.ldf.128.ip f3, f5, f8, f0, a13, 64 # encoding: [0xdf,0x84,0x0a,0x81] ee.ldf.128.xp f5, f2, f4, f4, a7, a8 -# CHECK: ee.ldf.128.xp f5, f2, f4, f4, a7, a8 # encoding: [0x87,0x44,0x52,0x11] +# CHECK: ee.ldf.128.xp f5, f2, f4, f4, a7, a8 # encoding: [0x7e,0x48,0x49,0x8a] ee.ldf.64.ip f6, f5, a1, 488 -# CHECK: ee.ldf.64.ip f6, f5, a1, 488 # encoding: [0x51,0x65,0x1e,0x1c] +# CHECK: ee.ldf.64.ip f6, f5, a1, 488 # encoding: [0x1e,0x65,0x5f,0xe0] ee.ldf.64.xp f0, f6, a3, 
a8 # CHECK: ee.ldf.64.xp f0, f6, a3, a8 # encoding: [0x30,0x08,0x66] ee.ldqa.s16.128.ip a11, 1904 @@ -73,7 +66,7 @@ ee.ldqa.u8.128.ip a4, 784 ee.ldqa.u8.128.xp a4, a9 # CHECK: ee.ldqa.u8.128.xp a4, a9 # encoding: [0x44,0x49,0x70] ee.ldxq.32 q2, q6, a11, 2, 1 -# CHECK: ee.ldxq.32 q2, q6, a11, 2, 1 # encoding: [0xdb,0x8f,0x25,0x1c] +# CHECK: ee.ldxq.32 q2, q6, a11, 2, 1 # encoding: [0xbf,0x8d,0xf2,0xe1] ee.ld.128.usar.ip q4, a8, -592 # CHECK: ee.ld.128.usar.ip q4, a8, -592 # encoding: [0x84,0x5b,0xe1] ee.ld.128.usar.xp q1, a9, a7 @@ -123,23 +116,23 @@ ee.srcxxp.2q q6, q0, a2, a14 ee.src.q q6, q7, q5 # CHECK: ee.src.q q6, q7, q5 # encoding: [0x64,0xf3,0xec] ee.src.q.ld.ip q2, a2, 1792, q6, q7 -# CHECK: ee.src.q.ld.ip q2, a2, 1792, q6, q7 # encoding: [0x02,0xa7,0x35,0x1c] +# CHECK: ee.src.q.ld.ip q2, a2, 1792, q6, q7 # encoding: [0x2f,0xa0,0x7a,0xe1] ee.src.q.ld.xp q2, a4, a9, q1, q7 -# CHECK: ee.src.q.ld.xp q2, a4, a9, q1, q7 # encoding: [0x94,0x47,0x04,0x1d] +# CHECK: ee.src.q.ld.xp q2, a4, a9, q1, q7 # encoding: [0x4e,0x49,0x72,0xe8] ee.src.q.qup q4, q3, q7 # CHECK: ee.src.q.qup q4, q3, q7 # encoding: [0x44,0xb7,0xfc] ee.srs.accx a12, a1, 0 # CHECK: ee.srs.accx a12, a1, 0 # encoding: [0x14,0x1c,0x7e] ee.stf.128.ip f4, f3, f8, f2, a4, -128 -# CHECK: ee.stf.128.ip f4, f3, f8, f2, a4, -128 # encoding: [0x84,0x82,0x43,0x12] +# CHECK: ee.stf.128.ip f4, f3, f8, f2, a4, -128 # encoding: [0x4f,0x88,0x21,0x92] ee.stf.128.xp f2, f0, f5, f8, a11, a5 -# CHECK: ee.stf.128.xp f2, f0, f5, f8, a11, a5 # encoding: [0x5b,0x58,0x20,0x13] +# CHECK: ee.stf.128.xp f2, f0, f5, f8, a11, a5 # encoding: [0xbe,0x55,0x80,0x99] ee.stf.64.ip f3, f6, a10, -848 -# CHECK: ee.stf.64.ip f3, f6, a10, -848 # encoding: [0x6a,0x36,0x4b,0x1c] +# CHECK: ee.stf.64.ip f3, f6, a10, -848 # encoding: [0xaf,0x36,0x65,0xe2] ee.stf.64.xp f2, f1, a1, a14 # CHECK: ee.stf.64.xp f2, f1, a1, a14 # encoding: [0x10,0x2e,0x17] ee.stxq.32 q5, q2, a5, 0, 1 -# CHECK: ee.stxq.32 q5, q2, a5, 0, 1 # encoding: 
[0x05,0x8d,0xc0,0x1c] +# CHECK: ee.stxq.32 q5, q2, a5, 0, 1 # encoding: [0x5e,0x80,0xd0,0xe6] ee.st.accx.ip a10, 24 # CHECK: ee.st.accx.ip a10, 24 # encoding: [0xa4,0x03,0x02] ee.st.qacc_h.h.32.ip a14, 380 @@ -155,21 +148,21 @@ ee.st.ua_state.ip a4, -1728 ee.vadds.s16 q5, q1, q4 # CHECK: ee.vadds.s16 q5, q1, q4 # encoding: [0x64,0xc1,0xae] ee.vadds.s16.ld.incp q6, a6, q1, q3, q1 -# CHECK: ee.vadds.s16.ld.incp q6, a6, q1, q3, q1 # encoding: [0xd6,0xca,0x62,0x1c] +# CHECK: ee.vadds.s16.ld.incp q6, a6, q1, q3, q1 # encoding: [0x6e,0xcd,0xa1,0xe3] ee.vadds.s16.st.incp q4, a0, q1, q3, q1 -# CHECK: ee.vadds.s16.st.incp q4, a0, q1, q3, q1 # encoding: [0x00,0xcc,0x92,0x1c] +# CHECK: ee.vadds.s16.st.incp q4, a0, q1, q3, q1 # encoding: [0x0e,0xc0,0xc9,0xe4] ee.vadds.s32 q3, q5, q2 # CHECK: ee.vadds.s32 q3, q5, q2 # encoding: [0x74,0x95,0x9e] ee.vadds.s32.ld.incp q4, a4, q1, q6, q5 -# CHECK: ee.vadds.s32.ld.incp q4, a4, q1, q6, q5 # encoding: [0xd4,0xab,0x43,0x1c] +# CHECK: ee.vadds.s32.ld.incp q4, a4, q1, q6, q5 # encoding: [0x4f,0xad,0xb1,0xe2] ee.vadds.s32.st.incp q5, a1, q0, q6, q0 -# CHECK: ee.vadds.s32.st.incp q5, a1, q0, q6, q0 # encoding: [0x11,0x85,0x91,0x1c] +# CHECK: ee.vadds.s32.st.incp q5, a1, q0, q6, q0 # encoding: [0x1f,0x81,0x58,0xe4] ee.vadds.s8 q4, q4, q5 # CHECK: ee.vadds.s8 q4, q4, q5 # encoding: [0x84,0x4c,0xae] ee.vadds.s8.ld.incp q2, a14, q0, q3, q3 -# CHECK: ee.vadds.s8.ld.incp q2, a14, q0, q3, q3 # encoding: [0xce,0xd9,0x20,0x1c] +# CHECK: ee.vadds.s8.ld.incp q2, a14, q0, q3, q3 # encoding: [0xee,0xdc,0x90,0xe1] ee.vadds.s8.st.incp q0, a9, q4, q7, q0 -# CHECK: ee.vadds.s8.st.incp q0, a9, q4, q7, q0 # encoding: [0x29,0xc0,0x99,0x1c] +# CHECK: ee.vadds.s8.st.incp q0, a9, q4, q7, q0 # encoding: [0x9f,0xc2,0x0c,0xe4] ee.vcmp.eq.s16 q5, q3, q0 # CHECK: ee.vcmp.eq.s16 q5, q3, q0 # encoding: [0x94,0x83,0xae] ee.vcmp.eq.s32 q5, q5, q4 @@ -223,159 +216,159 @@ ee.vld.l.64.xp q1, a2, a9 ee.vmax.s16 q2, q5, q6 # CHECK: ee.vmax.s16 q2, q5, q6 # encoding: 
[0x24,0x75,0x9e] ee.vmax.s16.ld.incp q0, a0, q6, q1, q2 -# CHECK: ee.vmax.s16.ld.incp q0, a0, q6, q1, q2 # encoding: [0xd0,0x51,0x0c,0x1c] +# CHECK: ee.vmax.s16.ld.incp q0, a0, q6, q1, q2 # encoding: [0x0e,0x5d,0x16,0xe0] ee.vmax.s16.st.incp q5, a10, q6, q6, q7 -# CHECK: ee.vmax.s16.st.incp q5, a10, q6, q6, q7 # encoding: [0x3a,0xbd,0x9d,0x1c] +# CHECK: ee.vmax.s16.st.incp q5, a10, q6, q6, q7 # encoding: [0xaf,0xb3,0xde,0xe4] ee.vmax.s32 q3, q2, q7 # CHECK: ee.vmax.s32 q3, q2, q7 # encoding: [0x34,0xfa,0x9e] ee.vmax.s32.ld.incp q1, a3, q1, q1, q0 -# CHECK: ee.vmax.s32.ld.incp q1, a3, q1, q1, q0 # encoding: [0xe3,0x41,0x12,0x1c] +# CHECK: ee.vmax.s32.ld.incp q1, a3, q1, q1, q0 # encoding: [0x3e,0x4e,0x19,0xe0] ee.vmax.s32.st.incp q3, a12, q4, q6, q3 -# CHECK: ee.vmax.s32.st.incp q3, a12, q4, q6, q3 # encoding: [0x0c,0x9b,0xa9,0x1c] +# CHECK: ee.vmax.s32.st.incp q3, a12, q4, q6, q3 # encoding: [0xcf,0x90,0xb4,0xe5] ee.vmax.s8 q4, q1, q6 # CHECK: ee.vmax.s8 q4, q1, q6 # encoding: [0x44,0x71,0xae] ee.vmax.s8.ld.incp q3, a10, q5, q1, q5 -# CHECK: ee.vmax.s8.ld.incp q3, a10, q5, q1, q5 # encoding: [0xfa,0x69,0x3a,0x1c] +# CHECK: ee.vmax.s8.ld.incp q3, a10, q5, q1, q5 # encoding: [0xae,0x6f,0x9d,0xe1] ee.vmax.s8.st.incp q3, a9, q3, q6, q7 -# CHECK: ee.vmax.s8.st.incp q3, a9, q3, q6, q7 # encoding: [0x09,0xbb,0xb7,0x1c] +# CHECK: ee.vmax.s8.st.incp q3, a9, q3, q6, q7 # encoding: [0x9f,0xb0,0xbb,0xe5] ee.vmin.s16 q6, q2, q5 # CHECK: ee.vmin.s16 q6, q2, q5 # encoding: [0x54,0x6a,0xbe] ee.vmin.s16.ld.incp q5, a3, q2, q4, q0 -# CHECK: ee.vmin.s16.ld.incp q5, a3, q2, q4, q0 # encoding: [0xe3,0x02,0x55,0x1c] +# CHECK: ee.vmin.s16.ld.incp q5, a3, q2, q4, q0 # encoding: [0x3f,0x0e,0x2a,0xe2] ee.vmin.s16.st.incp q4, a9, q4, q6, q0 -# CHECK: ee.vmin.s16.st.incp q4, a9, q4, q6, q0 # encoding: [0x19,0x84,0xa9,0x1c] +# CHECK: ee.vmin.s16.st.incp q4, a9, q4, q6, q0 # encoding: [0x9f,0x81,0x44,0xe5] ee.vmin.s32 q1, q1, q6 # CHECK: ee.vmin.s32 q1, q1, q6 # encoding: [0x64,0xf1,0x8e] 
ee.vmin.s32.ld.incp q0, a1, q3, q2, q0 -# CHECK: ee.vmin.s32.ld.incp q0, a1, q3, q2, q0 # encoding: [0xe1,0x83,0x06,0x1c] +# CHECK: ee.vmin.s32.ld.incp q0, a1, q3, q2, q0 # encoding: [0x1e,0x8e,0x33,0xe0] ee.vmin.s32.st.incp q0, a12, q4, q4, q3 -# CHECK: ee.vmin.s32.st.incp q0, a12, q4, q4, q3 # encoding: [0x1c,0x18,0xb9,0x1c] +# CHECK: ee.vmin.s32.st.incp q0, a12, q4, q4, q3 # encoding: [0xcf,0x11,0x8c,0xe5] ee.vmin.s8 q7, q6, q0 # CHECK: ee.vmin.s8 q7, q6, q0 # encoding: [0x74,0xa6,0xbe] ee.vmin.s8.ld.incp q2, a13, q7, q7, q3 -# CHECK: ee.vmin.s8.ld.incp q2, a13, q7, q7, q3 # encoding: [0xfd,0xda,0x2f,0x1c] +# CHECK: ee.vmin.s8.ld.incp q2, a13, q7, q7, q3 # encoding: [0xdf,0xdf,0xa7,0xe1] ee.vmin.s8.st.incp q2, a4, q4, q7, q1 -# CHECK: ee.vmin.s8.st.incp q2, a4, q4, q7, q1 # encoding: [0x24,0xca,0xa9,0x1c] +# CHECK: ee.vmin.s8.st.incp q2, a4, q4, q7, q1 # encoding: [0x4f,0xc2,0xa4,0xe5] ee.vmulas.s16.accx q0, q7 # CHECK: ee.vmulas.s16.accx q0, q7 # encoding: [0x84,0x58,0x1a] ee.vmulas.s16.accx.ld.ip q7, a7, -16, q2, q0 -# CHECK: ee.vmulas.s16.accx.ld.ip q7, a7, -16, q2, q0 # encoding: [0xf7,0x80,0xf0,0x1f] +# CHECK: ee.vmulas.s16.accx.ld.ip q7, a7, -16, q2, q0 # encoding: [0x7e,0x8f,0x08,0xff] ee.vmulas.s16.accx.ld.ip.qup q5, a14, 32, q0, q2, q0, q2 -# CHECK: ee.vmulas.s16.accx.ld.ip.qup q5, a14, 32, q0, q2, q0, q2 # encoding: [0x2e,0x10,0x54,0x00] +# CHECK: ee.vmulas.s16.accx.ld.ip.qup q5, a14, 32, q0, q2, q0, q2 # encoding: [0xee,0x12,0x0a,0x02] ee.vmulas.s16.accx.ld.xp q1, a0, a1, q2, q6 -# CHECK: ee.vmulas.s16.accx.ld.xp q1, a0, a1, q2, q6 # encoding: [0x10,0xb1,0x10,0x1e] +# CHECK: ee.vmulas.s16.accx.ld.xp q1, a0, a1, q2, q6 # encoding: [0x0e,0xb1,0x18,0xf0] ee.vmulas.s16.accx.ld.xp.qup q4, a8, a10, q4, q0, q0, q3 -# CHECK: ee.vmulas.s16.accx.ld.xp.qup q4, a8, a10, q4, q0, q0, q3 # encoding: [0xa8,0x00,0x47,0x16] +# CHECK: ee.vmulas.s16.accx.ld.xp.qup q4, a8, a10, q4, q0, q0, q3 # encoding: [0x8f,0x0a,0x03,0xb2] ee.vmulas.s16.qacc q0, q6 # CHECK: 
ee.vmulas.s16.qacc q0, q6 # encoding: [0x84,0x70,0x1a] ee.vmulas.s16.qacc.ldbc.incp q2, a6, q3, q4 # CHECK: ee.vmulas.s16.qacc.ldbc.incp q2, a6, q3, q4 # encoding: [0x64,0xc3,0x87] ee.vmulas.s16.qacc.ldbc.incp.qup q0, a4, q1, q6, q4, q5 -# CHECK: ee.vmulas.s16.qacc.ldbc.incp.qup q0, a4, q1, q6, q4, q5 # encoding: [0x84,0x74,0x0a,0x1c] +# CHECK: ee.vmulas.s16.qacc.ldbc.incp.qup q0, a4, q1, q6, q4, q5 # encoding: [0x4e,0x78,0x45,0xe0] ee.vmulas.s16.qacc.ld.ip q7, a7, -64, q7, q7 -# CHECK: ee.vmulas.s16.qacc.ld.ip q7, a7, -64, q7, q7 # encoding: [0xc7,0xf8,0xf3,0x1f] +# CHECK: ee.vmulas.s16.qacc.ld.ip q7, a7, -64, q7, q7 # encoding: [0x7f,0xfc,0x89,0xff] ee.vmulas.s16.qacc.ld.ip.qup q0, a10, 48, q3, q6, q3, q6 -# CHECK: ee.vmulas.s16.qacc.ld.ip.qup q0, a10, 48, q3, q6, q3, q6 # encoding: [0x3a,0xf3,0x0c,0x02] +# CHECK: ee.vmulas.s16.qacc.ld.ip.qup q0, a10, 48, q3, q6, q3, q6 # encoding: [0xae,0xf3,0x36,0x10] ee.vmulas.s16.qacc.ld.xp q3, a11, a4, q4, q5 -# CHECK: ee.vmulas.s16.qacc.ld.xp q3, a11, a4, q4, q5 # encoding: [0x4b,0x29,0x33,0x1e] +# CHECK: ee.vmulas.s16.qacc.ld.xp q3, a11, a4, q4, q5 # encoding: [0xbf,0x24,0x99,0xf1] ee.vmulas.s16.qacc.ld.xp.qup q2, a9, a1, q3, q2, q1, q7 -# CHECK: ee.vmulas.s16.qacc.ld.xp.qup q2, a9, a1, q3, q2, q1, q7 # encoding: [0x19,0xd1,0xae,0x16] +# CHECK: ee.vmulas.s16.qacc.ld.xp.qup q2, a9, a1, q3, q2, q1, q7 # encoding: [0x9e,0xd1,0x17,0xb5] ee.vmulas.s8.accx q1, q0 # CHECK: ee.vmulas.s8.accx q1, q0 # encoding: [0xc4,0x01,0x1a] ee.vmulas.s8.accx.ld.ip q2, a8, 80, q3, q0 -# CHECK: ee.vmulas.s8.accx.ld.ip q2, a8, 80, q3, q0 # encoding: [0x58,0xc0,0x24,0x1e] +# CHECK: ee.vmulas.s8.accx.ld.ip q2, a8, 80, q3, q0 # encoding: [0x8e,0xc5,0x02,0xf1] ee.vmulas.s8.accx.ld.ip.qup q2, a9, -80, q1, q2, q6, q3 -# CHECK: ee.vmulas.s8.accx.ld.ip.qup q2, a9, -80, q1, q2, q6, q3 # encoding: [0xb9,0x56,0xa6,0x05] +# CHECK: ee.vmulas.s8.accx.ld.ip.qup q2, a9, -80, q1, q2, q6, q3 # encoding: [0x9e,0x5b,0x63,0x2d] ee.vmulas.s8.accx.ld.xp q3, a3, a4, q4, 
q7 -# CHECK: ee.vmulas.s8.accx.ld.xp q3, a3, a4, q4, q7 # encoding: [0x43,0x39,0x35,0x1e] +# CHECK: ee.vmulas.s8.accx.ld.xp q3, a3, a4, q4, q7 # encoding: [0x3f,0x34,0x9a,0xf1] ee.vmulas.s8.accx.ld.xp.qup q0, a3, a1, q4, q5, q3, q3 -# CHECK: ee.vmulas.s8.accx.ld.xp.qup q0, a3, a1, q4, q5, q3, q3 # encoding: [0x13,0x2b,0x07,0x17] +# CHECK: ee.vmulas.s8.accx.ld.xp.qup q0, a3, a1, q4, q5, q3, q3 # encoding: [0x3f,0x21,0xb3,0xb8] ee.vmulas.s8.qacc q5, q7 # CHECK: ee.vmulas.s8.qacc q5, q7 # encoding: [0xc4,0x7d,0x1a] ee.vmulas.s8.qacc.ldbc.incp q7, a1, q6, q1 # CHECK: ee.vmulas.s8.qacc.ldbc.incp q7, a1, q6, q1 # encoding: [0x14,0xae,0xb7] ee.vmulas.s8.qacc.ldbc.incp.qup q3, a11, q4, q6, q5, q6 -# CHECK: ee.vmulas.s8.qacc.ldbc.incp.qup q3, a11, q4, q6, q5, q6 # encoding: [0x9b,0x35,0x3d,0x1c] +# CHECK: ee.vmulas.s8.qacc.ldbc.incp.qup q3, a11, q4, q6, q5, q6 # encoding: [0xbf,0x39,0x5e,0xe1] ee.vmulas.s8.qacc.ld.ip q5, a10, -16, q0, q0 -# CHECK: ee.vmulas.s8.qacc.ld.ip q5, a10, -16, q0, q0 # encoding: [0xfa,0x00,0xd6,0x1f] +# CHECK: ee.vmulas.s8.qacc.ld.ip q5, a10, -16, q0, q0 # encoding: [0xae,0x0f,0x0b,0xfe] ee.vmulas.s8.qacc.ld.ip.qup q7, a9, -48, q6, q2, q1, q2 -# CHECK: ee.vmulas.s8.qacc.ld.ip.qup q7, a9, -48, q6, q2, q1, q2 # encoding: [0xd9,0x91,0xf5,0x07] +# CHECK: ee.vmulas.s8.qacc.ld.ip.qup q7, a9, -48, q6, q2, q1, q2 # encoding: [0x9f,0x9d,0x1a,0x3f] ee.vmulas.s8.qacc.ld.xp q1, a1, a12, q5, q0 -# CHECK: ee.vmulas.s8.qacc.ld.xp q1, a1, a12, q5, q0 # encoding: [0xc1,0x41,0x17,0x1e] +# CHECK: ee.vmulas.s8.qacc.ld.xp q1, a1, a12, q5, q0 # encoding: [0x1f,0x4c,0x1b,0xf0] ee.vmulas.s8.qacc.ld.xp.qup q0, a1, a14, q1, q6, q2, q4 -# CHECK: ee.vmulas.s8.qacc.ld.xp.qup q0, a1, a14, q1, q6, q2, q4 # encoding: [0xe1,0x72,0x88,0x17] +# CHECK: ee.vmulas.s8.qacc.ld.xp.qup q0, a1, a14, q1, q6, q2, q4 # encoding: [0x1e,0x7e,0x24,0xbc] ee.vmulas.u16.accx q7, q1 # CHECK: ee.vmulas.u16.accx q7, q1 # encoding: [0x84,0x0f,0x0a] ee.vmulas.u16.accx.ld.ip q5, a8, -32, q1, q4 -# CHECK: 
ee.vmulas.u16.accx.ld.ip q5, a8, -32, q1, q4 # encoding: [0xe8,0x60,0xd8,0x1f] +# CHECK: ee.vmulas.u16.accx.ld.ip q5, a8, -32, q1, q4 # encoding: [0x8e,0x6e,0x0c,0xfe] ee.vmulas.u16.accx.ld.ip.qup q1, a0, 48, q7, q4, q4, q0 -# CHECK: ee.vmulas.u16.accx.ld.ip.qup q1, a0, 48, q7, q4, q4, q0 # encoding: [0x30,0xe4,0x11,0x08] +# CHECK: ee.vmulas.u16.accx.ld.ip.qup q1, a0, 48, q7, q4, q4, q0 # encoding: [0x0f,0xe3,0x48,0x40] ee.vmulas.u16.accx.ld.xp q3, a14, a4, q5, q4 -# CHECK: ee.vmulas.u16.accx.ld.xp q3, a14, a4, q5, q4 # encoding: [0x4e,0x61,0x39,0x1e] +# CHECK: ee.vmulas.u16.accx.ld.xp q3, a14, a4, q5, q4 # encoding: [0xef,0x64,0x1c,0xf1] ee.vmulas.u16.accx.ld.xp.qup q4, a3, a7, q6, q2, q4, q4 -# CHECK: ee.vmulas.u16.accx.ld.xp.qup q4, a3, a7, q6, q2, q4, q4 # encoding: [0x73,0x94,0x49,0x18] +# CHECK: ee.vmulas.u16.accx.ld.xp.qup q4, a3, a7, q6, q2, q4, q4 # encoding: [0x3f,0x97,0x44,0xc2] ee.vmulas.u16.qacc q5, q5 # CHECK: ee.vmulas.u16.qacc q5, q5 # encoding: [0x84,0x6d,0x0a] ee.vmulas.u16.qacc.ldbc.incp q6, a7, q0, q3 # CHECK: ee.vmulas.u16.qacc.ldbc.incp q6, a7, q0, q3 # encoding: [0x74,0x98,0xd7] ee.vmulas.u16.qacc.ldbc.incp.qup q0, a12, q6, q3, q2, q0 -# CHECK: ee.vmulas.u16.qacc.ldbc.incp.qup q0, a12, q6, q3, q2, q0 # encoding: [0xac,0x9a,0x01,0x1c] +# CHECK: ee.vmulas.u16.qacc.ldbc.incp.qup q0, a12, q6, q3, q2, q0 # encoding: [0xcf,0x9a,0xa0,0xe0] ee.vmulas.u16.qacc.ld.ip q4, a10, 16, q3, q2 -# CHECK: ee.vmulas.u16.qacc.ld.ip q4, a10, 16, q3, q2 # encoding: [0x1a,0xd0,0x4a,0x1e] +# CHECK: ee.vmulas.u16.qacc.ld.ip q4, a10, 16, q3, q2 # encoding: [0xae,0xd1,0x05,0xf2] ee.vmulas.u16.qacc.ld.ip.qup q2, a4, 0, q5, q4, q2, q6 -# CHECK: ee.vmulas.u16.qacc.ld.ip.qup q2, a4, 0, q5, q4, q2, q6 # encoding: [0x04,0x62,0x2d,0x0a] +# CHECK: ee.vmulas.u16.qacc.ld.ip.qup q2, a4, 0, q5, q4, q2, q6 # encoding: [0x4f,0x60,0x26,0x51] ee.vmulas.u16.qacc.ld.xp q6, a14, a2, q4, q0 -# CHECK: ee.vmulas.u16.qacc.ld.xp q6, a14, a2, q4, q0 # encoding: [0x2e,0x01,0x6b,0x1e] +# CHECK: 
ee.vmulas.u16.qacc.ld.xp q6, a14, a2, q4, q0 # encoding: [0xef,0x02,0x15,0xf3] ee.vmulas.u16.qacc.ld.xp.qup q6, a12, a11, q6, q7, q4, q1 -# CHECK: ee.vmulas.u16.qacc.ld.xp.qup q6, a12, a11, q6, q7, q4, q1 # encoding: [0xbc,0xbc,0xe3,0x18] +# CHECK: ee.vmulas.u16.qacc.ld.xp.qup q6, a12, a11, q6, q7, q4, q1 # encoding: [0xcf,0xbb,0xc1,0xc7] ee.vmulas.u8.accx q2, q1 # CHECK: ee.vmulas.u8.accx q2, q1 # encoding: [0xc4,0x0a,0x0a] ee.vmulas.u8.accx.ld.ip q6, a3, -112, q2, q7 -# CHECK: ee.vmulas.u8.accx.ld.ip q6, a3, -112, q2, q7 # encoding: [0x93,0xb8,0xec,0x1f] +# CHECK: ee.vmulas.u8.accx.ld.ip q6, a3, -112, q2, q7 # encoding: [0x3e,0xb9,0x86,0xff] ee.vmulas.u8.accx.ld.ip.qup q7, a3, -32, q3, q3, q7, q5 -# CHECK: ee.vmulas.u8.accx.ld.ip.qup q7, a3, -32, q3, q3, q7, q5 # encoding: [0xe3,0xdf,0xfa,0x0d] +# CHECK: ee.vmulas.u8.accx.ld.ip.qup q7, a3, -32, q3, q3, q7, q5 # encoding: [0x3e,0xde,0xfd,0x6f] ee.vmulas.u8.accx.ld.xp q4, a4, a9, q4, q0 -# CHECK: ee.vmulas.u8.accx.ld.xp q4, a4, a9, q4, q0 # encoding: [0x94,0x01,0x4d,0x1e] +# CHECK: ee.vmulas.u8.accx.ld.xp q4, a4, a9, q4, q0 # encoding: [0x4f,0x09,0x16,0xf2] ee.vmulas.u8.accx.ld.xp.qup q5, a7, a13, q4, q7, q2, q6 -# CHECK: ee.vmulas.u8.accx.ld.xp.qup q5, a7, a13, q4, q7, q2, q6 # encoding: [0xd7,0x3a,0x5d,0x19] +# CHECK: ee.vmulas.u8.accx.ld.xp.qup q5, a7, a13, q4, q7, q2, q6 # encoding: [0x7f,0x3d,0xae,0xca] ee.vmulas.u8.qacc q3, q6 # CHECK: ee.vmulas.u8.qacc q3, q6 # encoding: [0xc4,0x73,0x0a] ee.vmulas.u8.qacc.ldbc.incp q4, a1, q0, q5 # CHECK: ee.vmulas.u8.qacc.ldbc.incp q4, a1, q0, q5 # encoding: [0x14,0x48,0xf7] ee.vmulas.u8.qacc.ldbc.incp.qup q2, a1, q5, q7, q6, q4 -# CHECK: ee.vmulas.u8.qacc.ldbc.incp.qup q2, a1, q5, q7, q6, q4 # encoding: [0xb1,0x7e,0x29,0x1c] +# CHECK: ee.vmulas.u8.qacc.ldbc.incp.qup q2, a1, q5, q7, q6, q4 # encoding: [0x1f,0x7b,0xe4,0xe1] ee.vmulas.u8.qacc.ld.ip q2, a12, 32, q1, q4 -# CHECK: ee.vmulas.u8.qacc.ld.ip q2, a12, 32, q1, q4 # encoding: [0x2c,0x60,0x2e,0x1e] +# CHECK: 
ee.vmulas.u8.qacc.ld.ip q2, a12, 32, q1, q4 # encoding: [0xce,0x62,0x07,0xf1] ee.vmulas.u8.qacc.ld.ip.qup q0, a6, 48, q0, q0, q6, q0 -# CHECK: ee.vmulas.u8.qacc.ld.ip.qup q0, a6, 48, q0, q0, q6, q0 # encoding: [0x36,0x06,0x00,0x0e] +# CHECK: ee.vmulas.u8.qacc.ld.ip.qup q0, a6, 48, q0, q0, q6, q0 # encoding: [0x6e,0x03,0x60,0x70] ee.vmulas.u8.qacc.ld.xp q6, a1, a1, q2, q5 -# CHECK: ee.vmulas.u8.qacc.ld.xp q6, a1, a1, q2, q5 # encoding: [0x11,0xa9,0x6e,0x1e] +# CHECK: ee.vmulas.u8.qacc.ld.xp q6, a1, a1, q2, q5 # encoding: [0x1e,0xa1,0x97,0xf3] ee.vmulas.u8.qacc.ld.xp.qup q1, a8, a10, q3, q7, q1, q3 -# CHECK: ee.vmulas.u8.qacc.ld.xp.qup q1, a8, a10, q3, q7, q1, q3 # encoding: [0xa8,0xf9,0x96,0x19] +# CHECK: ee.vmulas.u8.qacc.ld.xp.qup q1, a8, a10, q3, q7, q1, q3 # encoding: [0x8e,0xfa,0x9b,0xcc] ee.vmul.s16 q0, q4, q1 # CHECK: ee.vmul.s16 q0, q4, q1 # encoding: [0x84,0x2c,0x8e] ee.vmul.s16.ld.incp q4, a5, q1, q5, q5 -# CHECK: ee.vmul.s16.ld.incp q4, a5, q1, q5, q5 # encoding: [0xf5,0x6b,0x43,0x1c] +# CHECK: ee.vmul.s16.ld.incp q4, a5, q1, q5, q5 # encoding: [0x5f,0x6f,0xb1,0xe2] ee.vmul.s16.st.incp q4, a4, q2, q5, q0 -# CHECK: ee.vmul.s16.st.incp q4, a4, q2, q5, q0 # encoding: [0x24,0x44,0xb5,0x1c] +# CHECK: ee.vmul.s16.st.incp q4, a4, q2, q5, q0 # encoding: [0x4f,0x42,0x4a,0xe5] ee.vmul.s8 q5, q3, q2 # CHECK: ee.vmul.s8 q5, q3, q2 # encoding: [0x94,0xb3,0xae] ee.vmul.s8.ld.incp q6, a11, q3, q6, q4 -# CHECK: ee.vmul.s8.ld.incp q6, a11, q3, q6, q4 # encoding: [0xcb,0xa4,0x67,0x1c] +# CHECK: ee.vmul.s8.ld.incp q6, a11, q3, q6, q4 # encoding: [0xbf,0xac,0x43,0xe3] ee.vmul.s8.st.incp q5, a5, q5, q2, q4 -# CHECK: ee.vmul.s8.st.incp q5, a5, q5, q2, q4 # encoding: [0x35,0xa5,0xaa,0x1c] +# CHECK: ee.vmul.s8.st.incp q5, a5, q5, q2, q4 # encoding: [0x5e,0xa3,0x55,0xe5] ee.vmul.u16 q0, q0, q5 # CHECK: ee.vmul.u16 q0, q0, q5 # encoding: [0xa4,0x68,0x8e] ee.vmul.u16.ld.incp q4, a2, q0, q1, q1 -# CHECK: ee.vmul.u16.ld.incp q4, a2, q0, q1, q1 # encoding: [0xc2,0x4d,0x40,0x1c] +# 
CHECK: ee.vmul.u16.ld.incp q4, a2, q0, q1, q1 # encoding: [0x2e,0x4c,0xd0,0xe2] ee.vmul.u16.st.incp q6, a5, q1, q2, q7 -# CHECK: ee.vmul.u16.st.incp q6, a5, q1, q2, q7 # encoding: [0x35,0xbe,0xb2,0x1c] +# CHECK: ee.vmul.u16.st.incp q6, a5, q1, q2, q7 # encoding: [0x5e,0xb3,0xe9,0xe5] ee.vmul.u8 q6, q4, q5 # CHECK: ee.vmul.u8 q6, q4, q5 # encoding: [0xb4,0x6c,0xbe] ee.vmul.u8.ld.incp q1, a5, q4, q1, q1 -# CHECK: ee.vmul.u8.ld.incp q1, a5, q4, q1, q1 # encoding: [0xc5,0x4e,0x18,0x1c] +# CHECK: ee.vmul.u8.ld.incp q1, a5, q4, q1, q1 # encoding: [0x5e,0x4c,0xec,0xe0] ee.vmul.u8.st.incp q4, a12, q5, q0, q4 -# CHECK: ee.vmul.u8.st.incp q4, a12, q5, q0, q4 # encoding: [0x0c,0x24,0x1a,0x1d] +# CHECK: ee.vmul.u8.st.incp q4, a12, q5, q0, q4 # encoding: [0xce,0x20,0x4d,0xe8] ee.vprelu.s16 q2, q7, q0, a1 # CHECK: ee.vprelu.s16 q2, q7, q0, a1 # encoding: [0x14,0x07,0x9c] ee.vprelu.s8 q5, q6, q5, a13 @@ -389,11 +382,11 @@ ee.vsl.32 q0, q1 ee.vsmulas.s16.qacc q2, q7, 2 # CHECK: ee.vsmulas.s16.qacc q2, q7, 2 # encoding: [0xc4,0x7a,0x9e] ee.vsmulas.s16.qacc.ld.incp q7, a3, q3, q4, 3 -# CHECK: ee.vsmulas.s16.qacc.ld.incp q7, a3, q3, q4, 3 # encoding: [0xc3,0xe7,0x76,0x1c] +# CHECK: ee.vsmulas.s16.qacc.ld.incp q7, a3, q3, q4, 3 # encoding: [0x3e,0xec,0x7b,0xe3] ee.vsmulas.s8.qacc q3, q6, 3 # CHECK: ee.vsmulas.s8.qacc q3, q6, 3 # encoding: [0x54,0xd3,0x8e] ee.vsmulas.s8.qacc.ld.incp q1, a8, q1, q1, 4 -# CHECK: ee.vsmulas.s8.qacc.ld.incp q1, a8, q1, q1, 4 # encoding: [0xc8,0x4a,0x14,0x1c] +# CHECK: ee.vsmulas.s8.qacc.ld.incp q1, a8, q1, q1, 4 # encoding: [0x8e,0x4c,0xaa,0xe0] ee.vsr.32 q4, q3 # CHECK: ee.vsr.32 q4, q3 # encoding: [0xc4,0xbf,0xdd] ee.vst.128.ip q3, a6, -816 @@ -411,21 +404,21 @@ ee.vst.l.64.xp q0, a13, a6 ee.vsubs.s16 q5, q1, q4 # CHECK: ee.vsubs.s16 q5, q1, q4 # encoding: [0xd4,0xe1,0xae] ee.vsubs.s16.ld.incp q1, a4, q6, q0, q1 -# CHECK: ee.vsubs.s16.ld.incp q1, a4, q6, q0, q1 # encoding: [0xd4,0x0c,0x1c,0x1c] +# CHECK: ee.vsubs.s16.ld.incp q1, a4, q6, q0, q1 # 
encoding: [0x4e,0x0d,0xce,0xe0] ee.vsubs.s16.st.incp q7, a13, q7, q5, q2 -# CHECK: ee.vsubs.s16.st.incp q7, a13, q7, q5, q2 # encoding: [0x1d,0x57,0x1f,0x1d] +# CHECK: ee.vsubs.s16.st.incp q7, a13, q7, q5, q2 # encoding: [0xdf,0x51,0x7f,0xe8] ee.vsubs.s32 q2, q7, q6 # CHECK: ee.vsubs.s32 q2, q7, q6 # encoding: [0xe4,0x77,0x9e] ee.vsubs.s32.ld.incp q1, a8, q1, q4, q0 -# CHECK: ee.vsubs.s32.ld.incp q1, a8, q1, q4, q0 # encoding: [0xd8,0x05,0x13,0x1c] +# CHECK: ee.vsubs.s32.ld.incp q1, a8, q1, q4, q0 # encoding: [0x8f,0x0d,0x59,0xe0] ee.vsubs.s32.st.incp q1, a5, q7, q4, q0 -# CHECK: ee.vsubs.s32.st.incp q1, a5, q7, q4, q0 # encoding: [0x25,0x01,0x1f,0x1d] +# CHECK: ee.vsubs.s32.st.incp q1, a5, q7, q4, q0 # encoding: [0x5f,0x02,0x1f,0xe8] ee.vsubs.s8 q7, q1, q5 # CHECK: ee.vsubs.s8 q7, q1, q5 # encoding: [0xf4,0xe9,0xbe] ee.vsubs.s8.ld.incp q4, a2, q6, q1, q6 -# CHECK: ee.vsubs.s8.ld.incp q4, a2, q6, q1, q6 # encoding: [0xd2,0x76,0x4c,0x1c] +# CHECK: ee.vsubs.s8.ld.incp q4, a2, q6, q1, q6 # encoding: [0x2e,0x7d,0x66,0xe2] ee.vsubs.s8.st.incp q6, a1, q6, q2, q3 -# CHECK: ee.vsubs.s8.st.incp q6, a1, q6, q2, q3 # encoding: [0x31,0x9e,0x1c,0x1d] +# CHECK: ee.vsubs.s8.st.incp q6, a1, q6, q2, q3 # encoding: [0x1e,0x93,0xee,0xe8] ee.vunzip.16 q6, q5 # CHECK: ee.vunzip.16 q6, q5 # encoding: [0x84,0xe3,0xec] ee.vunzip.32 q0, q6 @@ -446,7 +439,6 @@ ee.zero.q q0 # CHECK: ee.zero.q q0 # encoding: [0xa4,0x7f,0xcd] ee.zero.qacc # CHECK: ee.zero.qacc # encoding: [0x44,0x08,0x25] - rur.accx_0 a11 # CHECK: rur a11, accx # encoding: [0xd0,0xbe,0xe3] rur.accx_1 a11 From e17dfae235761bc4bae385a784a560c1e7903d1e Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Thu, 28 Mar 2024 16:30:43 +0300 Subject: [PATCH 260/289] [Xtensa] Add fp16 conversion support Close https://github.com/espressif/llvm-project/issues/91 --- llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 13 +- llvm/test/CodeGen/Xtensa/fp16.ll | 166 ++++++++++++++++++ 2 files changed, 178 insertions(+), 1 deletion(-) create mode 
100644 llvm/test/CodeGen/Xtensa/fp16.ll diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index becb746f8b94f..d7eba66ce6f90 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -377,11 +377,22 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, // Needed so that we don't try to implement f128 constant loads using // a load-and-extend of a f80 constant (in cases where the constant // would fit in an f80). - for (MVT VT : MVT::fp_valuetypes()) + for (MVT VT : MVT::fp_valuetypes()) { + setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand); + setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand); + setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand); setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand); + } + + setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); + setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand); + setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand); + setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand); // Floating-point truncation and stores need to be done separately. 
setTruncStoreAction(MVT::f64, MVT::f32, Expand); + setTruncStoreAction(MVT::f64, MVT::f16, Expand); + setTruncStoreAction(MVT::f32, MVT::f16, Expand); // Implement custom stack allocations setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom); diff --git a/llvm/test/CodeGen/Xtensa/fp16.ll b/llvm/test/CodeGen/Xtensa/fp16.ll new file mode 100644 index 0000000000000..297bb71830cc4 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/fp16.ll @@ -0,0 +1,166 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 +; RUN: llc -mtriple=xtensa -mcpu=esp32 < %s | FileCheck --check-prefix=CHECK-ESP32 %s +; RUN: llc -mtriple=xtensa -mcpu=esp32s3 < %s | FileCheck --check-prefix=CHECK-ESP32S3 %s +; RUN: llc -mtriple=xtensa -mcpu=esp32s2 < %s | FileCheck --check-prefix=CHECK-ESP32S2 %s + +target datalayout = "e-m:e-p:32:32-v1:8:8-i64:64-i128:128-n32" +target triple = "xtensa" + +@x = global i16 12902 +@y = global i16 0 +@z = common global i16 0 + +define void @foo() nounwind { +; CHECK-ESP32-LABEL: foo: +; CHECK-ESP32: entry a1, 32 +; CHECK-ESP32-NEXT: l32r a6, .LCPI0_0 +; CHECK-ESP32-NEXT: l16ui a10, a6, 0 +; CHECK-ESP32-NEXT: l32r a5, .LCPI0_1 +; CHECK-ESP32-NEXT: callx8 a5 +; CHECK-ESP32-NEXT: mov.n a7, a10 +; CHECK-ESP32-NEXT: l32r a8, .LCPI0_2 +; CHECK-ESP32-NEXT: l16ui a10, a8, 0 +; CHECK-ESP32-NEXT: callx8 a5 +; CHECK-ESP32-NEXT: wfr f8, a10 +; CHECK-ESP32-NEXT: wfr f9, a7 +; CHECK-ESP32-NEXT: add.s f8, f9, f8 +; CHECK-ESP32-NEXT: rfr a10, f8 +; CHECK-ESP32-NEXT: l32r a8, .LCPI0_3 +; CHECK-ESP32-NEXT: callx8 a8 +; CHECK-ESP32-NEXT: s16i a10, a6, 0 +; CHECK-ESP32-NEXT: retw.n +; +; CHECK-ESP32S3-LABEL: foo: +; CHECK-ESP32S3: entry a1, 32 +; CHECK-ESP32S3-NEXT: l32r a6, .LCPI0_0 +; CHECK-ESP32S3-NEXT: l16ui a10, a6, 0 +; CHECK-ESP32S3-NEXT: l32r a5, .LCPI0_1 +; CHECK-ESP32S3-NEXT: callx8 a5 +; CHECK-ESP32S3-NEXT: mov.n a7, a10 +; CHECK-ESP32S3-NEXT: l32r a8, .LCPI0_2 +; CHECK-ESP32S3-NEXT: l16ui a10, a8, 0 +; CHECK-ESP32S3-NEXT: 
callx8 a5 +; CHECK-ESP32S3-NEXT: wfr f8, a10 +; CHECK-ESP32S3-NEXT: wfr f9, a7 +; CHECK-ESP32S3-NEXT: add.s f8, f9, f8 +; CHECK-ESP32S3-NEXT: rfr a10, f8 +; CHECK-ESP32S3-NEXT: l32r a8, .LCPI0_3 +; CHECK-ESP32S3-NEXT: callx8 a8 +; CHECK-ESP32S3-NEXT: s16i a10, a6, 0 +; CHECK-ESP32S3-NEXT: retw.n +; +; CHECK-ESP32S2-LABEL: foo: +; CHECK-ESP32S2: entry a1, 32 +; CHECK-ESP32S2-NEXT: l32r a6, .LCPI0_0 +; CHECK-ESP32S2-NEXT: l16ui a10, a6, 0 +; CHECK-ESP32S2-NEXT: l32r a5, .LCPI0_1 +; CHECK-ESP32S2-NEXT: callx8 a5 +; CHECK-ESP32S2-NEXT: mov.n a7, a10 +; CHECK-ESP32S2-NEXT: l32r a8, .LCPI0_2 +; CHECK-ESP32S2-NEXT: l16ui a10, a8, 0 +; CHECK-ESP32S2-NEXT: callx8 a5 +; CHECK-ESP32S2-NEXT: mov.n a11, a10 +; CHECK-ESP32S2-NEXT: l32r a8, .LCPI0_3 +; CHECK-ESP32S2-NEXT: mov.n a10, a7 +; CHECK-ESP32S2-NEXT: callx8 a8 +; CHECK-ESP32S2-NEXT: l32r a7, .LCPI0_4 +; CHECK-ESP32S2-NEXT: callx8 a7 +; CHECK-ESP32S2-NEXT: l32r a8, .LCPI0_5 +; CHECK-ESP32S2-NEXT: and a10, a10, a8 +; CHECK-ESP32S2-NEXT: callx8 a5 +; CHECK-ESP32S2-NEXT: callx8 a7 +; CHECK-ESP32S2-NEXT: s16i a10, a6, 0 +; CHECK-ESP32S2-NEXT: retw.n +entry: + %0 = load i16, ptr @x, align 2 + %1 = load i16, ptr @y, align 2 + %2 = tail call float @llvm.convert.from.fp16.f32(i16 %0) + %3 = tail call float @llvm.convert.from.fp16.f32(i16 %1) + %4 = fadd float %2, %3 + %5 = tail call i16 @llvm.convert.to.fp16.f32(float %4) + store i16 %5, ptr @x, align 2 + ret void +} + +define double @test_from_fp16(i16 %in) { +; CHECK-ESP32-LABEL: test_from_fp16: +; CHECK-ESP32: entry a1, 32 +; CHECK-ESP32-NEXT: l32r a8, .LCPI1_0 +; CHECK-ESP32-NEXT: mov.n a10, a2 +; CHECK-ESP32-NEXT: callx8 a8 +; CHECK-ESP32-NEXT: l32r a8, .LCPI1_1 +; CHECK-ESP32-NEXT: callx8 a8 +; CHECK-ESP32-NEXT: mov.n a2, a10 +; CHECK-ESP32-NEXT: mov.n a3, a11 +; CHECK-ESP32-NEXT: retw.n +; +; CHECK-ESP32S3-LABEL: test_from_fp16: +; CHECK-ESP32S3: entry a1, 32 +; CHECK-ESP32S3-NEXT: l32r a8, .LCPI1_0 +; CHECK-ESP32S3-NEXT: mov.n a10, a2 +; CHECK-ESP32S3-NEXT: callx8 a8 +; 
CHECK-ESP32S3-NEXT: l32r a8, .LCPI1_1 +; CHECK-ESP32S3-NEXT: callx8 a8 +; CHECK-ESP32S3-NEXT: mov.n a2, a10 +; CHECK-ESP32S3-NEXT: mov.n a3, a11 +; CHECK-ESP32S3-NEXT: retw.n +; +; CHECK-ESP32S2-LABEL: test_from_fp16: +; CHECK-ESP32S2: entry a1, 32 +; CHECK-ESP32S2-NEXT: l32r a8, .LCPI1_0 +; CHECK-ESP32S2-NEXT: and a10, a2, a8 +; CHECK-ESP32S2-NEXT: l32r a8, .LCPI1_1 +; CHECK-ESP32S2-NEXT: callx8 a8 +; CHECK-ESP32S2-NEXT: l32r a8, .LCPI1_2 +; CHECK-ESP32S2-NEXT: callx8 a8 +; CHECK-ESP32S2-NEXT: mov.n a2, a10 +; CHECK-ESP32S2-NEXT: mov.n a3, a11 +; CHECK-ESP32S2-NEXT: retw.n + %val = call double @llvm.convert.from.fp16.f64(i16 %in) + ret double %val +} + +define i16 @test_to_fp16(double %in) { +; CHECK-ESP32-LABEL: test_to_fp16: +; CHECK-ESP32: entry a1, 32 +; CHECK-ESP32-NEXT: l32r a8, .LCPI2_0 +; CHECK-ESP32-NEXT: mov.n a10, a2 +; CHECK-ESP32-NEXT: mov.n a11, a3 +; CHECK-ESP32-NEXT: callx8 a8 +; CHECK-ESP32-NEXT: l32r a8, .LCPI2_1 +; CHECK-ESP32-NEXT: and a2, a10, a8 +; CHECK-ESP32-NEXT: retw.n +; +; CHECK-ESP32S3-LABEL: test_to_fp16: +; CHECK-ESP32S3: entry a1, 32 +; CHECK-ESP32S3-NEXT: l32r a8, .LCPI2_0 +; CHECK-ESP32S3-NEXT: mov.n a10, a2 +; CHECK-ESP32S3-NEXT: mov.n a11, a3 +; CHECK-ESP32S3-NEXT: callx8 a8 +; CHECK-ESP32S3-NEXT: l32r a8, .LCPI2_1 +; CHECK-ESP32S3-NEXT: and a2, a10, a8 +; CHECK-ESP32S3-NEXT: retw.n +; +; CHECK-ESP32S2-LABEL: test_to_fp16: +; CHECK-ESP32S2: entry a1, 32 +; CHECK-ESP32S2-NEXT: l32r a8, .LCPI2_0 +; CHECK-ESP32S2-NEXT: mov.n a10, a2 +; CHECK-ESP32S2-NEXT: mov.n a11, a3 +; CHECK-ESP32S2-NEXT: callx8 a8 +; CHECK-ESP32S2-NEXT: l32r a8, .LCPI2_1 +; CHECK-ESP32S2-NEXT: and a10, a10, a8 +; CHECK-ESP32S2-NEXT: l32r a8, .LCPI2_2 +; CHECK-ESP32S2-NEXT: callx8 a8 +; CHECK-ESP32S2-NEXT: l32r a8, .LCPI2_3 +; CHECK-ESP32S2-NEXT: callx8 a8 +; CHECK-ESP32S2-NEXT: mov.n a2, a10 +; CHECK-ESP32S2-NEXT: retw.n + %val = call i16 @llvm.convert.to.fp16.f64(double %in) + ret i16 %val +} + +declare float @llvm.convert.from.fp16.f32(i16) nounwind readnone 
+declare double @llvm.convert.from.fp16.f64(i16) nounwind readnone + +declare i16 @llvm.convert.to.fp16.f32(float) nounwind readnone +declare i16 @llvm.convert.to.fp16.f64(double) nounwind readnone From d7ea59ee2b31cac17183bf7d475a572f97118cb8 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Fri, 13 Sep 2024 20:37:32 +0300 Subject: [PATCH 261/289] esp/ci: Add package tests --- .gitlab-ci.yml | 70 ++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 54 insertions(+), 16 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 152f63ecc29bf..f0d98624c1cc0 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -46,7 +46,9 @@ before_script: TARGET: "Xtensa;RISCV" USE_LINKER: "ld" CROSS_BUILD_MINGW: "OFF" + BUILD_TARGET_LIBS: "OFF" RUN_CORE_TESTS: "OFF" + RUN_PKG_TESTS: "OFF" RUN_TARGET_LIB_TESTS: "OFF" PACK_TOOLCHAIN: "ON" PACK_STANDALONE_LIBS: "ON" @@ -59,6 +61,7 @@ before_script: [ ! -f "${BUILD_DIR}/tests.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/tests.log || true [ ! -f "${BUILD_DIR}/compiler-rt-tests.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/compiler-rt-tests.log || true [ ! -f "${BUILD_DIR}/lld-tests.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/lld-tests.log || true + [ ! -f "${BUILD_DIR}/pkg-tests.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/pkg-tests.log || true fi script: - *get_toolchain_build_scripts @@ -73,7 +76,7 @@ before_script: # Re-use core tools built in another job. 
# LLVM-xxx - > - if [ "${PACK_TARGET_LIBS}" == "ON" ]; then + if [ "${PACK_TARGET_LIBS}" == "ON" ] || [ "${BUILD_TARGET_LIBS}" == "ON" ]; then echo "Enable target libraries build" export USE_LIBC="newlib"; export USE_LIBCXX="libstdcxx"; @@ -135,6 +138,15 @@ before_script: chmod o+w ${BUILD_PATH}/compiler-rt-tests.log; runuser -u test_runner -- ninja -C ${BUILD_PATH} check-compiler-rt 2>&1 > ${BUILD_PATH}/compiler-rt-tests.log; fi + if [[ "${RUN_PKG_TESTS}" == "ON" ]]; then + echo "Run package tests"; + # to avoid test failure "fatal: detected dubious ownership in repository at '/builds/llvm-project'" + chown -R test_runner $LLVM_PROJECT_PATH; + touch ${BUILD_PATH}/pkg-tests.log; + chmod o+w ${BUILD_PATH}/pkg-tests.log; + runuser -u test_runner -- ninja -C ${BUILD_PATH} check-package-llvm-toolchain 2>&1 > ${BUILD_PATH}/pkg-tests.log; + chown -R ${CUR_USER} $LLVM_PROJECT_PATH; + fi chown -R ${CUR_USER} ${BUILD_PATH}; fi # pack distro @@ -168,7 +180,11 @@ before_script: ARCHIVE_NAME=$(basename ${DISTRO_PACK_PATH}) echo "${ARCHIVE_NAME}" > ${PWD}/${DIST_DIR}/target_libs_arch_name fi + - ls -l ${PWD}/${DIST_DIR} +# some Clang/LLVM unit tests fail if we build Clang for RISCV+Xtensa only +# this job is intended to run unit tests only, so it builds Clang with all backends +# TODO: LLVM-326 and LLVM-401 build_and_test: tags: [ "amd64", "build" ] stage: test_build @@ -217,12 +233,21 @@ build_and_test: variables: USE_LINKER: "gold" +# Actually this job builds and packs distro for x86_64-linux-gnu, +# but also it runs package tests. So keep it in 'test_build' stage build_x86_64-linux-gnu: extends: .build_linux-gnu_template - stage: build + stage: test_build variables: CONF_HOST: "x86_64-linux-gnu" - + # Build complete distro it is necessary for running package tests + BUILD_TARGET_LIBS: "ON" + RUN_PKG_TESTS: "ON" + +# Target libs are built in build_x86_64-linux-gnu, but due to artifacts +# size limit we have to produce target libs archive in this job. 
+# Archive with target libs from this job will be used in pack jobs +# for all platform except for x86_64-linux-gnu. build_target_libs: extends: .build_linux-gnu_template stage: build @@ -286,7 +311,7 @@ build_aarch64-apple-darwin: paths: - ${DIST_DIR}/ when: always - expire_in: 1 day + expire_in: 3 day variables: PACK_TOOL: "tar cJf" UNPACK_TOOL: "tar xJf" @@ -308,15 +333,6 @@ build_aarch64-apple-darwin: - rm -rf ${DISTRO_PACK_DIR} - ls -l -pack_x86_64-linux-gnu: - extends: .pack_template - needs: - # needs target libs archive from native build job - - job: "build_target_libs" - - job: "build_x86_64-linux-gnu" - variables: - CONF_HOST: "x86_64-linux-gnu" - pack_x86_64-w64-mingw32: extends: .pack_template needs: @@ -429,7 +445,7 @@ test_esp_dsp: matrix: - CHIP: esp32p4 needs: - - job: "pack_x86_64-linux-gnu" + - job: "build_x86_64-linux-gnu" variables: # use IDF 'master' from docker image TEST_APP_IDF_CUSTOM_BRANCH: "" @@ -447,6 +463,28 @@ test_esp_dsp: - idf.py set-target ${CHIP} 2>&1 | tee ${BUILD_LOG} - idf.py build 2>&1 | tee -a ${BUILD_LOG} +test_xesppie: + stage: test + dependencies: + - build_x86_64-linux-gnu + allow_failure: true + only: + - tags + script: + - cd ${DIST_DIR}/ + - ls -l + - DISTRO_PACK_FILE=$(cat dist_name_x86_64-linux-gnu) + - tar -xf ${DISTRO_PACK_FILE} + - ls -l + - cd esp-clang + - ls -l + - pwd + - export CC="$(pwd)/bin/clang" + - export OBJDUMP="$(pwd)/bin/llvm-objdump" + - git clone -q --depth=1 "${GITLAB_SSH_SERVER}/idf/esp-compiler-tests.git" + - cd esp-compiler-tests/build-only/xesppie + - ./test_xesppie.py + upload_to_http: stage: private_deploy when: manual @@ -456,7 +494,7 @@ upload_to_http: # force the fetch strategy to clean old archives up in dist/ dir GIT_STRATEGY: fetch needs: - - job: pack_x86_64-linux-gnu + - job: build_x86_64-linux-gnu script: - cit_add_ssh_key "${HTTP_UPLOAD_KEY}" # List of archives @@ -494,7 +532,7 @@ upload_to_github: TOOLCHAIN_SHA256_FILE: clang-${CI_COMMIT_TAG}-checksum.sha256 LIBS_SHA256_FILE: 
libs-clang-${CI_COMMIT_TAG}-checksum.sha256 needs: - - job: pack_x86_64-linux-gnu + - job: build_x86_64-linux-gnu - job: pack_arm-linux-gnueabihf - job: pack_aarch64-linux-gnu - job: pack_x86_64-w64-mingw32 From 2751bada92243b15c3fefe2d76c55f19852063a9 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Thu, 24 Oct 2024 17:28:39 +0300 Subject: [PATCH 262/289] esp/ci: Use CI_JOB_TOKEN instead of BOT_TOKEN to clone repos --- .gitlab-ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f0d98624c1cc0..7952b6641fd37 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -415,7 +415,7 @@ sign_aarch64-apple-darwin: if [ -n "${TEST_APP_IDF_CUSTOM_BRANCH:-}" ]; then echo "TEST_APP_IDF_CUSTOM_BRANCH=$TEST_APP_IDF_CUSTOM_BRANCH" # Clone esp-idf - git clone --shallow-submodules --recursive --single-branch --branch $TEST_APP_IDF_CUSTOM_BRANCH -- https://gitlab-ci-token:${BOT_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/espressif/esp-idf.git esp-idf + git clone --shallow-submodules --recursive --single-branch --branch $TEST_APP_IDF_CUSTOM_BRANCH -- https://gitlab-ci-token:${CI_JOB_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/espressif/esp-idf.git esp-idf export IDF_PATH=$PWD/esp-idf # Activate pyenv if [ $(command -v pyenv) ]; then @@ -429,7 +429,7 @@ sign_aarch64-apple-darwin: fi idf.py --version || true pushd $IDF_PATH/components - git clone --shallow-submodules --recursive --single-branch --branch $TEST_APP_ESP_DSP_CUSTOM_BRANCH -- https://gitlab-ci-token:${BOT_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/idf/esp-dsp.git esp-dsp + git clone --shallow-submodules --recursive --single-branch --branch $TEST_APP_ESP_DSP_CUSTOM_BRANCH -- https://gitlab-ci-token:${CI_JOB_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/idf/esp-dsp.git esp-dsp pushd $PWD/esp-dsp/test_app test_esp_dsp: From 6c74e2df9360e9708d6fdfd81ccc339aa81bee03 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Wed, 30 Oct 2024 23:49:22 +0300 Subject: [PATCH 
263/289] [Xtensa] Add '+forced-atomics' target feature support --- llvm/lib/Target/Xtensa/Xtensa.td | 11 + llvm/lib/Target/Xtensa/XtensaISelLowering.cpp | 2 + llvm/lib/Target/Xtensa/XtensaInstrInfo.td | 19 +- llvm/lib/Target/Xtensa/XtensaSubtarget.cpp | 2 + llvm/lib/Target/Xtensa/XtensaSubtarget.h | 11 + llvm/test/CodeGen/Xtensa/atomic-load-store.ll | 485 +- llvm/test/CodeGen/Xtensa/atomic-rmw.ll | 4528 +++++++++++++++++ llvm/test/CodeGen/Xtensa/atomicrmw.ll | 103 - llvm/test/CodeGen/Xtensa/forced-atomics.ll | 1005 ++++ 9 files changed, 5985 insertions(+), 181 deletions(-) create mode 100644 llvm/test/CodeGen/Xtensa/atomic-rmw.ll delete mode 100644 llvm/test/CodeGen/Xtensa/atomicrmw.ll create mode 100644 llvm/test/CodeGen/Xtensa/forced-atomics.ll diff --git a/llvm/lib/Target/Xtensa/Xtensa.td b/llvm/lib/Target/Xtensa/Xtensa.td index 2204fff9a7e61..6385b815ee15b 100644 --- a/llvm/lib/Target/Xtensa/Xtensa.td +++ b/llvm/lib/Target/Xtensa/Xtensa.td @@ -183,6 +183,17 @@ def FeatureHIFI3 : SubtargetFeature<"hifi3", "HasHIFI3", "true", def HasHIFI3 : Predicate<"Subtarget->hasHIFI3()">, AssemblerPredicate<(all_of FeatureHIFI3)>; +// Assume that lock-free native-width atomics are available, even if the target +// and operating system combination would not usually provide them. The user +// is responsible for providing any necessary __sync implementations. Code +// built with this feature is not ABI-compatible with code built without this +// feature, if atomic variables are exposed across the ABI boundary. 
+def FeatureForcedAtomics : SubtargetFeature<"forced-atomics", "HasForcedAtomics", "true", + "Assume that lock-free native-width atomics are available">; +def HasForcedAtomics : Predicate<"Subtarget->hasForcedAtomics()">, + AssemblerPredicate<(all_of FeatureForcedAtomics)>; +def HasAtomicLdSt : Predicate<"Subtarget->hasS32C1I() || Subtarget->hasForcedAtomics()">; + //===----------------------------------------------------------------------===// // Xtensa supported processors. //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp index d7eba66ce6f90..709be4ff2115a 100644 --- a/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp @@ -438,6 +438,8 @@ XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM, if (Subtarget.hasS32C1I()) { setMaxAtomicSizeInBitsSupported(32); setMinCmpXchgSizeInBits(32); + } else if (Subtarget.hasForcedAtomics()) { + setMaxAtomicSizeInBitsSupported(32); } else { setMaxAtomicSizeInBitsSupported(0); } diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td index 98cd7fd291e5b..44396cf81f6e9 100644 --- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.td +++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.td @@ -1820,13 +1820,18 @@ def SIMCALL : RRR_Inst<0x00, 0x00, 0x00, (outs), (ins), // Atomic patterns //===----------------------------------------------------------------------===// -def : Pat<(i32 (atomic_load_8 addr_ish1:$addr)), (L8UI addr_ish1:$addr)>; -def : Pat<(i32 (atomic_load_16 addr_ish2:$addr)), (L16UI addr_ish2:$addr)>; -def : Pat<(i32 (atomic_load_32 addr_ish4:$addr)), (L32I addr_ish4:$addr)>; - -def : Pat<(atomic_store_8 AR:$t, addr_ish1:$addr), (S8I AR:$t, addr_ish1:$addr)>; -def : Pat<(atomic_store_16 AR:$t, addr_ish2:$addr), (S16I AR:$t, addr_ish2:$addr)>; -def : Pat<(atomic_store_32 AR:$t, addr_ish4:$addr), 
(S32I AR:$t, addr_ish4:$addr)>; +// Atomic load/store are available under both +s32c1i and +force-atomics. +// Fences will be inserted for atomic load/stores according to the logic in +// XtensaTargetLowering. +let Predicates = [HasAtomicLdSt] in { + def : Pat<(i32 (atomic_load_8 addr_ish1:$addr)), (L8UI addr_ish1:$addr)>; + def : Pat<(i32 (atomic_load_16 addr_ish2:$addr)), (L16UI addr_ish2:$addr)>; + def : Pat<(i32 (atomic_load_32 addr_ish4:$addr)), (L32I addr_ish4:$addr)>; + + def : Pat<(atomic_store_8 AR:$t, addr_ish1:$addr), (S8I AR:$t, addr_ish1:$addr)>; + def : Pat<(atomic_store_16 AR:$t, addr_ish2:$addr), (S16I AR:$t, addr_ish2:$addr)>; + def : Pat<(atomic_store_32 AR:$t, addr_ish4:$addr), (S32I AR:$t, addr_ish4:$addr)>; +} let usesCustomInserter = 1, Predicates = [HasS32C1I] in { def ATOMIC_CMP_SWAP_8_P : Pseudo<(outs AR:$dst), (ins AR:$ptr, AR:$cmp, AR:$swap), diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp index 8544ee0352d03..4b06a47a2fbd4 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.cpp @@ -76,6 +76,8 @@ XtensaSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) { HasESP32S2Ops = false; HasESP32S3Ops = false; HasHIFI3 = false; + HasForcedAtomics = false; + HasAtomicLdSt = false; // Parse features string. 
ParseSubtargetFeatures(CPUName, CPUName, FS); diff --git a/llvm/lib/Target/Xtensa/XtensaSubtarget.h b/llvm/lib/Target/Xtensa/XtensaSubtarget.h index c6e054399be26..ba0bb01b47e61 100644 --- a/llvm/lib/Target/Xtensa/XtensaSubtarget.h +++ b/llvm/lib/Target/Xtensa/XtensaSubtarget.h @@ -135,6 +135,13 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { // Enable Xtensa HIFI3 Extension bool HasHIFI3; + // Enable 'forced-atomics' feature + bool HasForcedAtomics; + + // Enable atomic load and stores ops + bool HasAtomicLdSt; + + XtensaSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS); public: @@ -228,6 +235,10 @@ class XtensaSubtarget : public XtensaGenSubtargetInfo { bool hasHIFI3() const { return HasHIFI3; } + bool hasForcedAtomics() const { return HasForcedAtomics; } + + bool hasAtomicLdSt() const { return HasAtomicLdSt; } + // Automatically generated by tblgen. void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS); }; diff --git a/llvm/test/CodeGen/Xtensa/atomic-load-store.ll b/llvm/test/CodeGen/Xtensa/atomic-load-store.ll index 8047fd2914d21..948f3a6550194 100644 --- a/llvm/test/CodeGen/Xtensa/atomic-load-store.ll +++ b/llvm/test/CodeGen/Xtensa/atomic-load-store.ll @@ -1,107 +1,450 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s | FileCheck %s --check-prefixes=XTENSA,XTENSA_OPT -; RUN: llc -mtriple=xtensa -O0 < %s | FileCheck %s --check-prefixes=XTENSA,XTENSA_OPT_NONE +; RUN: llc -mtriple=xtensa -mcpu=esp32s2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=XTENSA +; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=XTENSA-ATOMIC -define void @store32(ptr %ptr, i32 %val1) { -; XTENSA-LABEL: store32: +define i8 @atomic_load_i8_unordered(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i8_unordered: ; XTENSA: entry a1, 32 -; XTENSA-NEXT: memw -; XTENSA-NEXT: s32i.n a3, a2, 0 -; 
XTENSA-NEXT: memw +; XTENSA-NEXT: movi.n a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI0_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 ; XTENSA-NEXT: retw.n - store atomic i32 %val1, ptr %ptr seq_cst, align 4 +; +; XTENSA-ATOMIC-LABEL: atomic_load_i8_unordered: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = load atomic i8, ptr %a unordered, align 1 + ret i8 %1 +} + +define i8 @atomic_load_i8_monotonic(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i8_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI1_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomic_load_i8_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = load atomic i8, ptr %a monotonic, align 1 + ret i8 %1 +} + +define i8 @atomic_load_i8_acquire(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i8_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 2 +; XTENSA-NEXT: l32r a8, .LCPI2_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomic_load_i8_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = load atomic i8, ptr %a acquire, align 1 + ret i8 %1 +} + +define i8 @atomic_load_i8_seq_cst(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i8_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI3_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomic_load_i8_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = load atomic 
i8, ptr %a seq_cst, align 1 + ret i8 %1 +} + +define i16 @atomic_load_i16_unordered(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i16_unordered: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI4_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomic_load_i16_unordered: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = load atomic i16, ptr %a unordered, align 2 + ret i16 %1 +} + +define i16 @atomic_load_i16_monotonic(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i16_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI5_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomic_load_i16_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = load atomic i16, ptr %a monotonic, align 2 + ret i16 %1 +} + +define i16 @atomic_load_i16_acquire(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i16_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 2 +; XTENSA-NEXT: l32r a8, .LCPI6_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomic_load_i16_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = load atomic i16, ptr %a acquire, align 2 + ret i16 %1 +} + +define i16 @atomic_load_i16_seq_cst(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i16_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI7_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomic_load_i16_seq_cst: +; XTENSA-ATOMIC: entry 
a1, 32 +; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = load atomic i16, ptr %a seq_cst, align 2 + ret i16 %1 +} + +define i32 @atomic_load_i32_unordered(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i32_unordered: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI8_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomic_load_i32_unordered: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i.n a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = load atomic i32, ptr %a unordered, align 4 + ret i32 %1 +} + +define i32 @atomic_load_i32_monotonic(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i32_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI9_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomic_load_i32_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i.n a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = load atomic i32, ptr %a monotonic, align 4 + ret i32 %1 +} + +define i32 @atomic_load_i32_acquire(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i32_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 2 +; XTENSA-NEXT: l32r a8, .LCPI10_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomic_load_i32_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i.n a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = load atomic i32, ptr %a acquire, align 4 + ret i32 %1 +} + +define i32 @atomic_load_i32_seq_cst(ptr %a) nounwind { +; XTENSA-LABEL: atomic_load_i32_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI11_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 
+; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomic_load_i32_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i.n a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = load atomic i32, ptr %a seq_cst, align 4 + ret i32 %1 +} + +define void @atomic_store_i8_unordered(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomic_store_i8_unordered: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI12_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomic_store_i8_unordered: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s8i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw.n + store atomic i8 %b, ptr %a unordered, align 1 + ret void +} + +define void @atomic_store_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomic_store_i8_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI13_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomic_store_i8_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s8i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw.n + store atomic i8 %b, ptr %a monotonic, align 1 + ret void +} + +define void @atomic_store_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomic_store_i8_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI14_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomic_store_i8_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s8i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw.n + store atomic i8 %b, ptr %a release, align 1 ret void } -define i32 @load32(ptr %ptr) { -; XTENSA-LABEL: load32: +define void @atomic_store_i8_seq_cst(ptr %a, i8 %b) 
nounwind { +; XTENSA-LABEL: atomic_store_i8_seq_cst: ; XTENSA: entry a1, 32 -; XTENSA-NEXT: l32i.n a2, a2, 0 -; XTENSA-NEXT: memw +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI15_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 ; XTENSA-NEXT: retw.n - %val = load atomic i32, ptr %ptr seq_cst, align 4 - ret i32 %val +; +; XTENSA-ATOMIC-LABEL: atomic_store_i8_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s8i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + store atomic i8 %b, ptr %a seq_cst, align 1 + ret void +} + +define void @atomic_store_i16_unordered(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomic_store_i16_unordered: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI16_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomic_store_i16_unordered: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s16i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw.n + store atomic i16 %b, ptr %a unordered, align 2 + ret void +} + +define void @atomic_store_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomic_store_i16_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI17_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomic_store_i16_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s16i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw.n + store atomic i16 %b, ptr %a monotonic, align 2 + ret void } -define i8 @load8(ptr %p) { -; XTENSA-LABEL: load8: +define void @atomic_store_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomic_store_i16_release: ; XTENSA: entry a1, 32 -; XTENSA-NEXT: l8ui a2, a2, 0 -; XTENSA-NEXT: memw +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI18_0 +; 
XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 ; XTENSA-NEXT: retw.n - %v = load atomic i8, ptr %p seq_cst, align 1 - ret i8 %v +; +; XTENSA-ATOMIC-LABEL: atomic_store_i16_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s16i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw.n + store atomic i16 %b, ptr %a release, align 2 + ret void } -define void @store8(ptr %p, i8 %val1) { -; XTENSA_OPT-LABEL: store8: -; XTENSA_OPT: entry a1, 32 -; XTENSA_OPT-NEXT: memw -; XTENSA_OPT-NEXT: s8i a3, a2, 0 -; XTENSA_OPT-NEXT: memw -; XTENSA_OPT-NEXT: retw.n +define void @atomic_store_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomic_store_i16_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI19_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw.n ; -; XTENSA_OPT_NONE-LABEL: store8: -; XTENSA_OPT_NONE: entry a1, 32 -; XTENSA_OPT_NONE-NEXT: # kill: def $a8 killed $a3 -; XTENSA_OPT_NONE-NEXT: memw -; XTENSA_OPT_NONE-NEXT: s8i a3, a2, 0 -; XTENSA_OPT_NONE-NEXT: memw -; XTENSA_OPT_NONE-NEXT: retw.n - store atomic i8 %val1, ptr %p seq_cst, align 1 +; XTENSA-ATOMIC-LABEL: atomic_store_i16_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s16i a3, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + store atomic i16 %b, ptr %a seq_cst, align 2 ret void } -define i16 @load16(ptr %p) { -; XTENSA-LABEL: load16: +define void @atomic_store_i32_unordered(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomic_store_i32_unordered: ; XTENSA: entry a1, 32 -; XTENSA-NEXT: l16ui a2, a2, 0 -; XTENSA-NEXT: memw +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI20_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 ; XTENSA-NEXT: retw.n - %v = load atomic i16, ptr %p seq_cst, align 2 - ret i16 %v +; +; XTENSA-ATOMIC-LABEL: 
atomic_store_i32_unordered: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s32i.n a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw.n + store atomic i32 %b, ptr %a unordered, align 4 + ret void } -define void @store16(ptr %p, i16 %val1) { -; XTENSA_OPT-LABEL: store16: -; XTENSA_OPT: entry a1, 32 -; XTENSA_OPT-NEXT: memw -; XTENSA_OPT-NEXT: s16i a3, a2, 0 -; XTENSA_OPT-NEXT: memw -; XTENSA_OPT-NEXT: retw.n +define void @atomic_store_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomic_store_i32_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI21_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw.n ; -; XTENSA_OPT_NONE-LABEL: store16: -; XTENSA_OPT_NONE: entry a1, 32 -; XTENSA_OPT_NONE-NEXT: # kill: def $a8 killed $a3 -; XTENSA_OPT_NONE-NEXT: memw -; XTENSA_OPT_NONE-NEXT: s16i a3, a2, 0 -; XTENSA_OPT_NONE-NEXT: memw -; XTENSA_OPT_NONE-NEXT: retw.n - store atomic i16 %val1, ptr %p seq_cst, align 2 +; XTENSA-ATOMIC-LABEL: atomic_store_i32_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: s32i.n a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw.n + store atomic i32 %b, ptr %a monotonic, align 4 ret void } -define void @test1(ptr %ptr1, ptr %ptr2) { -; XTENSA-LABEL: test1: +define void @atomic_store_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomic_store_i32_release: ; XTENSA: entry a1, 32 -; XTENSA-NEXT: l8ui a8, a2, 0 -; XTENSA-NEXT: s8i a8, a3, 0 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI22_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 ; XTENSA-NEXT: retw.n - %val = load atomic i8, ptr %ptr1 unordered, align 1 - store atomic i8 %val, ptr %ptr2 unordered, align 1 +; +; XTENSA-ATOMIC-LABEL: atomic_store_i32_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s32i.n a3, a2, 0 +; XTENSA-ATOMIC-NEXT: retw.n + store atomic i32 %b, ptr %a release, align 
4 ret void } -define void @test2(ptr %ptr1, ptr %ptr2) { -; XTENSA-LABEL: test2: +define void @atomic_store_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomic_store_i32_seq_cst: ; XTENSA: entry a1, 32 -; XTENSA-NEXT: l8ui a8, a2, 0 -; XTENSA-NEXT: memw -; XTENSA-NEXT: memw -; XTENSA-NEXT: s8i a8, a3, 0 -; XTENSA-NEXT: memw +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI23_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 ; XTENSA-NEXT: retw.n - %val = load atomic i8, ptr %ptr1 seq_cst, align 1 - store atomic i8 %val, ptr %ptr2 seq_cst, align 1 +; +; XTENSA-ATOMIC-LABEL: atomic_store_i32_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: s32i.n a3, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + store atomic i32 %b, ptr %a seq_cst, align 4 ret void } diff --git a/llvm/test/CodeGen/Xtensa/atomic-rmw.ll b/llvm/test/CodeGen/Xtensa/atomic-rmw.ll new file mode 100644 index 0000000000000..5755ded148185 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/atomic-rmw.ll @@ -0,0 +1,4528 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: llc -mtriple=xtensa -mcpu=esp32s2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=XTENSA +; RUN: llc -mtriple=xtensa -mcpu=esp32 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=XTENSA-ATOMIC + +define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i8_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI0_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli 
a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a11, -1 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: l32i.n a12, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a12, a3 +; XTENSA-ATOMIC-NEXT: l32i.n a13, a9, 0 +; XTENSA-ATOMIC-NEXT: and a14, a13, a10 +; XTENSA-ATOMIC-NEXT: .LBB0_1: # =>This Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: # Child Loop BB0_2 Depth 2 +; XTENSA-ATOMIC-NEXT: mov.n a13, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: and a7, a14, a11 +; XTENSA-ATOMIC-NEXT: .LBB0_2: # Parent Loop BB0_1 Depth=1 +; XTENSA-ATOMIC-NEXT: # => This Inner Loop Header: Depth=2 +; XTENSA-ATOMIC-NEXT: mov.n a15, a7 +; XTENSA-ATOMIC-NEXT: or a14, a12, a15 +; XTENSA-ATOMIC-NEXT: or a7, a13, a15 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a9, 0 +; XTENSA-ATOMIC-NEXT: beq a7, a14, .LBB0_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # in Loop: Header=BB0_2 Depth=2 +; XTENSA-ATOMIC-NEXT: and a7, a14, a11 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB0_2 +; XTENSA-ATOMIC-NEXT: .LBB0_4: # in Loop: Header=BB0_1 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a14, a10 +; XTENSA-ATOMIC-NEXT: bne a14, a13, .LBB0_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: sext a2, a8, 7 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xchg ptr %a, i8 %b monotonic + ret i8 %1 +} + +define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i8_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI1_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; 
XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a11, -1 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: l32i.n a12, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a12, a3 +; XTENSA-ATOMIC-NEXT: l32i.n a13, a9, 0 +; XTENSA-ATOMIC-NEXT: and a14, a13, a10 +; XTENSA-ATOMIC-NEXT: .LBB1_1: # =>This Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: # Child Loop BB1_2 Depth 2 +; XTENSA-ATOMIC-NEXT: mov.n a13, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: and a7, a14, a11 +; XTENSA-ATOMIC-NEXT: .LBB1_2: # Parent Loop BB1_1 Depth=1 +; XTENSA-ATOMIC-NEXT: # => This Inner Loop Header: Depth=2 +; XTENSA-ATOMIC-NEXT: mov.n a15, a7 +; XTENSA-ATOMIC-NEXT: or a14, a12, a15 +; XTENSA-ATOMIC-NEXT: or a7, a13, a15 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a9, 0 +; XTENSA-ATOMIC-NEXT: beq a7, a14, .LBB1_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # in Loop: Header=BB1_2 Depth=2 +; XTENSA-ATOMIC-NEXT: and a7, a14, a11 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB1_2 +; XTENSA-ATOMIC-NEXT: .LBB1_4: # in Loop: Header=BB1_1 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a14, a10 +; XTENSA-ATOMIC-NEXT: bne a14, a13, .LBB1_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: sext a2, a8, 7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xchg ptr %a, i8 %b acquire + ret i8 %1 +} + +define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i8_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI2_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_release: +; XTENSA-ATOMIC: entry 
a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a11, -1 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: l32i.n a12, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a12, a3 +; XTENSA-ATOMIC-NEXT: l32i.n a13, a9, 0 +; XTENSA-ATOMIC-NEXT: and a14, a13, a10 +; XTENSA-ATOMIC-NEXT: .LBB2_1: # =>This Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: # Child Loop BB2_2 Depth 2 +; XTENSA-ATOMIC-NEXT: mov.n a13, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: and a7, a14, a11 +; XTENSA-ATOMIC-NEXT: .LBB2_2: # Parent Loop BB2_1 Depth=1 +; XTENSA-ATOMIC-NEXT: # => This Inner Loop Header: Depth=2 +; XTENSA-ATOMIC-NEXT: mov.n a15, a7 +; XTENSA-ATOMIC-NEXT: or a14, a12, a15 +; XTENSA-ATOMIC-NEXT: or a7, a13, a15 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a9, 0 +; XTENSA-ATOMIC-NEXT: beq a7, a14, .LBB2_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # in Loop: Header=BB2_2 Depth=2 +; XTENSA-ATOMIC-NEXT: and a7, a14, a11 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB2_2 +; XTENSA-ATOMIC-NEXT: .LBB2_4: # in Loop: Header=BB2_1 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a14, a10 +; XTENSA-ATOMIC-NEXT: bne a14, a13, .LBB2_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: sext a2, a8, 7 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xchg ptr %a, i8 %b release + ret i8 %1 +} + +define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i8_acq_rel: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI3_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: 
retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_acq_rel: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a11, -1 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: l32i.n a12, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a12, a3 +; XTENSA-ATOMIC-NEXT: l32i.n a13, a9, 0 +; XTENSA-ATOMIC-NEXT: and a14, a13, a10 +; XTENSA-ATOMIC-NEXT: .LBB3_1: # =>This Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: # Child Loop BB3_2 Depth 2 +; XTENSA-ATOMIC-NEXT: mov.n a13, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: and a7, a14, a11 +; XTENSA-ATOMIC-NEXT: .LBB3_2: # Parent Loop BB3_1 Depth=1 +; XTENSA-ATOMIC-NEXT: # => This Inner Loop Header: Depth=2 +; XTENSA-ATOMIC-NEXT: mov.n a15, a7 +; XTENSA-ATOMIC-NEXT: or a14, a12, a15 +; XTENSA-ATOMIC-NEXT: or a7, a13, a15 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a9, 0 +; XTENSA-ATOMIC-NEXT: beq a7, a14, .LBB3_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # in Loop: Header=BB3_2 Depth=2 +; XTENSA-ATOMIC-NEXT: and a7, a14, a11 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB3_2 +; XTENSA-ATOMIC-NEXT: .LBB3_4: # in Loop: Header=BB3_1 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a14, a10 +; XTENSA-ATOMIC-NEXT: bne a14, a13, .LBB3_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: sext a2, a8, 7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xchg ptr %a, i8 %b acq_rel + ret i8 %1 +} + +define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i8_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI4_0 +; XTENSA-NEXT: 
mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a10, 255 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a11, -1 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: l32i.n a12, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a12, a3 +; XTENSA-ATOMIC-NEXT: l32i.n a13, a9, 0 +; XTENSA-ATOMIC-NEXT: and a14, a13, a10 +; XTENSA-ATOMIC-NEXT: .LBB4_1: # =>This Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: # Child Loop BB4_2 Depth 2 +; XTENSA-ATOMIC-NEXT: mov.n a13, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: and a7, a14, a11 +; XTENSA-ATOMIC-NEXT: .LBB4_2: # Parent Loop BB4_1 Depth=1 +; XTENSA-ATOMIC-NEXT: # => This Inner Loop Header: Depth=2 +; XTENSA-ATOMIC-NEXT: mov.n a15, a7 +; XTENSA-ATOMIC-NEXT: or a14, a12, a15 +; XTENSA-ATOMIC-NEXT: or a7, a13, a15 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a9, 0 +; XTENSA-ATOMIC-NEXT: beq a7, a14, .LBB4_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # in Loop: Header=BB4_2 Depth=2 +; XTENSA-ATOMIC-NEXT: and a7, a14, a11 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB4_2 +; XTENSA-ATOMIC-NEXT: .LBB4_4: # in Loop: Header=BB4_1 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a14, a10 +; XTENSA-ATOMIC-NEXT: bne a14, a13, .LBB4_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: sext a2, a8, 7 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xchg ptr %a, i8 %b seq_cst + ret i8 %1 +} + +define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: 
atomicrmw_add_i8_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI5_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: add.n a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB5_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw add ptr %a, i8 %b monotonic + ret i8 %1 +} + +define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i8_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI6_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; 
XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: add.n a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB6_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw add ptr %a, i8 %b acquire + ret i8 %1 +} + +define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i8_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI7_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: 
and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: add.n a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB7_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw add ptr %a, i8 %b release + ret i8 %1 +} + +define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i8_acq_rel: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI8_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_acq_rel: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: add.n a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB8_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; 
XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw add ptr %a, i8 %b acq_rel + ret i8 %1 +} + +define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i8_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI9_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: add.n a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB9_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw add ptr %a, i8 %b seq_cst + ret i8 %1 +} + +define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i8_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI10_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; 
XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB10_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw sub ptr %a, i8 %b monotonic + ret i8 %1 +} + +define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i8_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI11_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; 
XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB11_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw sub ptr %a, i8 %b acquire + ret i8 %1 +} + +define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i8_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI12_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, 
a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB12_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw sub ptr %a, i8 %b release + ret i8 %1 +} + +define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i8_acq_rel: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI13_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_acq_rel: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB13_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw sub ptr %a, i8 %b acq_rel + ret i8 %1 +} + +define i8 
@atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i8_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI14_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB14_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw sub ptr %a, i8 %b seq_cst + ret i8 %1 +} + +define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i8_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI15_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; 
XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB15_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: and a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB15_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw and ptr %a, i8 %b monotonic + ret i8 %1 +} + +define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i8_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI16_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB16_1: # =>This Inner Loop Header: Depth=1 
+; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: and a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB16_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw and ptr %a, i8 %b acquire + ret i8 %1 +} + +define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i8_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI17_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB17_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: and a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB17_1 +; 
XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw and ptr %a, i8 %b release + ret i8 %1 +} + +define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i8_acq_rel: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI18_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_acq_rel: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB18_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: and a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB18_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw and ptr %a, i8 %b acq_rel + ret i8 %1 +} + +define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i8_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI19_0 +; 
XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB19_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: and a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB19_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw and ptr %a, i8 %b seq_cst + ret i8 %1 +} + +define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i8_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI20_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; 
XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: l32i.n a13, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a12, a3 +; XTENSA-ATOMIC-NEXT: .LBB20_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a14, a13 +; XTENSA-ATOMIC-NEXT: and a13, a14, a11 +; XTENSA-ATOMIC-NEXT: and a13, a13, a12 +; XTENSA-ATOMIC-NEXT: and a13, a13, a11 +; XTENSA-ATOMIC-NEXT: xor a15, a14, a11 +; XTENSA-ATOMIC-NEXT: or a15, a13, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a13, a15 +; XTENSA-ATOMIC-NEXT: bne a15, a14, .LBB20_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a13 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw nand ptr %a, i8 %b monotonic + ret i8 %1 +} + +define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i8_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI21_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: l32i.n a13, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a12, a3 +; XTENSA-ATOMIC-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a14, a13 +; XTENSA-ATOMIC-NEXT: and a13, a14, a11 +; XTENSA-ATOMIC-NEXT: and a13, a13, a12 +; XTENSA-ATOMIC-NEXT: and a13, a13, a11 +; XTENSA-ATOMIC-NEXT: xor a15, a14, a11 +; XTENSA-ATOMIC-NEXT: or a15, a13, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; 
XTENSA-ATOMIC-NEXT: mov.n a13, a15 +; XTENSA-ATOMIC-NEXT: bne a15, a14, .LBB21_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a13 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw nand ptr %a, i8 %b acquire + ret i8 %1 +} + +define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i8_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI22_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: l32i.n a13, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a12, a3 +; XTENSA-ATOMIC-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a14, a13 +; XTENSA-ATOMIC-NEXT: and a13, a14, a11 +; XTENSA-ATOMIC-NEXT: and a13, a13, a12 +; XTENSA-ATOMIC-NEXT: and a13, a13, a11 +; XTENSA-ATOMIC-NEXT: xor a15, a14, a11 +; XTENSA-ATOMIC-NEXT: or a15, a13, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a13, a15 +; XTENSA-ATOMIC-NEXT: bne a15, a14, .LBB22_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a13 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw nand ptr %a, i8 %b release + ret i8 %1 +} + +define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i8_acq_rel: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 4 +; XTENSA-NEXT: 
l32r a8, .LCPI23_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_acq_rel: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: l32i.n a13, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a12, a3 +; XTENSA-ATOMIC-NEXT: .LBB23_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a14, a13 +; XTENSA-ATOMIC-NEXT: and a13, a14, a11 +; XTENSA-ATOMIC-NEXT: and a13, a13, a12 +; XTENSA-ATOMIC-NEXT: and a13, a13, a11 +; XTENSA-ATOMIC-NEXT: xor a15, a14, a11 +; XTENSA-ATOMIC-NEXT: or a15, a13, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a13, a15 +; XTENSA-ATOMIC-NEXT: bne a15, a14, .LBB23_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a13 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw nand ptr %a, i8 %b acq_rel + ret i8 %1 +} + +define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i8_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI24_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 
+; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: l32i.n a13, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a12, a3 +; XTENSA-ATOMIC-NEXT: .LBB24_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a14, a13 +; XTENSA-ATOMIC-NEXT: and a13, a14, a11 +; XTENSA-ATOMIC-NEXT: and a13, a13, a12 +; XTENSA-ATOMIC-NEXT: and a13, a13, a11 +; XTENSA-ATOMIC-NEXT: xor a15, a14, a11 +; XTENSA-ATOMIC-NEXT: or a15, a13, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a13, a15 +; XTENSA-ATOMIC-NEXT: bne a15, a14, .LBB24_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a13 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw nand ptr %a, i8 %b seq_cst + ret i8 %1 +} + +define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i8_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI25_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB25_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: or a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; 
XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB25_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw or ptr %a, i8 %b monotonic + ret i8 %1 +} + +define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i8_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI26_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB26_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: or a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB26_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw or ptr %a, i8 %b acquire + ret i8 %1 +} + +define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind { +; 
XTENSA-LABEL: atomicrmw_or_i8_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI27_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB27_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: or a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB27_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw or ptr %a, i8 %b release + ret i8 %1 +} + +define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i8_acq_rel: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI28_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_acq_rel: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 
+; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB28_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: or a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB28_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw or ptr %a, i8 %b acq_rel + ret i8 %1 +} + +define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i8_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI29_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB29_1: # =>This Inner Loop Header: Depth=1 +; 
XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: or a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB29_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw or ptr %a, i8 %b seq_cst + ret i8 %1 +} + +define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i8_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI30_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB30_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: xor a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB30_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; 
XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xor ptr %a, i8 %b monotonic + ret i8 %1 +} + +define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i8_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI31_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB31_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: xor a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB31_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xor ptr %a, i8 %b acquire + ret i8 %1 +} + +define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i8_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI32_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 
+; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB32_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: xor a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB32_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xor ptr %a, i8 %b release + ret i8 %1 +} + +define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i8_acq_rel: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI33_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_acq_rel: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 
+; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB33_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: xor a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB33_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xor ptr %a, i8 %b acq_rel + ret i8 %1 +} + +define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i8_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI34_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a10, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a9, a8, 3 +; XTENSA-ATOMIC-NEXT: movi a8, 255 +; XTENSA-ATOMIC-NEXT: ssl a9 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a8 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a10, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB34_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: xor a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, 
a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a10, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB34_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a9 +; XTENSA-ATOMIC-NEXT: srl a9, a14 +; XTENSA-ATOMIC-NEXT: and a2, a9, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xor ptr %a, i8 %b seq_cst + ret i8 %1 +} + +;define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind { +; %1 = atomicrmw max ptr %a, i8 %b monotonic +; ret i8 %1 +;} + +;define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind { +; %1 = atomicrmw max ptr %a, i8 %b acquire +; ret i8 %1 +;} + +;define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind { +; %1 = atomicrmw max ptr %a, i8 %b release +; ret i8 %1 +;} + +;define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind { +; %1 = atomicrmw max ptr %a, i8 %b acq_rel +; ret i8 %1 +;} + +;define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind { +; %1 = atomicrmw max ptr %a, i8 %b seq_cst +; ret i8 %1 +;} + +;define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind { +; %1 = atomicrmw min ptr %a, i8 %b monotonic +; ret i8 %1 +;} + +;define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind { +; %1 = atomicrmw min ptr %a, i8 %b acquire +; ret i8 %1 +;} +; +;define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind { +; %1 = atomicrmw min ptr %a, i8 %b release +; ret i8 %1 +;} +; +;define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind { +; %1 = atomicrmw min ptr %a, i8 %b acq_rel +; ret i8 %1 +;} +; +;define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind { +; %1 = atomicrmw min ptr %a, i8 %b seq_cst +; ret i8 %1 +;} + +;define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind { +; %1 = atomicrmw umax ptr %a, i8 %b monotonic +; ret i8 %1 +;} +; +;define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind { +; %1 = atomicrmw umax ptr %a, i8 %b acquire +; ret i8 %1 
+;} +; +;define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind { +; %1 = atomicrmw umax ptr %a, i8 %b release +; ret i8 %1 +;} +; +;define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind { +; %1 = atomicrmw umax ptr %a, i8 %b acq_rel +; ret i8 %1 +;} +; +;define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind { +; %1 = atomicrmw umax ptr %a, i8 %b seq_cst +; ret i8 %1 +;} + +;define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind { +; %1 = atomicrmw umin ptr %a, i8 %b monotonic +; ret i8 %1 +;} +; +;define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind { +; %1 = atomicrmw umin ptr %a, i8 %b acquire +; ret i8 %1 +;} +; +;define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind { +; %1 = atomicrmw umin ptr %a, i8 %b release +; ret i8 %1 +;} +; +;define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind { +; %1 = atomicrmw umin ptr %a, i8 %b acq_rel +; ret i8 %1 +;} +; +;define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind { +; %1 = atomicrmw umin ptr %a, i8 %b seq_cst +; ret i8 %1 +;} + +define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i16_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI35_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a11, -1 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: l32i.n a12, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a12, a3 +; XTENSA-ATOMIC-NEXT: 
l32i.n a13, a9, 0 +; XTENSA-ATOMIC-NEXT: and a14, a13, a10 +; XTENSA-ATOMIC-NEXT: .LBB35_1: # =>This Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: # Child Loop BB35_2 Depth 2 +; XTENSA-ATOMIC-NEXT: mov.n a13, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: and a7, a14, a11 +; XTENSA-ATOMIC-NEXT: .LBB35_2: # Parent Loop BB35_1 Depth=1 +; XTENSA-ATOMIC-NEXT: # => This Inner Loop Header: Depth=2 +; XTENSA-ATOMIC-NEXT: mov.n a15, a7 +; XTENSA-ATOMIC-NEXT: or a14, a12, a15 +; XTENSA-ATOMIC-NEXT: or a7, a13, a15 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a9, 0 +; XTENSA-ATOMIC-NEXT: beq a7, a14, .LBB35_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # in Loop: Header=BB35_2 Depth=2 +; XTENSA-ATOMIC-NEXT: and a7, a14, a11 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB35_2 +; XTENSA-ATOMIC-NEXT: .LBB35_4: # in Loop: Header=BB35_1 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a14, a10 +; XTENSA-ATOMIC-NEXT: bne a14, a13, .LBB35_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: sext a2, a8, 15 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xchg ptr %a, i16 %b monotonic + ret i16 %1 +} + +define i16 @atomicrmw_xchg_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i16_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI36_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a11, -1 +; 
XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: l32i.n a12, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a12, a3 +; XTENSA-ATOMIC-NEXT: l32i.n a13, a9, 0 +; XTENSA-ATOMIC-NEXT: and a14, a13, a10 +; XTENSA-ATOMIC-NEXT: .LBB36_1: # =>This Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: # Child Loop BB36_2 Depth 2 +; XTENSA-ATOMIC-NEXT: mov.n a13, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: and a7, a14, a11 +; XTENSA-ATOMIC-NEXT: .LBB36_2: # Parent Loop BB36_1 Depth=1 +; XTENSA-ATOMIC-NEXT: # => This Inner Loop Header: Depth=2 +; XTENSA-ATOMIC-NEXT: mov.n a15, a7 +; XTENSA-ATOMIC-NEXT: or a14, a12, a15 +; XTENSA-ATOMIC-NEXT: or a7, a13, a15 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a9, 0 +; XTENSA-ATOMIC-NEXT: beq a7, a14, .LBB36_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # in Loop: Header=BB36_2 Depth=2 +; XTENSA-ATOMIC-NEXT: and a7, a14, a11 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB36_2 +; XTENSA-ATOMIC-NEXT: .LBB36_4: # in Loop: Header=BB36_1 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a14, a10 +; XTENSA-ATOMIC-NEXT: bne a14, a13, .LBB36_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: sext a2, a8, 15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xchg ptr %a, i16 %b acquire + ret i16 %1 +} + +define i16 @atomicrmw_xchg_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i16_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI37_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; 
XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a11, -1 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: l32i.n a12, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a12, a3 +; XTENSA-ATOMIC-NEXT: l32i.n a13, a9, 0 +; XTENSA-ATOMIC-NEXT: and a14, a13, a10 +; XTENSA-ATOMIC-NEXT: .LBB37_1: # =>This Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: # Child Loop BB37_2 Depth 2 +; XTENSA-ATOMIC-NEXT: mov.n a13, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: and a7, a14, a11 +; XTENSA-ATOMIC-NEXT: .LBB37_2: # Parent Loop BB37_1 Depth=1 +; XTENSA-ATOMIC-NEXT: # => This Inner Loop Header: Depth=2 +; XTENSA-ATOMIC-NEXT: mov.n a15, a7 +; XTENSA-ATOMIC-NEXT: or a14, a12, a15 +; XTENSA-ATOMIC-NEXT: or a7, a13, a15 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a9, 0 +; XTENSA-ATOMIC-NEXT: beq a7, a14, .LBB37_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # in Loop: Header=BB37_2 Depth=2 +; XTENSA-ATOMIC-NEXT: and a7, a14, a11 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB37_2 +; XTENSA-ATOMIC-NEXT: .LBB37_4: # in Loop: Header=BB37_1 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a14, a10 +; XTENSA-ATOMIC-NEXT: bne a14, a13, .LBB37_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: sext a2, a8, 15 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xchg ptr %a, i16 %b release + ret i16 %1 +} + +define i16 @atomicrmw_xchg_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i16_acq_rel: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI38_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: 
atomicrmw_xchg_i16_acq_rel: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a11, -1 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: l32i.n a12, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a12, a3 +; XTENSA-ATOMIC-NEXT: l32i.n a13, a9, 0 +; XTENSA-ATOMIC-NEXT: and a14, a13, a10 +; XTENSA-ATOMIC-NEXT: .LBB38_1: # =>This Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: # Child Loop BB38_2 Depth 2 +; XTENSA-ATOMIC-NEXT: mov.n a13, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: and a7, a14, a11 +; XTENSA-ATOMIC-NEXT: .LBB38_2: # Parent Loop BB38_1 Depth=1 +; XTENSA-ATOMIC-NEXT: # => This Inner Loop Header: Depth=2 +; XTENSA-ATOMIC-NEXT: mov.n a15, a7 +; XTENSA-ATOMIC-NEXT: or a14, a12, a15 +; XTENSA-ATOMIC-NEXT: or a7, a13, a15 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a9, 0 +; XTENSA-ATOMIC-NEXT: beq a7, a14, .LBB38_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # in Loop: Header=BB38_2 Depth=2 +; XTENSA-ATOMIC-NEXT: and a7, a14, a11 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB38_2 +; XTENSA-ATOMIC-NEXT: .LBB38_4: # in Loop: Header=BB38_1 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a14, a10 +; XTENSA-ATOMIC-NEXT: bne a14, a13, .LBB38_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: sext a2, a8, 15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xchg ptr %a, i16 %b acq_rel + ret i16 %1 +} + +define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i16_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: 
movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI39_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a11, -1 +; XTENSA-ATOMIC-NEXT: sll a10, a10 +; XTENSA-ATOMIC-NEXT: xor a11, a10, a11 +; XTENSA-ATOMIC-NEXT: l32i.n a12, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a12, a3 +; XTENSA-ATOMIC-NEXT: l32i.n a13, a9, 0 +; XTENSA-ATOMIC-NEXT: and a14, a13, a10 +; XTENSA-ATOMIC-NEXT: .LBB39_1: # =>This Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: # Child Loop BB39_2 Depth 2 +; XTENSA-ATOMIC-NEXT: mov.n a13, a14 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: and a7, a14, a11 +; XTENSA-ATOMIC-NEXT: .LBB39_2: # Parent Loop BB39_1 Depth=1 +; XTENSA-ATOMIC-NEXT: # => This Inner Loop Header: Depth=2 +; XTENSA-ATOMIC-NEXT: mov.n a15, a7 +; XTENSA-ATOMIC-NEXT: or a14, a12, a15 +; XTENSA-ATOMIC-NEXT: or a7, a13, a15 +; XTENSA-ATOMIC-NEXT: wsr a7, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a14, a9, 0 +; XTENSA-ATOMIC-NEXT: beq a7, a14, .LBB39_4 +; XTENSA-ATOMIC-NEXT: # %bb.3: # in Loop: Header=BB39_2 Depth=2 +; XTENSA-ATOMIC-NEXT: and a7, a14, a11 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB39_2 +; XTENSA-ATOMIC-NEXT: .LBB39_4: # in Loop: Header=BB39_1 Depth=1 +; XTENSA-ATOMIC-NEXT: and a14, a14, a10 +; XTENSA-ATOMIC-NEXT: bne a14, a13, .LBB39_1 +; XTENSA-ATOMIC-NEXT: # %bb.5: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: sext a2, a8, 15 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n 
+ %1 = atomicrmw xchg ptr %a, i16 %b seq_cst + ret i16 %1 +} + +define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i16_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI40_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB40_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: add.n a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB40_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw add ptr %a, i16 %b monotonic + ret i16 %1 +} + +define i16 @atomicrmw_add_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i16_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI41_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, 
a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB41_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: add.n a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB41_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw add ptr %a, i16 %b acquire + ret i16 %1 +} + +define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i16_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI42_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli 
a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB42_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: add.n a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB42_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw add ptr %a, i16 %b release + ret i16 %1 +} + +define i16 @atomicrmw_add_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i16_acq_rel: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI43_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_acq_rel: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB43_1: # =>This Inner Loop Header: Depth=1 +; 
XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: add.n a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB43_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw add ptr %a, i16 %b acq_rel + ret i16 %1 +} + +define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i16_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI44_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB44_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: add.n a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; 
XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB44_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw add ptr %a, i16 %b seq_cst + ret i16 %1 +} + +define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i16_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI45_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB45_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB45_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw sub ptr %a, i16 %b monotonic + ret i16 %1 +} + +define i16 
@atomicrmw_sub_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i16_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI46_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB46_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB46_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw sub ptr %a, i16 %b acquire + ret i16 %1 +} + +define i16 @atomicrmw_sub_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i16_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI47_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: 
atomicrmw_sub_i16_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB47_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB47_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw sub ptr %a, i16 %b release + ret i16 %1 +} + +define i16 @atomicrmw_sub_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i16_acq_rel: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI48_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_acq_rel: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 
+; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB48_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB48_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw sub ptr %a, i16 %b acq_rel + ret i16 %1 +} + +define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i16_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI49_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB49_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 
+; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: sub a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB49_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw sub ptr %a, i16 %b seq_cst + ret i16 %1 +} + +define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i16_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI50_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB50_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: and a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, 
a15, .LBB50_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw and ptr %a, i16 %b monotonic + ret i16 %1 +} + +define i16 @atomicrmw_and_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i16_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI51_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB51_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: and a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB51_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw and ptr %a, i16 %b acquire + ret i16 %1 +} + +define i16 @atomicrmw_and_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i16_release: +; 
XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI52_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB52_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: and a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB52_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw and ptr %a, i16 %b release + ret i16 %1 +} + +define i16 @atomicrmw_and_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i16_acq_rel: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI53_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_acq_rel: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; 
XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB53_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: and a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB53_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw and ptr %a, i16 %b acq_rel + ret i16 %1 +} + +define i16 @atomicrmw_and_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i16_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI54_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n 
a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB54_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: and a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB54_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw and ptr %a, i16 %b seq_cst + ret i16 %1 +} + +define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i16_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI55_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: l32i.n a13, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a12, a3 +; XTENSA-ATOMIC-NEXT: .LBB55_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a14, a13 +; XTENSA-ATOMIC-NEXT: and a13, a14, a11 +; XTENSA-ATOMIC-NEXT: and a13, a13, a12 +; XTENSA-ATOMIC-NEXT: and a13, a13, a11 +; XTENSA-ATOMIC-NEXT: xor a15, 
a14, a11 +; XTENSA-ATOMIC-NEXT: or a15, a13, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a13, a15 +; XTENSA-ATOMIC-NEXT: bne a15, a14, .LBB55_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a13 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw nand ptr %a, i16 %b monotonic + ret i16 %1 +} + +define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i16_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI56_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: l32i.n a13, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a12, a3 +; XTENSA-ATOMIC-NEXT: .LBB56_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a14, a13 +; XTENSA-ATOMIC-NEXT: and a13, a14, a11 +; XTENSA-ATOMIC-NEXT: and a13, a13, a12 +; XTENSA-ATOMIC-NEXT: and a13, a13, a11 +; XTENSA-ATOMIC-NEXT: xor a15, a14, a11 +; XTENSA-ATOMIC-NEXT: or a15, a13, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a13, a15 +; XTENSA-ATOMIC-NEXT: bne a15, a14, .LBB56_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a13 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw nand ptr %a, i16 %b 
acquire + ret i16 %1 +} + +define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i16_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI57_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: l32i.n a13, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a12, a3 +; XTENSA-ATOMIC-NEXT: .LBB57_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a14, a13 +; XTENSA-ATOMIC-NEXT: and a13, a14, a11 +; XTENSA-ATOMIC-NEXT: and a13, a13, a12 +; XTENSA-ATOMIC-NEXT: and a13, a13, a11 +; XTENSA-ATOMIC-NEXT: xor a15, a14, a11 +; XTENSA-ATOMIC-NEXT: or a15, a13, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a13, a15 +; XTENSA-ATOMIC-NEXT: bne a15, a14, .LBB57_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a13 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw nand ptr %a, i16 %b release + ret i16 %1 +} + +define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i16_acq_rel: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI58_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_acq_rel: +; 
XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: l32i.n a13, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a12, a3 +; XTENSA-ATOMIC-NEXT: .LBB58_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a14, a13 +; XTENSA-ATOMIC-NEXT: and a13, a14, a11 +; XTENSA-ATOMIC-NEXT: and a13, a13, a12 +; XTENSA-ATOMIC-NEXT: and a13, a13, a11 +; XTENSA-ATOMIC-NEXT: xor a15, a14, a11 +; XTENSA-ATOMIC-NEXT: or a15, a13, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a13, a15 +; XTENSA-ATOMIC-NEXT: bne a15, a14, .LBB58_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a13 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw nand ptr %a, i16 %b acq_rel + ret i16 %1 +} + +define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_nand_i16_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI59_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; 
XTENSA-ATOMIC-NEXT: l32i.n a13, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a12, a3 +; XTENSA-ATOMIC-NEXT: .LBB59_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a14, a13 +; XTENSA-ATOMIC-NEXT: and a13, a14, a11 +; XTENSA-ATOMIC-NEXT: and a13, a13, a12 +; XTENSA-ATOMIC-NEXT: and a13, a13, a11 +; XTENSA-ATOMIC-NEXT: xor a15, a14, a11 +; XTENSA-ATOMIC-NEXT: or a15, a13, a15 +; XTENSA-ATOMIC-NEXT: wsr a14, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a15, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a13, a15 +; XTENSA-ATOMIC-NEXT: bne a15, a14, .LBB59_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a13 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw nand ptr %a, i16 %b seq_cst + ret i16 %1 +} + +define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i16_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI60_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB60_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: or a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; 
XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB60_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw or ptr %a, i16 %b monotonic + ret i16 %1 +} + +define i16 @atomicrmw_or_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i16_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI61_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB61_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: or a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB61_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: 
retw.n + %1 = atomicrmw or ptr %a, i16 %b acquire + ret i16 %1 +} + +define i16 @atomicrmw_or_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i16_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI62_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB62_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: or a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB62_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw or ptr %a, i16 %b release + ret i16 %1 +} + +define i16 @atomicrmw_or_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i16_acq_rel: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI63_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; 
XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_acq_rel: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB63_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: or a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB63_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw or ptr %a, i16 %b acq_rel + ret i16 %1 +} + +define i16 @atomicrmw_or_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i16_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI64_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; 
XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB64_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: or a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB64_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw or ptr %a, i16 %b seq_cst + ret i16 %1 +} + +define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i16_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI65_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; 
XTENSA-ATOMIC-NEXT: .LBB65_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: xor a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB65_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xor ptr %a, i16 %b monotonic + ret i16 %1 +} + +define i16 @atomicrmw_xor_i16_acquire(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i16_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI66_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB66_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: xor a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, 
a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB66_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xor ptr %a, i16 %b acquire + ret i16 %1 +} + +define i16 @atomicrmw_xor_i16_release(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i16_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI67_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB67_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: xor a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB67_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xor ptr %a, i16 %b release + ret i16 %1 +} + +define i16 
@atomicrmw_xor_i16_acq_rel(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i16_acq_rel: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI68_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_acq_rel: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB68_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: xor a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB68_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xor ptr %a, i16 %b acq_rel + ret i16 %1 +} + +define i16 @atomicrmw_xor_i16_seq_cst(ptr %a, i16 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i16_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI69_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; 
+; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 3 +; XTENSA-ATOMIC-NEXT: and a8, a8, a2 +; XTENSA-ATOMIC-NEXT: sub a9, a2, a8 +; XTENSA-ATOMIC-NEXT: slli a8, a8, 3 +; XTENSA-ATOMIC-NEXT: movi.n a10, 1 +; XTENSA-ATOMIC-NEXT: slli a10, a10, 16 +; XTENSA-ATOMIC-NEXT: addi.n a10, a10, -1 +; XTENSA-ATOMIC-NEXT: ssl a8 +; XTENSA-ATOMIC-NEXT: movi.n a12, -1 +; XTENSA-ATOMIC-NEXT: sll a11, a10 +; XTENSA-ATOMIC-NEXT: xor a12, a11, a12 +; XTENSA-ATOMIC-NEXT: l32i.n a14, a9, 0 +; XTENSA-ATOMIC-NEXT: sll a13, a3 +; XTENSA-ATOMIC-NEXT: .LBB69_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a15, a14 +; XTENSA-ATOMIC-NEXT: and a14, a15, a11 +; XTENSA-ATOMIC-NEXT: xor a14, a14, a13 +; XTENSA-ATOMIC-NEXT: and a14, a14, a11 +; XTENSA-ATOMIC-NEXT: and a7, a15, a12 +; XTENSA-ATOMIC-NEXT: or a7, a14, a7 +; XTENSA-ATOMIC-NEXT: wsr a15, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a7, a9, 0 +; XTENSA-ATOMIC-NEXT: mov.n a14, a7 +; XTENSA-ATOMIC-NEXT: bne a7, a15, .LBB69_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: ssr a8 +; XTENSA-ATOMIC-NEXT: srl a8, a14 +; XTENSA-ATOMIC-NEXT: and a2, a8, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xor ptr %a, i16 %b seq_cst + ret i16 %1 +} + +;define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind { +; %1 = atomicrmw max ptr %a, i16 %b monotonic +; ret i16 %1 +;} +; +;define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind { +; %1 = atomicrmw max ptr %a, i16 %b acquire +; ret i16 %1 +;} +; +;define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind { +; %1 = atomicrmw max ptr %a, i16 %b release +; ret i16 %1 +;} +; +;define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind { +; %1 = atomicrmw max ptr %a, i16 %b acq_rel +; ret i16 %1 +;} +; +;define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind { +; %1 = atomicrmw max ptr %a, i16 %b seq_cst +; ret i16 %1 +;} + 
+;define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind { +; %1 = atomicrmw min ptr %a, i16 %b monotonic +; ret i16 %1 +;} +; +;define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind { +; %1 = atomicrmw min ptr %a, i16 %b acquire +; ret i16 %1 +;} +; +;define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind { +; %1 = atomicrmw min ptr %a, i16 %b release +; ret i16 %1 +;} +; +;define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind { +; %1 = atomicrmw min ptr %a, i16 %b acq_rel +; ret i16 %1 +;} +; +;define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind { +; %1 = atomicrmw min ptr %a, i16 %b seq_cst +; ret i16 %1 +;} + +;define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind { +; %1 = atomicrmw umax ptr %a, i16 %b monotonic +; ret i16 %1 +;} +; +;define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind { +; %1 = atomicrmw umax ptr %a, i16 %b acquire +; ret i16 %1 +;} +; +;define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind { +; %1 = atomicrmw umax ptr %a, i16 %b release +; ret i16 %1 +;} +; +;define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind { +; %1 = atomicrmw umax ptr %a, i16 %b acq_rel +; ret i16 %1 +;} +; +;define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind { +; %1 = atomicrmw umax ptr %a, i16 %b seq_cst +; ret i16 %1 +;} + +;define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind { +; %1 = atomicrmw umin ptr %a, i16 %b monotonic +; ret i16 %1 +;} +; +;define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind { +; %1 = atomicrmw umin ptr %a, i16 %b acquire +; ret i16 %1 +;} +; +;define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind { +; %1 = atomicrmw umin ptr %a, i16 %b release +; ret i16 %1 +;} +; +;define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind { +; %1 = atomicrmw umin ptr %a, i16 %b acq_rel +; ret i16 %1 +;} +; +;define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind { +; %1 = atomicrmw umin ptr %a, i16 %b 
seq_cst +; ret i16 %1 +;} + +define i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i32_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI70_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB70_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a8, a9 +; XTENSA-ATOMIC-NEXT: wsr a8, scompare1 +; XTENSA-ATOMIC-NEXT: mov.n a9, a3 +; XTENSA-ATOMIC-NEXT: s32c1i a9, a2, 0 +; XTENSA-ATOMIC-NEXT: bne a9, a8, .LBB70_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xchg ptr %a, i32 %b monotonic + ret i32 %1 +} + +define i32 @atomicrmw_xchg_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i32_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI71_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB71_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a8, a9 +; XTENSA-ATOMIC-NEXT: wsr a8, scompare1 +; XTENSA-ATOMIC-NEXT: mov.n a9, a3 +; XTENSA-ATOMIC-NEXT: s32c1i a9, a2, 0 +; XTENSA-ATOMIC-NEXT: bne a9, a8, .LBB71_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xchg ptr %a, i32 %b acquire + ret i32 %1 +} + +define i32 @atomicrmw_xchg_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: 
atomicrmw_xchg_i32_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI72_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB72_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a8, a9 +; XTENSA-ATOMIC-NEXT: wsr a8, scompare1 +; XTENSA-ATOMIC-NEXT: mov.n a9, a3 +; XTENSA-ATOMIC-NEXT: s32c1i a9, a2, 0 +; XTENSA-ATOMIC-NEXT: bne a9, a8, .LBB72_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xchg ptr %a, i32 %b release + ret i32 %1 +} + +define i32 @atomicrmw_xchg_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i32_acq_rel: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI73_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_acq_rel: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB73_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a8, a9 +; XTENSA-ATOMIC-NEXT: wsr a8, scompare1 +; XTENSA-ATOMIC-NEXT: mov.n a9, a3 +; XTENSA-ATOMIC-NEXT: s32c1i a9, a2, 0 +; XTENSA-ATOMIC-NEXT: bne a9, a8, .LBB73_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xchg ptr %a, i32 %b acq_rel + ret i32 %1 +} + +define i32 @atomicrmw_xchg_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xchg_i32_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: 
movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI74_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB74_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a8, a9 +; XTENSA-ATOMIC-NEXT: wsr a8, scompare1 +; XTENSA-ATOMIC-NEXT: mov.n a9, a3 +; XTENSA-ATOMIC-NEXT: s32c1i a9, a2, 0 +; XTENSA-ATOMIC-NEXT: bne a9, a8, .LBB74_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xchg ptr %a, i32 %b seq_cst + ret i32 %1 +} + +define i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i32_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI75_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB75_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: add.n a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB75_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw add ptr %a, i32 %b monotonic + ret i32 %1 +} + +define i32 @atomicrmw_add_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i32_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI76_0 +; XTENSA-NEXT: mov.n a10, a2 +; 
XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB76_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: add.n a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB76_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw add ptr %a, i32 %b acquire + ret i32 %1 +} + +define i32 @atomicrmw_add_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i32_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI77_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB77_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: add.n a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB77_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw add ptr %a, i32 %b release + ret i32 %1 +} + +define i32 @atomicrmw_add_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i32_acq_rel: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI78_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n 
a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_acq_rel: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB78_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: add.n a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB78_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw add ptr %a, i32 %b acq_rel + ret i32 %1 +} + +define i32 @atomicrmw_add_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_add_i32_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI79_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB79_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: add.n a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB79_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw add ptr %a, i32 %b seq_cst + ret i32 %1 +} + +define i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i32_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI80_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; 
XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB80_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: sub a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB80_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw sub ptr %a, i32 %b monotonic + ret i32 %1 +} + +define i32 @atomicrmw_sub_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i32_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI81_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB81_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: sub a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB81_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw sub ptr %a, i32 %b acquire + ret i32 %1 +} + +define i32 @atomicrmw_sub_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i32_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI82_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_release: +; XTENSA-ATOMIC: entry a1, 32 
+; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB82_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: sub a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB82_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw sub ptr %a, i32 %b release + ret i32 %1 +} + +define i32 @atomicrmw_sub_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i32_acq_rel: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI83_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_acq_rel: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB83_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: sub a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB83_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw sub ptr %a, i32 %b acq_rel + ret i32 %1 +} + +define i32 @atomicrmw_sub_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_sub_i32_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI84_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: 
l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB84_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: sub a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB84_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw sub ptr %a, i32 %b seq_cst + ret i32 %1 +} + +define i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i32_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI85_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB85_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: and a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB85_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw and ptr %a, i32 %b monotonic + ret i32 %1 +} + +define i32 @atomicrmw_and_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i32_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI86_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB86_1: # =>This Inner Loop Header: Depth=1 +; 
XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: and a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB86_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw and ptr %a, i32 %b acquire + ret i32 %1 +} + +define i32 @atomicrmw_and_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i32_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI87_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB87_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: and a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB87_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw and ptr %a, i32 %b release + ret i32 %1 +} + +define i32 @atomicrmw_and_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i32_acq_rel: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI88_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_acq_rel: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB88_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; 
XTENSA-ATOMIC-NEXT: and a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB88_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw and ptr %a, i32 %b acq_rel + ret i32 %1 +} + +define i32 @atomicrmw_and_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_and_i32_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI89_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB89_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: and a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB89_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw and ptr %a, i32 %b seq_cst + ret i32 %1 +} + +;define i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw nand ptr %a, i32 %b monotonic +; ret i32 %1 +;} +; +;define i32 @atomicrmw_nand_i32_acquire(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw nand ptr %a, i32 %b acquire +; ret i32 %1 +;} +; +;define i32 @atomicrmw_nand_i32_release(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw nand ptr %a, i32 %b release +; ret i32 %1 +;} +; +;define i32 @atomicrmw_nand_i32_acq_rel(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw nand ptr %a, i32 %b acq_rel +; ret i32 %1 +;} +; +;define i32 @atomicrmw_nand_i32_seq_cst(ptr %a, i32 %b) nounwind { +; %1 = 
atomicrmw nand ptr %a, i32 %b seq_cst +; ret i32 %1 +;} + +define i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i32_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI90_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB90_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: or a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB90_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw or ptr %a, i32 %b monotonic + ret i32 %1 +} + +define i32 @atomicrmw_or_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i32_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 2 +; XTENSA-NEXT: l32r a8, .LCPI91_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB91_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: or a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB91_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw or ptr %a, i32 %b acquire + ret i32 %1 +} + +define i32 @atomicrmw_or_i32_release(ptr %a, i32 %b) 
nounwind { +; XTENSA-LABEL: atomicrmw_or_i32_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI92_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB92_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: or a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB92_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw or ptr %a, i32 %b release + ret i32 %1 +} + +define i32 @atomicrmw_or_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i32_acq_rel: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI93_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_acq_rel: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB93_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: or a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB93_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw or ptr %a, i32 %b acq_rel + ret i32 %1 +} + +define i32 @atomicrmw_or_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_or_i32_seq_cst: +; 
XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI94_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB94_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: or a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB94_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw or ptr %a, i32 %b seq_cst + ret i32 %1 +} + +define i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i32_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI95_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB95_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: xor a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB95_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xor ptr %a, i32 %b monotonic + ret i32 %1 +} + +define i32 @atomicrmw_xor_i32_acquire(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i32_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 2 +; XTENSA-NEXT: l32r a8, 
.LCPI96_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB96_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: xor a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB96_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xor ptr %a, i32 %b acquire + ret i32 %1 +} + +define i32 @atomicrmw_xor_i32_release(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i32_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI97_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB97_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: xor a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB97_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xor ptr %a, i32 %b release + ret i32 %1 +} + +define i32 @atomicrmw_xor_i32_acq_rel(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i32_acq_rel: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 4 +; XTENSA-NEXT: l32r a8, .LCPI98_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; 
XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_acq_rel: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB98_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: xor a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB98_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xor ptr %a, i32 %b acq_rel + ret i32 %1 +} + +define i32 @atomicrmw_xor_i32_seq_cst(ptr %a, i32 %b) nounwind { +; XTENSA-LABEL: atomicrmw_xor_i32_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI99_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a11, a3 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a9, a2, 0 +; XTENSA-ATOMIC-NEXT: .LBB99_1: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a9 +; XTENSA-ATOMIC-NEXT: xor a8, a10, a3 +; XTENSA-ATOMIC-NEXT: wsr a10, scompare1 +; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: mov.n a9, a8 +; XTENSA-ATOMIC-NEXT: bne a10, a8, .LBB99_1 +; XTENSA-ATOMIC-NEXT: # %bb.2: +; XTENSA-ATOMIC-NEXT: mov.n a2, a8 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %1 = atomicrmw xor ptr %a, i32 %b seq_cst + ret i32 %1 +} + +;define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw max ptr %a, i32 %b monotonic +; ret i32 %1 +;} +; +;define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw max ptr %a, i32 %b acquire +; ret i32 %1 +;} +; +;define i32 
@atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw max ptr %a, i32 %b release +; ret i32 %1 +;} +; +;define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw max ptr %a, i32 %b acq_rel +; ret i32 %1 +;} +; +;define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw max ptr %a, i32 %b seq_cst +; ret i32 %1 +;} + +;define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw min ptr %a, i32 %b monotonic +; ret i32 %1 +;} +; +;define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw min ptr %a, i32 %b acquire +; ret i32 %1 +;} +; +;define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw min ptr %a, i32 %b release +; ret i32 %1 +;} +; +;define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw min ptr %a, i32 %b acq_rel +; ret i32 %1 +;} +; +;define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw min ptr %a, i32 %b seq_cst +; ret i32 %1 +;} + +;define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw umax ptr %a, i32 %b monotonic +; ret i32 %1 +;} +; +;define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw umax ptr %a, i32 %b acquire +; ret i32 %1 +;} +; +;define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw umax ptr %a, i32 %b release +; ret i32 %1 +;} +; +;define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw umax ptr %a, i32 %b acq_rel +; ret i32 %1 +;} +; +;define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw umax ptr %a, i32 %b seq_cst +; ret i32 %1 +;} +; +;define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw umin ptr %a, i32 %b monotonic +; ret i32 %1 +;} +; +;define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw umin ptr %a, i32 %b acquire +; ret i32 %1 
+;} +; +;define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw umin ptr %a, i32 %b release +; ret i32 %1 +;} +; +;define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw umin ptr %a, i32 %b acq_rel +; ret i32 %1 +;} +; +;define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind { +; %1 = atomicrmw umin ptr %a, i32 %b seq_cst +; ret i32 %1 +;} diff --git a/llvm/test/CodeGen/Xtensa/atomicrmw.ll b/llvm/test/CodeGen/Xtensa/atomicrmw.ll deleted file mode 100644 index f2b7526e33e84..0000000000000 --- a/llvm/test/CodeGen/Xtensa/atomicrmw.ll +++ /dev/null @@ -1,103 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \ -; RUN: | FileCheck -check-prefix=CHECK-XTENSA %s - -define i8 @atomicrmw_xchg_i8_seq_cst(i8* %a, i8 %b) nounwind { -; CHECK-XTENSA-LABEL: atomicrmw_xchg_i8_seq_cst: -; CHECK-XTENSA: # %bb.0: -; CHECK-XTENSA-NEXT: entry a1, 32 -; CHECK-XTENSA-NEXT: memw -; CHECK-XTENSA-NEXT: movi.n a8, 3 -; CHECK-XTENSA-NEXT: and a8, a8, a2 -; CHECK-XTENSA-NEXT: sub a9, a2, a8 -; CHECK-XTENSA-NEXT: slli a8, a8, 3 -; CHECK-XTENSA-NEXT: movi a10, 255 -; CHECK-XTENSA-NEXT: ssl a8 -; CHECK-XTENSA-NEXT: movi.n a11, -1 -; CHECK-XTENSA-NEXT: sll a10, a10 -; CHECK-XTENSA-NEXT: xor a11, a10, a11 -; CHECK-XTENSA-NEXT: l32i.n a12, a9, 0 -; CHECK-XTENSA-NEXT: sll a12, a3 -; CHECK-XTENSA-NEXT: l32i.n a13, a9, 0 -; CHECK-XTENSA-NEXT: and a14, a13, a10 -; CHECK-XTENSA-NEXT: .LBB0_1: # =>This Loop Header: Depth=1 -; CHECK-XTENSA-NEXT: # Child Loop BB0_2 Depth 2 -; CHECK-XTENSA-NEXT: mov.n a13, a14 -; CHECK-XTENSA-NEXT: memw -; CHECK-XTENSA-NEXT: l32i.n a14, a9, 0 -; CHECK-XTENSA-NEXT: and a7, a14, a11 -; CHECK-XTENSA-NEXT: .LBB0_2: # Parent Loop BB0_1 Depth=1 -; CHECK-XTENSA-NEXT: # => This Inner Loop Header: Depth=2 -; CHECK-XTENSA-NEXT: mov.n a15, a7 -; CHECK-XTENSA-NEXT: or a14, a12, a15 -; CHECK-XTENSA-NEXT: or a7, a13, a15 -; 
CHECK-XTENSA-NEXT: wsr a7, scompare1 -; CHECK-XTENSA-NEXT: s32c1i a14, a9, 0 -; CHECK-XTENSA-NEXT: beq a7, a14, .LBB0_4 -; CHECK-XTENSA-NEXT: # %bb.3: # in Loop: Header=BB0_2 Depth=2 -; CHECK-XTENSA-NEXT: and a7, a14, a11 -; CHECK-XTENSA-NEXT: bne a7, a15, .LBB0_2 -; CHECK-XTENSA-NEXT: .LBB0_4: # in Loop: Header=BB0_1 Depth=1 -; CHECK-XTENSA-NEXT: and a14, a14, a10 -; CHECK-XTENSA-NEXT: bne a14, a13, .LBB0_1 -; CHECK-XTENSA-NEXT: # %bb.5: -; CHECK-XTENSA-NEXT: ssr a8 -; CHECK-XTENSA-NEXT: srl a8, a14 -; CHECK-XTENSA-NEXT: sext a2, a8, 7 -; CHECK-XTENSA-NEXT: memw -; CHECK-XTENSA-NEXT: retw.n - - %1 = atomicrmw xchg i8* %a, i8 %b seq_cst - ret i8 %1 -} - -define i16 @atomicrmw_xchg_i16_seq_cst(i16* %a, i16 %b) nounwind { -; CHECK-XTENSA-LABEL: atomicrmw_xchg_i16_seq_cst: -; CHECK-XTENSA: # %bb.0: -; CHECK-XTENSA-NEXT: entry a1, 32 -; CHECK-XTENSA-NEXT: memw -; CHECK-XTENSA-NEXT: movi.n a8, 3 -; CHECK-XTENSA-NEXT: and a8, a8, a2 -; CHECK-XTENSA-NEXT: sub a9, a2, a8 -; CHECK-XTENSA-NEXT: slli a8, a8, 3 -; CHECK-XTENSA-NEXT: movi.n a10, 1 -; CHECK-XTENSA-NEXT: slli a10, a10, 16 -; CHECK-XTENSA-NEXT: addi.n a10, a10, -1 -; CHECK-XTENSA-NEXT: ssl a8 -; CHECK-XTENSA-NEXT: movi.n a11, -1 -; CHECK-XTENSA-NEXT: sll a10, a10 -; CHECK-XTENSA-NEXT: xor a11, a10, a11 -; CHECK-XTENSA-NEXT: l32i.n a12, a9, 0 -; CHECK-XTENSA-NEXT: sll a12, a3 -; CHECK-XTENSA-NEXT: l32i.n a13, a9, 0 -; CHECK-XTENSA-NEXT: and a14, a13, a10 -; CHECK-XTENSA-NEXT: .LBB1_1: # =>This Loop Header: Depth=1 -; CHECK-XTENSA-NEXT: # Child Loop BB1_2 Depth 2 -; CHECK-XTENSA-NEXT: mov.n a13, a14 -; CHECK-XTENSA-NEXT: memw -; CHECK-XTENSA-NEXT: l32i.n a14, a9, 0 -; CHECK-XTENSA-NEXT: and a7, a14, a11 -; CHECK-XTENSA-NEXT: .LBB1_2: # Parent Loop BB1_1 Depth=1 -; CHECK-XTENSA-NEXT: # => This Inner Loop Header: Depth=2 -; CHECK-XTENSA-NEXT: mov.n a15, a7 -; CHECK-XTENSA-NEXT: or a14, a12, a15 -; CHECK-XTENSA-NEXT: or a7, a13, a15 -; CHECK-XTENSA-NEXT: wsr a7, scompare1 -; CHECK-XTENSA-NEXT: s32c1i a14, a9, 0 -; 
CHECK-XTENSA-NEXT: beq a7, a14, .LBB1_4 -; CHECK-XTENSA-NEXT: # %bb.3: # in Loop: Header=BB1_2 Depth=2 -; CHECK-XTENSA-NEXT: and a7, a14, a11 -; CHECK-XTENSA-NEXT: bne a7, a15, .LBB1_2 -; CHECK-XTENSA-NEXT: .LBB1_4: # in Loop: Header=BB1_1 Depth=1 -; CHECK-XTENSA-NEXT: and a14, a14, a10 -; CHECK-XTENSA-NEXT: bne a14, a13, .LBB1_1 -; CHECK-XTENSA-NEXT: # %bb.5: -; CHECK-XTENSA-NEXT: ssr a8 -; CHECK-XTENSA-NEXT: srl a8, a14 -; CHECK-XTENSA-NEXT: sext a2, a8, 15 -; CHECK-XTENSA-NEXT: memw -; CHECK-XTENSA-NEXT: retw.n - - %1 = atomicrmw xchg i16* %a, i16 %b seq_cst - ret i16 %1 -} diff --git a/llvm/test/CodeGen/Xtensa/forced-atomics.ll b/llvm/test/CodeGen/Xtensa/forced-atomics.ll new file mode 100644 index 0000000000000..e11ae5329b091 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/forced-atomics.ll @@ -0,0 +1,1005 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: llc -mtriple=xtensa -mcpu=esp32s2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=XTENSA +; RUN: llc -mtriple=xtensa -mcpu=esp32s2 -mattr=+forced-atomics -verify-machineinstrs < %s | FileCheck %s --check-prefixes=XTENSA-ATOMIC + +define i8 @load8(ptr %p) nounwind { +; XTENSA-LABEL: load8: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI0_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: load8: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %v = load atomic i8, ptr %p seq_cst, align 1 + ret i8 %v +} + +define void @store8(ptr %p) nounwind { +; XTENSA-LABEL: store8: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 0 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI1_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: store8: +; XTENSA-ATOMIC: entry a1, 32 +; 
XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 0 +; XTENSA-ATOMIC-NEXT: s8i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + store atomic i8 0, ptr %p seq_cst, align 1 + ret void +} + +define i8 @rmw8(ptr %p) nounwind { +; XTENSA-LABEL: rmw8: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 1 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI2_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: rmw8: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a11, 1 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI2_0 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: callx8 a8 +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %v = atomicrmw add ptr %p, i8 1 seq_cst, align 1 + ret i8 %v +} + +define i8 @cmpxchg8(ptr %p) nounwind { +; XTENSA-LABEL: cmpxchg8: +; XTENSA: entry a1, 48 +; XTENSA-NEXT: movi.n a8, 0 +; XTENSA-NEXT: s8i a8, a1, 3 +; XTENSA-NEXT: addi a11, a1, 3 +; XTENSA-NEXT: movi.n a12, 1 +; XTENSA-NEXT: movi.n a13, 5 +; XTENSA-NEXT: l32r a8, .LCPI3_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a14, a13 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: l8ui a2, a1, 3 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: cmpxchg8: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a11, 0 +; XTENSA-ATOMIC-NEXT: movi.n a12, 1 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI3_0 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: callx8 a8 +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %res = cmpxchg ptr %p, i8 0, i8 1 seq_cst seq_cst + %res.0 = extractvalue { i8, i1 } %res, 0 + ret i8 %res.0 +} + +define i16 @load16(ptr %p) nounwind { +; XTENSA-LABEL: load16: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI4_0 +; XTENSA-NEXT: mov.n a10, a2 +; 
XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: load16: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %v = load atomic i16, ptr %p seq_cst, align 2 + ret i16 %v +} + +define void @store16(ptr %p) nounwind { +; XTENSA-LABEL: store16: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 0 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI5_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: store16: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 0 +; XTENSA-ATOMIC-NEXT: s16i a8, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + store atomic i16 0, ptr %p seq_cst, align 2 + ret void +} + +define i16 @rmw16(ptr %p) nounwind { +; XTENSA-LABEL: rmw16: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 1 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI6_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: rmw16: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a11, 1 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI6_0 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: callx8 a8 +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %v = atomicrmw add ptr %p, i16 1 seq_cst, align 2 + ret i16 %v +} + +define i16 @cmpxchg16(ptr %p) nounwind { +; XTENSA-LABEL: cmpxchg16: +; XTENSA: entry a1, 48 +; XTENSA-NEXT: movi.n a8, 0 +; XTENSA-NEXT: s16i a8, a1, 2 +; XTENSA-NEXT: addi a11, a1, 2 +; XTENSA-NEXT: movi.n a12, 1 +; XTENSA-NEXT: movi.n a13, 5 +; XTENSA-NEXT: l32r a8, .LCPI7_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a14, a13 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: l16ui a2, a1, 2 +; XTENSA-NEXT: retw.n +; +; 
XTENSA-ATOMIC-LABEL: cmpxchg16: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a11, 0 +; XTENSA-ATOMIC-NEXT: movi.n a12, 1 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI7_0 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: callx8 a8 +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %res = cmpxchg ptr %p, i16 0, i16 1 seq_cst seq_cst + %res.0 = extractvalue { i16, i1 } %res, 0 + ret i16 %res.0 +} + +define i32 @load32_unordered(ptr %p) nounwind { +; XTENSA-LABEL: load32_unordered: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI8_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: load32_unordered: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i.n a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw.n + %v = load atomic i32, ptr %p unordered, align 4 + ret i32 %v +} + +define i32 @load32_monotonic(ptr %p) nounwind { +; XTENSA-LABEL: load32_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI9_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: load32_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i.n a2, a2, 0 +; XTENSA-ATOMIC-NEXT: retw.n + %v = load atomic i32, ptr %p monotonic, align 4 + ret i32 %v +} + +define i32 @load32_acquire(ptr %p) nounwind { +; XTENSA-LABEL: load32_acquire: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 2 +; XTENSA-NEXT: l32r a8, .LCPI10_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: load32_acquire: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i.n a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %v = load atomic i32, ptr %p acquire, align 4 + ret i32 %v +} + 
+define i32 @load32_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: load32_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 5 +; XTENSA-NEXT: l32r a8, .LCPI11_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: load32_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: l32i.n a2, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %v = load atomic i32, ptr %p seq_cst, align 4 + ret i32 %v +} + +define void @store32_unordered(ptr %p) nounwind { +; XTENSA-LABEL: store32_unordered: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI12_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a12, a11 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: store32_unordered: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 0 +; XTENSA-ATOMIC-NEXT: s32i.n a8, a2, 0 +; XTENSA-ATOMIC-NEXT: retw.n + store atomic i32 0, ptr %p unordered, align 4 + ret void +} + +define void @store32_monotonic(ptr %p) nounwind { +; XTENSA-LABEL: store32_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 0 +; XTENSA-NEXT: l32r a8, .LCPI13_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a12, a11 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: store32_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a8, 0 +; XTENSA-ATOMIC-NEXT: s32i.n a8, a2, 0 +; XTENSA-ATOMIC-NEXT: retw.n + store atomic i32 0, ptr %p monotonic, align 4 + ret void +} + +define void @store32_release(ptr %p) nounwind { +; XTENSA-LABEL: store32_release: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 0 +; XTENSA-NEXT: movi.n a12, 3 +; XTENSA-NEXT: l32r a8, .LCPI14_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: store32_release: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 
0 +; XTENSA-ATOMIC-NEXT: s32i.n a8, a2, 0 +; XTENSA-ATOMIC-NEXT: retw.n + store atomic i32 0, ptr %p release, align 4 + ret void +} + +define void @store32_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: store32_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 0 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI15_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: store32_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a8, 0 +; XTENSA-ATOMIC-NEXT: s32i.n a8, a2, 0 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + store atomic i32 0, ptr %p seq_cst, align 4 + ret void +} + +define i32 @rmw32_add_monotonic(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_add_monotonic: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 1 +; XTENSA-NEXT: movi.n a12, 0 +; XTENSA-NEXT: l32r a8, .LCPI16_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: rmw32_add_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a11, 1 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI16_0 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: callx8 a8 +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: retw.n + %v = atomicrmw add ptr %p, i32 1 monotonic, align 4 + ret i32 %v +} + +define i32 @rmw32_add_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_add_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 1 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI17_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: rmw32_add_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a11, 1 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI17_0 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: callx8 a8 +; XTENSA-ATOMIC-NEXT: mov.n a2, 
a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %v = atomicrmw add ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_sub_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_sub_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 1 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI18_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: rmw32_sub_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a11, -1 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI18_0 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: callx8 a8 +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %v = atomicrmw sub ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_and_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_and_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 1 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI19_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: rmw32_and_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a11, 1 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI19_0 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: callx8 a8 +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %v = atomicrmw and ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_nand_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_nand_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 1 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI20_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: rmw32_nand_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; 
XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a11, 1 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI20_0 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: callx8 a8 +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %v = atomicrmw nand ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_or_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_or_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 1 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI21_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: rmw32_or_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a11, 1 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI21_0 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: callx8 a8 +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %v = atomicrmw or ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_xor_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_xor_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 1 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI22_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: rmw32_xor_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a11, 1 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI22_0 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: callx8 a8 +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %v = atomicrmw xor ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_max_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_max_seq_cst: +; XTENSA: entry a1, 48 +; XTENSA-NEXT: mov.n a6, a2 +; XTENSA-NEXT: l32i.n a2, a6, 0 +; XTENSA-NEXT: movi.n 
a5, 1 +; XTENSA-NEXT: movi.n a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI23_0 +; XTENSA-NEXT: .LBB23_1: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i.n a2, a1, 0 +; XTENSA-NEXT: max a12, a2, a5 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: mov.n a10, a6 +; XTENSA-NEXT: mov.n a13, a7 +; XTENSA-NEXT: mov.n a14, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i.n a2, a1, 0 +; XTENSA-NEXT: beqz a10, .LBB23_1 +; XTENSA-NEXT: # %bb.2: # %atomicrmw.end +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: rmw32_max_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a11, 1 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI23_0 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: callx8 a8 +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %v = atomicrmw max ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_min_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_min_seq_cst: +; XTENSA: entry a1, 48 +; XTENSA-NEXT: mov.n a6, a2 +; XTENSA-NEXT: l32i.n a2, a6, 0 +; XTENSA-NEXT: movi.n a5, 1 +; XTENSA-NEXT: movi.n a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI24_0 +; XTENSA-NEXT: .LBB24_1: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i.n a2, a1, 0 +; XTENSA-NEXT: min a12, a2, a5 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: mov.n a10, a6 +; XTENSA-NEXT: mov.n a13, a7 +; XTENSA-NEXT: mov.n a14, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i.n a2, a1, 0 +; XTENSA-NEXT: beqz a10, .LBB24_1 +; XTENSA-NEXT: # %bb.2: # %atomicrmw.end +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: rmw32_min_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a11, 1 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI24_0 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: callx8 a8 +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %v = 
atomicrmw min ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_umax_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_umax_seq_cst: +; XTENSA: entry a1, 48 +; XTENSA-NEXT: mov.n a6, a2 +; XTENSA-NEXT: l32i.n a2, a6, 0 +; XTENSA-NEXT: movi.n a5, 1 +; XTENSA-NEXT: movi.n a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI25_0 +; XTENSA-NEXT: .LBB25_1: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i.n a2, a1, 0 +; XTENSA-NEXT: maxu a12, a2, a5 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: mov.n a10, a6 +; XTENSA-NEXT: mov.n a13, a7 +; XTENSA-NEXT: mov.n a14, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i.n a2, a1, 0 +; XTENSA-NEXT: beqz a10, .LBB25_1 +; XTENSA-NEXT: # %bb.2: # %atomicrmw.end +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: rmw32_umax_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a11, 1 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI25_0 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: callx8 a8 +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %v = atomicrmw umax ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_umin_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_umin_seq_cst: +; XTENSA: entry a1, 48 +; XTENSA-NEXT: mov.n a6, a2 +; XTENSA-NEXT: l32i.n a2, a6, 0 +; XTENSA-NEXT: movi.n a5, 1 +; XTENSA-NEXT: movi.n a7, 5 +; XTENSA-NEXT: l32r a4, .LCPI26_0 +; XTENSA-NEXT: .LBB26_1: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i.n a2, a1, 0 +; XTENSA-NEXT: minu a12, a2, a5 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: mov.n a10, a6 +; XTENSA-NEXT: mov.n a13, a7 +; XTENSA-NEXT: mov.n a14, a7 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: l32i.n a2, a1, 0 +; XTENSA-NEXT: beqz a10, .LBB26_1 +; XTENSA-NEXT: # %bb.2: # %atomicrmw.end +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: rmw32_umin_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; 
XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a11, 1 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI26_0 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: callx8 a8 +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %v = atomicrmw umin ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define i32 @rmw32_xchg_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_xchg_seq_cst: +; XTENSA: entry a1, 32 +; XTENSA-NEXT: movi.n a11, 1 +; XTENSA-NEXT: movi.n a12, 5 +; XTENSA-NEXT: l32r a8, .LCPI27_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: rmw32_xchg_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a11, 1 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI27_0 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: callx8 a8 +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + %v = atomicrmw xchg ptr %p, i32 1 seq_cst, align 4 + ret i32 %v +} + +define float @rmw32_fadd_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_fadd_seq_cst: +; XTENSA: entry a1, 48 +; XTENSA-NEXT: l32i.n a10, a2, 0 +; XTENSA-NEXT: l32r a7, .LCPI28_0 +; XTENSA-NEXT: l32r a5, .LCPI28_1 +; XTENSA-NEXT: movi.n a6, 5 +; XTENSA-NEXT: l32r a4, .LCPI28_2 +; XTENSA-NEXT: .LBB28_1: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i.n a10, a1, 0 +; XTENSA-NEXT: mov.n a11, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: mov.n a12, a10 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a13, a6 +; XTENSA-NEXT: mov.n a14, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: mov.n a8, a10 +; XTENSA-NEXT: l32i.n a10, a1, 0 +; XTENSA-NEXT: beqz a8, .LBB28_1 +; XTENSA-NEXT: # %bb.2: # %atomicrmw.end +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: rmw32_fadd_seq_cst: +; XTENSA-ATOMIC: entry a1, 48 +; 
XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a6, a2, 0 +; XTENSA-ATOMIC-NEXT: l32r a7, .LCPI28_0 +; XTENSA-ATOMIC-NEXT: l32r a5, .LCPI28_1 +; XTENSA-ATOMIC-NEXT: l32r a4, .LCPI28_2 +; XTENSA-ATOMIC-NEXT: movi.n a8, 0 +; XTENSA-ATOMIC-NEXT: s32i.n a8, a1, 0 +; XTENSA-ATOMIC-NEXT: movi.n a3, 1 +; XTENSA-ATOMIC-NEXT: j .LBB28_2 +; XTENSA-ATOMIC-NEXT: .LBB28_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB28_2 Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a6, a10 +; XTENSA-ATOMIC-NEXT: beqi a8, 1, .LBB28_4 +; XTENSA-ATOMIC-NEXT: .LBB28_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a6 +; XTENSA-ATOMIC-NEXT: mov.n a11, a7 +; XTENSA-ATOMIC-NEXT: callx8 a5 +; XTENSA-ATOMIC-NEXT: mov.n a12, a10 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: mov.n a11, a6 +; XTENSA-ATOMIC-NEXT: callx8 a4 +; XTENSA-ATOMIC-NEXT: mov.n a8, a3 +; XTENSA-ATOMIC-NEXT: beq a10, a6, .LBB28_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB28_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32i.n a8, a1, 0 +; XTENSA-ATOMIC-NEXT: j .LBB28_1 +; XTENSA-ATOMIC-NEXT: .LBB28_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: retw.n + %v = atomicrmw fadd ptr %p, float 1.0 seq_cst, align 4 + ret float %v +} + +define float @rmw32_fsub_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_fsub_seq_cst: +; XTENSA: entry a1, 48 +; XTENSA-NEXT: l32i.n a10, a2, 0 +; XTENSA-NEXT: l32r a7, .LCPI29_0 +; XTENSA-NEXT: l32r a5, .LCPI29_1 +; XTENSA-NEXT: movi.n a6, 5 +; XTENSA-NEXT: l32r a4, .LCPI29_2 +; XTENSA-NEXT: .LBB29_1: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i.n a10, a1, 0 +; XTENSA-NEXT: mov.n a11, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: mov.n a12, a10 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a13, a6 +; XTENSA-NEXT: mov.n a14, a6 +; 
XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: mov.n a8, a10 +; XTENSA-NEXT: l32i.n a10, a1, 0 +; XTENSA-NEXT: beqz a8, .LBB29_1 +; XTENSA-NEXT: # %bb.2: # %atomicrmw.end +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: rmw32_fsub_seq_cst: +; XTENSA-ATOMIC: entry a1, 48 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a6, a2, 0 +; XTENSA-ATOMIC-NEXT: l32r a7, .LCPI29_0 +; XTENSA-ATOMIC-NEXT: l32r a5, .LCPI29_1 +; XTENSA-ATOMIC-NEXT: l32r a4, .LCPI29_2 +; XTENSA-ATOMIC-NEXT: movi.n a8, 0 +; XTENSA-ATOMIC-NEXT: s32i.n a8, a1, 0 +; XTENSA-ATOMIC-NEXT: movi.n a3, 1 +; XTENSA-ATOMIC-NEXT: j .LBB29_2 +; XTENSA-ATOMIC-NEXT: .LBB29_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB29_2 Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a6, a10 +; XTENSA-ATOMIC-NEXT: beqi a8, 1, .LBB29_4 +; XTENSA-ATOMIC-NEXT: .LBB29_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a6 +; XTENSA-ATOMIC-NEXT: mov.n a11, a7 +; XTENSA-ATOMIC-NEXT: callx8 a5 +; XTENSA-ATOMIC-NEXT: mov.n a12, a10 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: mov.n a11, a6 +; XTENSA-ATOMIC-NEXT: callx8 a4 +; XTENSA-ATOMIC-NEXT: mov.n a8, a3 +; XTENSA-ATOMIC-NEXT: beq a10, a6, .LBB29_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB29_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32i.n a8, a1, 0 +; XTENSA-ATOMIC-NEXT: j .LBB29_1 +; XTENSA-ATOMIC-NEXT: .LBB29_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: retw.n + %v = atomicrmw fsub ptr %p, float 1.0 seq_cst, align 4 + ret float %v +} + +define float @rmw32_fmin_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_fmin_seq_cst: +; XTENSA: entry a1, 48 +; XTENSA-NEXT: l32i.n a10, a2, 0 +; XTENSA-NEXT: l32r a7, .LCPI30_0 +; XTENSA-NEXT: l32r a5, .LCPI30_1 +; XTENSA-NEXT: movi.n a6, 5 +; XTENSA-NEXT: l32r a4, .LCPI30_2 +; XTENSA-NEXT: .LBB30_1: # %atomicrmw.start +; 
XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i.n a10, a1, 0 +; XTENSA-NEXT: mov.n a11, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: mov.n a12, a10 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a13, a6 +; XTENSA-NEXT: mov.n a14, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: mov.n a8, a10 +; XTENSA-NEXT: l32i.n a10, a1, 0 +; XTENSA-NEXT: beqz a8, .LBB30_1 +; XTENSA-NEXT: # %bb.2: # %atomicrmw.end +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: rmw32_fmin_seq_cst: +; XTENSA-ATOMIC: entry a1, 48 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a6, a2, 0 +; XTENSA-ATOMIC-NEXT: l32r a7, .LCPI30_0 +; XTENSA-ATOMIC-NEXT: l32r a5, .LCPI30_1 +; XTENSA-ATOMIC-NEXT: l32r a4, .LCPI30_2 +; XTENSA-ATOMIC-NEXT: movi.n a8, 0 +; XTENSA-ATOMIC-NEXT: s32i.n a8, a1, 0 +; XTENSA-ATOMIC-NEXT: movi.n a3, 1 +; XTENSA-ATOMIC-NEXT: j .LBB30_2 +; XTENSA-ATOMIC-NEXT: .LBB30_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB30_2 Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a6, a10 +; XTENSA-ATOMIC-NEXT: beqi a8, 1, .LBB30_4 +; XTENSA-ATOMIC-NEXT: .LBB30_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a6 +; XTENSA-ATOMIC-NEXT: mov.n a11, a7 +; XTENSA-ATOMIC-NEXT: callx8 a5 +; XTENSA-ATOMIC-NEXT: mov.n a12, a10 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: mov.n a11, a6 +; XTENSA-ATOMIC-NEXT: callx8 a4 +; XTENSA-ATOMIC-NEXT: mov.n a8, a3 +; XTENSA-ATOMIC-NEXT: beq a10, a6, .LBB30_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB30_2 Depth=1 +; XTENSA-ATOMIC-NEXT: l32i.n a8, a1, 0 +; XTENSA-ATOMIC-NEXT: j .LBB30_1 +; XTENSA-ATOMIC-NEXT: .LBB30_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: retw.n + %v = atomicrmw fmin ptr %p, float 1.0 seq_cst, align 4 + ret float %v +} + +define float 
@rmw32_fmax_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: rmw32_fmax_seq_cst: +; XTENSA: entry a1, 48 +; XTENSA-NEXT: l32i.n a10, a2, 0 +; XTENSA-NEXT: l32r a7, .LCPI31_0 +; XTENSA-NEXT: l32r a5, .LCPI31_1 +; XTENSA-NEXT: movi.n a6, 5 +; XTENSA-NEXT: l32r a4, .LCPI31_2 +; XTENSA-NEXT: .LBB31_1: # %atomicrmw.start +; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-NEXT: s32i.n a10, a1, 0 +; XTENSA-NEXT: mov.n a11, a7 +; XTENSA-NEXT: callx8 a5 +; XTENSA-NEXT: mov.n a12, a10 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a13, a6 +; XTENSA-NEXT: mov.n a14, a6 +; XTENSA-NEXT: callx8 a4 +; XTENSA-NEXT: mov.n a8, a10 +; XTENSA-NEXT: l32i.n a10, a1, 0 +; XTENSA-NEXT: beqz a8, .LBB31_1 +; XTENSA-NEXT: # %bb.2: # %atomicrmw.end +; XTENSA-NEXT: mov.n a2, a10 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: rmw32_fmax_seq_cst: +; XTENSA-ATOMIC: entry a1, 48 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: l32i.n a6, a2, 0 +; XTENSA-ATOMIC-NEXT: l32r a7, .LCPI31_0 +; XTENSA-ATOMIC-NEXT: l32r a5, .LCPI31_1 +; XTENSA-ATOMIC-NEXT: l32r a4, .LCPI31_2 +; XTENSA-ATOMIC-NEXT: movi.n a8, 0 +; XTENSA-ATOMIC-NEXT: s32i.n a8, a1, 0 +; XTENSA-ATOMIC-NEXT: movi.n a3, 1 +; XTENSA-ATOMIC-NEXT: j .LBB31_2 +; XTENSA-ATOMIC-NEXT: .LBB31_1: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB31_2 Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a6, a10 +; XTENSA-ATOMIC-NEXT: beqi a8, 1, .LBB31_4 +; XTENSA-ATOMIC-NEXT: .LBB31_2: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 +; XTENSA-ATOMIC-NEXT: mov.n a10, a6 +; XTENSA-ATOMIC-NEXT: mov.n a11, a7 +; XTENSA-ATOMIC-NEXT: callx8 a5 +; XTENSA-ATOMIC-NEXT: mov.n a12, a10 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: mov.n a11, a6 +; XTENSA-ATOMIC-NEXT: callx8 a4 +; XTENSA-ATOMIC-NEXT: mov.n a8, a3 +; XTENSA-ATOMIC-NEXT: beq a10, a6, .LBB31_1 +; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start +; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB31_2 Depth=1 +; 
XTENSA-ATOMIC-NEXT: l32i.n a8, a1, 0 +; XTENSA-ATOMIC-NEXT: j .LBB31_1 +; XTENSA-ATOMIC-NEXT: .LBB31_4: # %atomicrmw.end +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: retw.n + %v = atomicrmw fmax ptr %p, float 1.0 seq_cst, align 4 + ret float %v +} + +define i32 @cmpxchg32_monotonic(ptr %p) nounwind { +; XTENSA-LABEL: cmpxchg32_monotonic: +; XTENSA: entry a1, 48 +; XTENSA-NEXT: movi.n a13, 0 +; XTENSA-NEXT: s32i.n a13, a1, 0 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: movi.n a12, 1 +; XTENSA-NEXT: l32r a8, .LCPI32_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a14, a13 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: l32i.n a2, a1, 0 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: cmpxchg32_monotonic: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: movi.n a11, 0 +; XTENSA-ATOMIC-NEXT: movi.n a12, 1 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI32_0 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: callx8 a8 +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: retw.n + %res = cmpxchg ptr %p, i32 0, i32 1 monotonic monotonic + %res.0 = extractvalue { i32, i1 } %res, 0 + ret i32 %res.0 +} + +define i32 @cmpxchg32_seq_cst(ptr %p) nounwind { +; XTENSA-LABEL: cmpxchg32_seq_cst: +; XTENSA: entry a1, 48 +; XTENSA-NEXT: movi.n a8, 0 +; XTENSA-NEXT: s32i.n a8, a1, 0 +; XTENSA-NEXT: addi a11, a1, 0 +; XTENSA-NEXT: movi.n a12, 1 +; XTENSA-NEXT: movi.n a13, 5 +; XTENSA-NEXT: l32r a8, .LCPI33_0 +; XTENSA-NEXT: mov.n a10, a2 +; XTENSA-NEXT: mov.n a14, a13 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: l32i.n a2, a1, 0 +; XTENSA-NEXT: retw.n +; +; XTENSA-ATOMIC-LABEL: cmpxchg32_seq_cst: +; XTENSA-ATOMIC: entry a1, 32 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: movi.n a11, 0 +; XTENSA-ATOMIC-NEXT: movi.n a12, 1 +; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI33_0 +; XTENSA-ATOMIC-NEXT: mov.n a10, a2 +; XTENSA-ATOMIC-NEXT: callx8 a8 +; XTENSA-ATOMIC-NEXT: mov.n a2, a10 +; XTENSA-ATOMIC-NEXT: memw +; XTENSA-ATOMIC-NEXT: retw.n + 
%res = cmpxchg ptr %p, i32 0, i32 1 seq_cst seq_cst + %res.0 = extractvalue { i32, i1 } %res, 0 + ret i32 %res.0 +} From ae29ede3f41a1228d2b2ca13762292745f489cb9 Mon Sep 17 00:00:00 2001 From: "chen.qian" Date: Tue, 15 Oct 2024 10:43:01 +0800 Subject: [PATCH 264/289] [Test] add SplitLoopByLength test cases --- .../RISCV/RISCVSplitLoopByLength/add.ll | 67 ++++++++ .../RISCV/RISCVSplitLoopByLength/addc.ll | 57 +++++++ .../RISCV/RISCVSplitLoopByLength/biquad.ll | 91 ++++++++++ .../RISCV/RISCVSplitLoopByLength/dotprod.ll | 46 +++++ .../RISCV/RISCVSplitLoopByLength/dotprode.ll | 50 ++++++ .../RISCV/RISCVSplitLoopByLength/fir.ll | 160 ++++++++++++++++++ .../RISCV/RISCVSplitLoopByLength/mul.ll | 67 ++++++++ .../RISCV/RISCVSplitLoopByLength/mulc.ll | 57 +++++++ .../RISCV/RISCVSplitLoopByLength/sqrt.ll | 55 ++++++ .../RISCV/RISCVSplitLoopByLength/sub.ll | 67 ++++++++ 10 files changed, 717 insertions(+) create mode 100644 llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/add.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/addc.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/biquad.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/dotprod.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/dotprode.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/fir.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/mul.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/mulc.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/sqrt.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/sub.ll diff --git a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/add.ll b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/add.ll new file mode 100644 index 0000000000000..0a7005b098c41 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/add.ll @@ -0,0 +1,67 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py 
UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=false < %s | FileCheck %s +; Function Attrs: nofree norecurse nosync nounwind memory(argmem: readwrite) +define dso_local noundef i32 @dsps_add_f32_ansi(ptr noundef readonly %input1, ptr noundef readonly %input2, ptr noundef writeonly %output, i32 noundef %len, i32 noundef %step1, i32 noundef %step2, i32 noundef %step_out) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_add_f32_ansi( +; CHECK-SAME: ptr noundef readonly [[INPUT1:%.*]], ptr noundef readonly [[INPUT2:%.*]], ptr noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[STEP2:%.*]], i32 noundef [[STEP_OUT:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[INPUT1]], null +; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[INPUT2]], null +; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] +; CHECK-NEXT: [[CMP4:%.*]] = icmp eq ptr [[OUTPUT]], null +; CHECK-NEXT: [[OR_COND19:%.*]] = or i1 [[OR_COND]], [[CMP4]] +; CHECK-NEXT: br i1 [[OR_COND19]], label [[RETURN:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK: for.cond.preheader: +; CHECK-NEXT: [[CMP720:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP720]], label [[FOR_BODY:%.*]], label [[RETURN]] +; CHECK: for.body: +; CHECK-NEXT: [[I_021:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_021]], [[STEP1]] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL]] +; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[MUL8:%.*]] = mul nsw i32 [[I_021]], [[STEP2]] +; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[MUL8]] +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX9]], align 4 +; CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[TMP1]] +; 
CHECK-NEXT: [[MUL10:%.*]] = mul nsw i32 [[I_021]], [[STEP_OUT]] +; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL10]] +; CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX11]], align 4 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_021]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[RETURN]], label [[FOR_BODY]] +; CHECK: return: +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ] +; CHECK-NEXT: ret i32 [[RETVAL_0]] +; +entry: + %cmp = icmp eq ptr %input1, null + %cmp1 = icmp eq ptr %input2, null + %or.cond = or i1 %cmp, %cmp1 + %cmp4 = icmp eq ptr %output, null + %or.cond19 = or i1 %or.cond, %cmp4 + br i1 %or.cond19, label %return, label %for.cond.preheader + +for.cond.preheader: ; preds = %entry + %cmp720 = icmp sgt i32 %len, 0 + br i1 %cmp720, label %for.body, label %return + +for.body: ; preds = %for.body, %for.cond.preheader + %i.021 = phi i32 [ %inc, %for.body ], [ 0, %for.cond.preheader ] + %mul = mul nsw i32 %i.021, %step1 + %arrayidx = getelementptr inbounds float, ptr %input1, i32 %mul + %0 = load float, ptr %arrayidx, align 4 + %mul8 = mul nsw i32 %i.021, %step2 + %arrayidx9 = getelementptr inbounds float, ptr %input2, i32 %mul8 + %1 = load float, ptr %arrayidx9, align 4 + %add = fadd float %0, %1 + %mul10 = mul nsw i32 %i.021, %step_out + %arrayidx11 = getelementptr inbounds float, ptr %output, i32 %mul10 + store float %add, ptr %arrayidx11, align 4 + %inc = add nuw nsw i32 %i.021, 1 + %exitcond.not = icmp eq i32 %inc, %len + br i1 %exitcond.not, label %return, label %for.body + +return: ; preds = %for.body, %for.cond.preheader, %entry + %retval.0 = phi i32 [ 458755, %entry ], [ 0, %for.cond.preheader ], [ 0, %for.body ] + ret i32 %retval.0 +} diff --git a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/addc.ll b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/addc.ll new file mode 
100644 index 0000000000000..135522dc5a0a5 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/addc.ll @@ -0,0 +1,57 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=false < %s | FileCheck %s +; Function Attrs: nofree norecurse nosync nounwind memory(argmem: readwrite) +define dso_local noundef i32 @dsps_addc_f32_ansi(ptr noundef readonly %input, ptr noundef writeonly %output, i32 noundef %len, float noundef %C, i32 noundef %step_in, i32 noundef %step_out) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_addc_f32_ansi( +; CHECK-SAME: ptr noundef readonly [[INPUT:%.*]], ptr noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], float noundef [[C:%.*]], i32 noundef [[STEP_IN:%.*]], i32 noundef [[STEP_OUT:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[INPUT]], null +; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[OUTPUT]], null +; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] +; CHECK-NEXT: br i1 [[OR_COND]], label [[RETURN:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK: for.cond.preheader: +; CHECK-NEXT: [[CMP412:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP412]], label [[FOR_BODY:%.*]], label [[RETURN]] +; CHECK: for.body: +; CHECK-NEXT: [[I_013:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_013]], [[STEP_IN]] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[MUL]] +; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[C]] +; CHECK-NEXT: [[MUL5:%.*]] = mul nsw i32 [[I_013]], [[STEP_OUT]] +; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL5]] +; CHECK-NEXT: store float [[ADD]], ptr 
[[ARRAYIDX6]], align 4 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_013]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[RETURN]], label [[FOR_BODY]] +; CHECK: return: +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ] +; CHECK-NEXT: ret i32 [[RETVAL_0]] +; +entry: + %cmp = icmp eq ptr %input, null + %cmp1 = icmp eq ptr %output, null + %or.cond = or i1 %cmp, %cmp1 + br i1 %or.cond, label %return, label %for.cond.preheader + +for.cond.preheader: ; preds = %entry + %cmp412 = icmp sgt i32 %len, 0 + br i1 %cmp412, label %for.body, label %return + +for.body: ; preds = %for.body, %for.cond.preheader + %i.013 = phi i32 [ %inc, %for.body ], [ 0, %for.cond.preheader ] + %mul = mul nsw i32 %i.013, %step_in + %arrayidx = getelementptr inbounds float, ptr %input, i32 %mul + %0 = load float, ptr %arrayidx, align 4 + %add = fadd float %0, %C + %mul5 = mul nsw i32 %i.013, %step_out + %arrayidx6 = getelementptr inbounds float, ptr %output, i32 %mul5 + store float %add, ptr %arrayidx6, align 4 + %inc = add nuw nsw i32 %i.013, 1 + %exitcond.not = icmp eq i32 %inc, %len + br i1 %exitcond.not, label %return, label %for.body + +return: ; preds = %for.body, %for.cond.preheader, %entry + %retval.0 = phi i32 [ 458755, %entry ], [ 0, %for.cond.preheader ], [ 0, %for.body ] + ret i32 %retval.0 +} diff --git a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/biquad.ll b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/biquad.ll new file mode 100644 index 0000000000000..171f7b3a475a6 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/biquad.ll @@ -0,0 +1,91 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=false < %s | FileCheck %s +; Function Attrs: nofree norecurse nosync nounwind 
memory(argmem: readwrite) +define dso_local noundef i32 @dsps_biquad_f32_ansi(ptr nocapture noundef readonly %input, ptr nocapture noundef writeonly %output, i32 noundef %len, ptr nocapture noundef readonly %coef, ptr nocapture noundef %w) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_biquad_f32_ansi( +; CHECK-SAME: ptr nocapture noundef readonly [[INPUT:%.*]], ptr nocapture noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], ptr nocapture noundef readonly [[COEF:%.*]], ptr nocapture noundef [[W:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP30:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP30]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]] +; CHECK: for.body.lr.ph: +; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[COEF]], i32 3 +; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[COEF]], i32 4 +; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[W]], i32 1 +; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[COEF]], i32 1 +; CHECK-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[COEF]], i32 2 +; CHECK-NEXT: [[DOTPRE:%.*]] = load float, ptr [[W]], align 4 +; CHECK-NEXT: [[DOTPRE32:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: ret i32 0 +; CHECK: for.body: +; CHECK-NEXT: [[TMP0:%.*]] = phi float [ [[DOTPRE32]], [[FOR_BODY_LR_PH]] ], [ [[TMP12:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[TMP1:%.*]] = phi float [ [[DOTPRE]], [[FOR_BODY_LR_PH]] ], [ [[TMP6:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[I_031:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_031]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 +; CHECK-NEXT: 
[[NEG:%.*]] = fneg float [[TMP3]] +; CHECK-NEXT: [[TMP4:%.*]] = tail call float @llvm.fmuladd.f32(float [[NEG]], float [[TMP1]], float [[TMP2]]) +; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX3]], align 4 +; CHECK-NEXT: [[NEG5:%.*]] = fneg float [[TMP5]] +; CHECK-NEXT: [[TMP6]] = tail call float @llvm.fmuladd.f32(float [[NEG5]], float [[TMP0]], float [[TMP4]]) +; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[COEF]], align 4 +; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX7]], align 4 +; CHECK-NEXT: [[MUL9:%.*]] = fmul float [[TMP1]], [[TMP8]] +; CHECK-NEXT: [[TMP9:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP7]], float [[TMP6]], float [[MUL9]]) +; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX10]], align 4 +; CHECK-NEXT: [[TMP11:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP10]], float [[TMP0]], float [[TMP9]]) +; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_031]] +; CHECK-NEXT: store float [[TMP11]], ptr [[ARRAYIDX12]], align 4 +; CHECK-NEXT: [[TMP12]] = load float, ptr [[W]], align 4 +; CHECK-NEXT: store float [[TMP12]], ptr [[ARRAYIDX4]], align 4 +; CHECK-NEXT: store float [[TMP6]], ptr [[W]], align 4 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_031]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] +; +entry: + %cmp30 = icmp sgt i32 %len, 0 + br i1 %cmp30, label %for.body.lr.ph, label %for.cond.cleanup + +for.body.lr.ph: ; preds = %entry + %arrayidx1 = getelementptr inbounds float, ptr %coef, i32 3 + %arrayidx3 = getelementptr inbounds float, ptr %coef, i32 4 + %arrayidx4 = getelementptr inbounds float, ptr %w, i32 1 + %arrayidx7 = getelementptr inbounds float, ptr %coef, i32 1 + %arrayidx10 = getelementptr inbounds float, ptr %coef, i32 2 + %.pre = load float, ptr %w, align 4 + %.pre32 = load float, ptr %arrayidx4, align 4 + br label %for.body + +for.cond.cleanup: ; preds = 
%for.body, %entry + ret i32 0 + +for.body: ; preds = %for.body, %for.body.lr.ph + %0 = phi float [ %.pre32, %for.body.lr.ph ], [ %12, %for.body ] + %1 = phi float [ %.pre, %for.body.lr.ph ], [ %6, %for.body ] + %i.031 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ] + %arrayidx = getelementptr inbounds float, ptr %input, i32 %i.031 + %2 = load float, ptr %arrayidx, align 4 + %3 = load float, ptr %arrayidx1, align 4 + %neg = fneg float %3 + %4 = tail call float @llvm.fmuladd.f32(float %neg, float %1, float %2) + %5 = load float, ptr %arrayidx3, align 4 + %neg5 = fneg float %5 + %6 = tail call float @llvm.fmuladd.f32(float %neg5, float %0, float %4) + %7 = load float, ptr %coef, align 4 + %8 = load float, ptr %arrayidx7, align 4 + %mul9 = fmul float %1, %8 + %9 = tail call float @llvm.fmuladd.f32(float %7, float %6, float %mul9) + %10 = load float, ptr %arrayidx10, align 4 + %11 = tail call float @llvm.fmuladd.f32(float %10, float %0, float %9) + %arrayidx12 = getelementptr inbounds float, ptr %output, i32 %i.031 + store float %11, ptr %arrayidx12, align 4 + %12 = load float, ptr %w, align 4 + store float %12, ptr %arrayidx4, align 4 + store float %6, ptr %w, align 4 + %inc = add nuw nsw i32 %i.031, 1 + %exitcond.not = icmp eq i32 %inc, %len + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} diff --git a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/dotprod.ll b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/dotprod.ll new file mode 100644 index 0000000000000..a0211470be14e --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/dotprod.ll @@ -0,0 +1,46 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=false < %s | FileCheck %s +; Function Attrs: nofree norecurse nosync nounwind memory(argmem: readwrite) +define dso_local noundef i32 @dsps_dotprod_f32_ansi(ptr nocapture 
noundef readonly %src1, ptr nocapture noundef readonly %src2, ptr nocapture noundef writeonly %dest, i32 noundef %len) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_dotprod_f32_ansi( +; CHECK-SAME: ptr nocapture noundef readonly [[SRC1:%.*]], ptr nocapture noundef readonly [[SRC2:%.*]], ptr nocapture noundef writeonly [[DEST:%.*]], i32 noundef [[LEN:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP6]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[TMP2:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: store float [[ACC_0_LCSSA]], ptr [[DEST]], align 4 +; CHECK-NEXT: ret i32 0 +; CHECK: for.body: +; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ] +; CHECK-NEXT: [[ACC_07:%.*]] = phi float [ [[TMP2]], [[FOR_BODY]] ], [ 0.000000e+00, [[ENTRY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[I_08]] +; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[I_08]] +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 +; CHECK-NEXT: [[TMP2]] = tail call float @llvm.fmuladd.f32(float [[TMP0]], float [[TMP1]], float [[ACC_07]]) +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] +; +entry: + %cmp6 = icmp sgt i32 %len, 0 + br i1 %cmp6, label %for.body, label %for.cond.cleanup + +for.cond.cleanup: ; preds = %for.body, %entry + %acc.0.lcssa = phi float [ 0.000000e+00, %entry ], [ %2, %for.body ] + store float %acc.0.lcssa, ptr %dest, align 4 + ret i32 0 + +for.body: ; preds = %for.body, %entry + %i.08 = phi i32 [ %inc, %for.body ], [ 0, 
%entry ] + %acc.07 = phi float [ %2, %for.body ], [ 0.000000e+00, %entry ] + %arrayidx = getelementptr inbounds float, ptr %src1, i32 %i.08 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds float, ptr %src2, i32 %i.08 + %1 = load float, ptr %arrayidx1, align 4 + %2 = tail call float @llvm.fmuladd.f32(float %0, float %1, float %acc.07) + %inc = add nuw nsw i32 %i.08, 1 + %exitcond.not = icmp eq i32 %inc, %len + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} diff --git a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/dotprode.ll b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/dotprode.ll new file mode 100644 index 0000000000000..38cce6ffa3e03 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/dotprode.ll @@ -0,0 +1,50 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=false < %s | FileCheck %s +; Function Attrs: nofree norecurse nosync nounwind memory(argmem: readwrite) +define dso_local noundef i32 @dsps_dotprode_f32_ansi(ptr nocapture noundef readonly %src1, ptr nocapture noundef readonly %src2, ptr nocapture noundef writeonly %dest, i32 noundef %len, i32 noundef %step1, i32 noundef %step2) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_dotprode_f32_ansi( +; CHECK-SAME: ptr nocapture noundef readonly [[SRC1:%.*]], ptr nocapture noundef readonly [[SRC2:%.*]], ptr nocapture noundef writeonly [[DEST:%.*]], i32 noundef [[LEN:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[STEP2:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP8]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[TMP2:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: store 
float [[ACC_0_LCSSA]], ptr [[DEST]], align 4 +; CHECK-NEXT: ret i32 0 +; CHECK: for.body: +; CHECK-NEXT: [[I_010:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ] +; CHECK-NEXT: [[ACC_09:%.*]] = phi float [ [[TMP2]], [[FOR_BODY]] ], [ 0.000000e+00, [[ENTRY]] ] +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_010]], [[STEP1]] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[MUL]] +; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[MUL1:%.*]] = mul nsw i32 [[I_010]], [[STEP2]] +; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[MUL1]] +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +; CHECK-NEXT: [[TMP2]] = tail call float @llvm.fmuladd.f32(float [[TMP0]], float [[TMP1]], float [[ACC_09]]) +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_010]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] +; +entry: + %cmp8 = icmp sgt i32 %len, 0 + br i1 %cmp8, label %for.body, label %for.cond.cleanup + +for.cond.cleanup: ; preds = %for.body, %entry + %acc.0.lcssa = phi float [ 0.000000e+00, %entry ], [ %2, %for.body ] + store float %acc.0.lcssa, ptr %dest, align 4 + ret i32 0 + +for.body: ; preds = %for.body, %entry + %i.010 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %acc.09 = phi float [ %2, %for.body ], [ 0.000000e+00, %entry ] + %mul = mul nsw i32 %i.010, %step1 + %arrayidx = getelementptr inbounds float, ptr %src1, i32 %mul + %0 = load float, ptr %arrayidx, align 4 + %mul1 = mul nsw i32 %i.010, %step2 + %arrayidx2 = getelementptr inbounds float, ptr %src2, i32 %mul1 + %1 = load float, ptr %arrayidx2, align 4 + %2 = tail call float @llvm.fmuladd.f32(float %0, float %1, float %acc.09) + %inc = add nuw nsw i32 %i.010, 1 + %exitcond.not = icmp eq i32 %inc, %len + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} diff --git 
a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/fir.ll b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/fir.ll new file mode 100644 index 0000000000000..3e188c7a4a973 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/fir.ll @@ -0,0 +1,160 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=false < %s | FileCheck %s +%struct.fir_f32_s = type { ptr, ptr, i32, i32, i32, i16 } +; Function Attrs: nofree norecurse nosync nounwind memory(readwrite, inaccessiblemem: none) +define dso_local noundef i32 @dsps_fir_f32_ansi(ptr nocapture noundef %fir, ptr nocapture noundef readonly %input, ptr nocapture noundef writeonly %output, i32 noundef %len) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_fir_f32_ansi( +; CHECK-SAME: ptr nocapture noundef [[FIR:%.*]], ptr nocapture noundef readonly [[INPUT:%.*]], ptr nocapture noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP67:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP67]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]] +; CHECK: for.body.lr.ph: +; CHECK-NEXT: [[DELAY:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S:%.*]], ptr [[FIR]], i32 0, i32 1 +; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DELAY]], align 4 +; CHECK-NEXT: [[POS:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S]], ptr [[FIR]], i32 0, i32 3 +; CHECK-NEXT: [[N:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S]], ptr [[FIR]], i32 0, i32 2 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[N]], align 4 +; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, ptr [[POS]], align 4 +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: ret i32 0 +; CHECK: for.body: +; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[DOTPRE]], [[FOR_BODY_LR_PH]] ], [ [[SPEC_STORE_SELECT:%.*]], 
[[FOR_COND_CLEANUP21:%.*]] ] +; CHECK-NEXT: [[I_068:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC33:%.*]], [[FOR_COND_CLEANUP21]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_068]] +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[TMP0]], i32 [[TMP2]] +; CHECK-NEXT: store float [[TMP3]], ptr [[ARRAYIDX1]], align 4 +; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP2]], 1 +; CHECK-NEXT: [[CMP4_NOT:%.*]] = icmp slt i32 [[INC]], [[TMP1]] +; CHECK-NEXT: [[SPEC_STORE_SELECT]] = select i1 [[CMP4_NOT]], i32 [[INC]], i32 0 +; CHECK-NEXT: store i32 [[SPEC_STORE_SELECT]], ptr [[POS]], align 4 +; CHECK-NEXT: [[CMP957:%.*]] = icmp slt i32 [[SPEC_STORE_SELECT]], [[TMP1]] +; CHECK-NEXT: br i1 [[CMP957]], label [[FOR_BODY11_LR_PH:%.*]], label [[FOR_COND18_PREHEADER:%.*]] +; CHECK: for.body11.lr.ph: +; CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[FIR]], align 4 +; CHECK-NEXT: [[TMP5:%.*]] = sub i32 [[TMP1]], [[SPEC_STORE_SELECT]] +; CHECK-NEXT: br label [[FOR_BODY11:%.*]] +; CHECK: for.cond18.preheader: +; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP9:%.*]], [[FOR_BODY11]] ] +; CHECK-NEXT: [[COEFF_POS_0_LCSSA:%.*]] = phi i32 [ 0, [[FOR_BODY]] ], [ [[TMP5]], [[FOR_BODY11]] ] +; CHECK-NEXT: [[CMP2062:%.*]] = icmp sgt i32 [[SPEC_STORE_SELECT]], 0 +; CHECK-NEXT: br i1 [[CMP2062]], label [[FOR_BODY22_LR_PH:%.*]], label [[FOR_COND_CLEANUP21]] +; CHECK: for.body22.lr.ph: +; CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[FIR]], align 4 +; CHECK-NEXT: br label [[FOR_BODY22:%.*]] +; CHECK: for.body11: +; CHECK-NEXT: [[N_060:%.*]] = phi i32 [ [[SPEC_STORE_SELECT]], [[FOR_BODY11_LR_PH]] ], [ [[INC16:%.*]], [[FOR_BODY11]] ] +; CHECK-NEXT: [[COEFF_POS_059:%.*]] = phi i32 [ 0, [[FOR_BODY11_LR_PH]] ], [ [[INC12:%.*]], [[FOR_BODY11]] ] +; CHECK-NEXT: [[ACC_058:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY11_LR_PH]] ], [ [[TMP9]], 
[[FOR_BODY11]] ] +; CHECK-NEXT: [[INC12]] = add nuw i32 [[COEFF_POS_059]], 1 +; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i32 [[COEFF_POS_059]] +; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX13]], align 4 +; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds float, ptr [[TMP0]], i32 [[N_060]] +; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX15]], align 4 +; CHECK-NEXT: [[TMP9]] = tail call float @llvm.fmuladd.f32(float [[TMP7]], float [[TMP8]], float [[ACC_058]]) +; CHECK-NEXT: [[INC16]] = add nsw i32 [[N_060]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC12]], [[TMP5]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND18_PREHEADER]], label [[FOR_BODY11]] +; CHECK: for.cond.cleanup21: +; CHECK-NEXT: [[ACC_1_LCSSA:%.*]] = phi float [ [[ACC_0_LCSSA]], [[FOR_COND18_PREHEADER]] ], [ [[TMP12:%.*]], [[FOR_BODY22]] ] +; CHECK-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_068]] +; CHECK-NEXT: store float [[ACC_1_LCSSA]], ptr [[ARRAYIDX31]], align 4 +; CHECK-NEXT: [[INC33]] = add nuw nsw i32 [[I_068]], 1 +; CHECK-NEXT: [[EXITCOND71_NOT:%.*]] = icmp eq i32 [[INC33]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND71_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] +; CHECK: for.body22: +; CHECK-NEXT: [[N17_065:%.*]] = phi i32 [ 0, [[FOR_BODY22_LR_PH]] ], [ [[INC29:%.*]], [[FOR_BODY22]] ] +; CHECK-NEXT: [[COEFF_POS_164:%.*]] = phi i32 [ [[COEFF_POS_0_LCSSA]], [[FOR_BODY22_LR_PH]] ], [ [[INC24:%.*]], [[FOR_BODY22]] ] +; CHECK-NEXT: [[ACC_163:%.*]] = phi float [ [[ACC_0_LCSSA]], [[FOR_BODY22_LR_PH]] ], [ [[TMP12]], [[FOR_BODY22]] ] +; CHECK-NEXT: [[INC24]] = add nuw nsw i32 [[COEFF_POS_164]], 1 +; CHECK-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i32 [[COEFF_POS_164]] +; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX25]], align 4 +; CHECK-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds float, ptr [[TMP0]], i32 [[N17_065]] +; CHECK-NEXT: 
[[TMP11:%.*]] = load float, ptr [[ARRAYIDX27]], align 4 +; CHECK-NEXT: [[TMP12]] = tail call float @llvm.fmuladd.f32(float [[TMP10]], float [[TMP11]], float [[ACC_163]]) +; CHECK-NEXT: [[INC29]] = add nuw nsw i32 [[N17_065]], 1 +; CHECK-NEXT: [[EXITCOND70_NOT:%.*]] = icmp eq i32 [[INC29]], [[SPEC_STORE_SELECT]] +; CHECK-NEXT: br i1 [[EXITCOND70_NOT]], label [[FOR_COND_CLEANUP21]], label [[FOR_BODY22]] +; +entry: + %cmp67 = icmp sgt i32 %len, 0 + br i1 %cmp67, label %for.body.lr.ph, label %for.cond.cleanup + +for.body.lr.ph: ; preds = %entry + %delay = getelementptr inbounds %struct.fir_f32_s, ptr %fir, i32 0, i32 1 + %0 = load ptr, ptr %delay, align 4 + %pos = getelementptr inbounds %struct.fir_f32_s, ptr %fir, i32 0, i32 3 + %N = getelementptr inbounds %struct.fir_f32_s, ptr %fir, i32 0, i32 2 + %1 = load i32, ptr %N, align 4 + %.pre = load i32, ptr %pos, align 4 + br label %for.body + +for.cond.cleanup: ; preds = %for.cond.cleanup21, %entry + ret i32 0 + +for.body: ; preds = %for.cond.cleanup21, %for.body.lr.ph + %2 = phi i32 [ %.pre, %for.body.lr.ph ], [ %spec.store.select, %for.cond.cleanup21 ] + %i.068 = phi i32 [ 0, %for.body.lr.ph ], [ %inc33, %for.cond.cleanup21 ] + %arrayidx = getelementptr inbounds float, ptr %input, i32 %i.068 + %3 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds float, ptr %0, i32 %2 + store float %3, ptr %arrayidx1, align 4 + %inc = add nsw i32 %2, 1 + %cmp4.not = icmp slt i32 %inc, %1 + %spec.store.select = select i1 %cmp4.not, i32 %inc, i32 0 + store i32 %spec.store.select, ptr %pos, align 4 + %cmp957 = icmp slt i32 %spec.store.select, %1 + br i1 %cmp957, label %for.body11.lr.ph, label %for.cond18.preheader + +for.body11.lr.ph: ; preds = %for.body + %4 = load ptr, ptr %fir, align 4 + %5 = sub i32 %1, %spec.store.select + br label %for.body11 + +for.cond18.preheader: ; preds = %for.body11, %for.body + %acc.0.lcssa = phi float [ 0.000000e+00, %for.body ], [ %9, %for.body11 ] + %coeff_pos.0.lcssa = phi i32 [ 0, 
%for.body ], [ %5, %for.body11 ] + %cmp2062 = icmp sgt i32 %spec.store.select, 0 + br i1 %cmp2062, label %for.body22.lr.ph, label %for.cond.cleanup21 + +for.body22.lr.ph: ; preds = %for.cond18.preheader + %6 = load ptr, ptr %fir, align 4 + br label %for.body22 + +for.body11: ; preds = %for.body11, %for.body11.lr.ph + %n.060 = phi i32 [ %spec.store.select, %for.body11.lr.ph ], [ %inc16, %for.body11 ] + %coeff_pos.059 = phi i32 [ 0, %for.body11.lr.ph ], [ %inc12, %for.body11 ] + %acc.058 = phi float [ 0.000000e+00, %for.body11.lr.ph ], [ %9, %for.body11 ] + %inc12 = add nuw i32 %coeff_pos.059, 1 + %arrayidx13 = getelementptr inbounds float, ptr %4, i32 %coeff_pos.059 + %7 = load float, ptr %arrayidx13, align 4 + %arrayidx15 = getelementptr inbounds float, ptr %0, i32 %n.060 + %8 = load float, ptr %arrayidx15, align 4 + %9 = tail call float @llvm.fmuladd.f32(float %7, float %8, float %acc.058) + %inc16 = add nsw i32 %n.060, 1 + %exitcond.not = icmp eq i32 %inc12, %5 + br i1 %exitcond.not, label %for.cond18.preheader, label %for.body11 + +for.cond.cleanup21: ; preds = %for.body22, %for.cond18.preheader + %acc.1.lcssa = phi float [ %acc.0.lcssa, %for.cond18.preheader ], [ %12, %for.body22 ] + %arrayidx31 = getelementptr inbounds float, ptr %output, i32 %i.068 + store float %acc.1.lcssa, ptr %arrayidx31, align 4 + %inc33 = add nuw nsw i32 %i.068, 1 + %exitcond71.not = icmp eq i32 %inc33, %len + br i1 %exitcond71.not, label %for.cond.cleanup, label %for.body + +for.body22: ; preds = %for.body22, %for.body22.lr.ph + %n17.065 = phi i32 [ 0, %for.body22.lr.ph ], [ %inc29, %for.body22 ] + %coeff_pos.164 = phi i32 [ %coeff_pos.0.lcssa, %for.body22.lr.ph ], [ %inc24, %for.body22 ] + %acc.163 = phi float [ %acc.0.lcssa, %for.body22.lr.ph ], [ %12, %for.body22 ] + %inc24 = add nuw nsw i32 %coeff_pos.164, 1 + %arrayidx25 = getelementptr inbounds float, ptr %6, i32 %coeff_pos.164 + %10 = load float, ptr %arrayidx25, align 4 + %arrayidx27 = getelementptr inbounds float, ptr %0, i32 
%n17.065 + %11 = load float, ptr %arrayidx27, align 4 + %12 = tail call float @llvm.fmuladd.f32(float %10, float %11, float %acc.163) + %inc29 = add nuw nsw i32 %n17.065, 1 + %exitcond70.not = icmp eq i32 %inc29, %spec.store.select + br i1 %exitcond70.not, label %for.cond.cleanup21, label %for.body22 +} diff --git a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/mul.ll b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/mul.ll new file mode 100644 index 0000000000000..79c7754e70151 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/mul.ll @@ -0,0 +1,67 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=false < %s | FileCheck %s +; Function Attrs: nofree norecurse nosync nounwind memory(argmem: readwrite) +define dso_local noundef i32 @dsps_mul_f32_ansi(ptr noundef readonly %input1, ptr noundef readonly %input2, ptr noundef writeonly %output, i32 noundef %len, i32 noundef %step1, i32 noundef %step2, i32 noundef %step_out) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_mul_f32_ansi( +; CHECK-SAME: ptr noundef readonly [[INPUT1:%.*]], ptr noundef readonly [[INPUT2:%.*]], ptr noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[STEP2:%.*]], i32 noundef [[STEP_OUT:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[INPUT1]], null +; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[INPUT2]], null +; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] +; CHECK-NEXT: [[CMP4:%.*]] = icmp eq ptr [[OUTPUT]], null +; CHECK-NEXT: [[OR_COND20:%.*]] = or i1 [[OR_COND]], [[CMP4]] +; CHECK-NEXT: br i1 [[OR_COND20]], label [[RETURN:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK: for.cond.preheader: +; CHECK-NEXT: [[CMP721:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP721]], label 
[[FOR_BODY:%.*]], label [[RETURN]] +; CHECK: for.body: +; CHECK-NEXT: [[I_022:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_022]], [[STEP1]] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL]] +; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[MUL8:%.*]] = mul nsw i32 [[I_022]], [[STEP2]] +; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[MUL8]] +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX9]], align 4 +; CHECK-NEXT: [[MUL10:%.*]] = fmul float [[TMP0]], [[TMP1]] +; CHECK-NEXT: [[MUL11:%.*]] = mul nsw i32 [[I_022]], [[STEP_OUT]] +; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL11]] +; CHECK-NEXT: store float [[MUL10]], ptr [[ARRAYIDX12]], align 4 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_022]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[RETURN]], label [[FOR_BODY]] +; CHECK: return: +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ] +; CHECK-NEXT: ret i32 [[RETVAL_0]] +; +entry: + %cmp = icmp eq ptr %input1, null + %cmp1 = icmp eq ptr %input2, null + %or.cond = or i1 %cmp, %cmp1 + %cmp4 = icmp eq ptr %output, null + %or.cond20 = or i1 %or.cond, %cmp4 + br i1 %or.cond20, label %return, label %for.cond.preheader + +for.cond.preheader: ; preds = %entry + %cmp721 = icmp sgt i32 %len, 0 + br i1 %cmp721, label %for.body, label %return + +for.body: ; preds = %for.body, %for.cond.preheader + %i.022 = phi i32 [ %inc, %for.body ], [ 0, %for.cond.preheader ] + %mul = mul nsw i32 %i.022, %step1 + %arrayidx = getelementptr inbounds float, ptr %input1, i32 %mul + %0 = load float, ptr %arrayidx, align 4 + %mul8 = mul nsw i32 %i.022, %step2 + %arrayidx9 = getelementptr inbounds float, ptr %input2, i32 
%mul8 + %1 = load float, ptr %arrayidx9, align 4 + %mul10 = fmul float %0, %1 + %mul11 = mul nsw i32 %i.022, %step_out + %arrayidx12 = getelementptr inbounds float, ptr %output, i32 %mul11 + store float %mul10, ptr %arrayidx12, align 4 + %inc = add nuw nsw i32 %i.022, 1 + %exitcond.not = icmp eq i32 %inc, %len + br i1 %exitcond.not, label %return, label %for.body + +return: ; preds = %for.body, %for.cond.preheader, %entry + %retval.0 = phi i32 [ 458755, %entry ], [ 0, %for.cond.preheader ], [ 0, %for.body ] + ret i32 %retval.0 +} diff --git a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/mulc.ll b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/mulc.ll new file mode 100644 index 0000000000000..ff58e8447f53e --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/mulc.ll @@ -0,0 +1,57 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=false < %s | FileCheck %s +; Function Attrs: nofree norecurse nosync nounwind memory(argmem: readwrite) +define dso_local noundef i32 @dsps_mulc_f32_ansi(ptr noalias noundef readonly %input, ptr noalias noundef writeonly %output, i32 noundef %len, float noundef %C, i32 noundef %step_in, i32 noundef %step_out) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_mulc_f32_ansi( +; CHECK-SAME: ptr noalias noundef readonly [[INPUT:%.*]], ptr noalias noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], float noundef [[C:%.*]], i32 noundef [[STEP_IN:%.*]], i32 noundef [[STEP_OUT:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[INPUT]], null +; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[OUTPUT]], null +; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] +; CHECK-NEXT: br i1 [[OR_COND]], label [[RETURN:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK: for.cond.preheader: +; CHECK-NEXT: [[CMP413:%.*]] = 
icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP413]], label [[FOR_BODY:%.*]], label [[RETURN]] +; CHECK: for.body: +; CHECK-NEXT: [[I_014:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_014]], [[STEP_IN]] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[MUL]] +; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[MUL5:%.*]] = fmul float [[TMP0]], [[C]] +; CHECK-NEXT: [[MUL6:%.*]] = mul nsw i32 [[I_014]], [[STEP_OUT]] +; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL6]] +; CHECK-NEXT: store float [[MUL5]], ptr [[ARRAYIDX7]], align 4 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_014]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[RETURN]], label [[FOR_BODY]] +; CHECK: return: +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ] +; CHECK-NEXT: ret i32 [[RETVAL_0]] +; +entry: + %cmp = icmp eq ptr %input, null + %cmp1 = icmp eq ptr %output, null + %or.cond = or i1 %cmp, %cmp1 + br i1 %or.cond, label %return, label %for.cond.preheader + +for.cond.preheader: ; preds = %entry + %cmp413 = icmp sgt i32 %len, 0 + br i1 %cmp413, label %for.body, label %return + +for.body: ; preds = %for.body, %for.cond.preheader + %i.014 = phi i32 [ %inc, %for.body ], [ 0, %for.cond.preheader ] + %mul = mul nsw i32 %i.014, %step_in + %arrayidx = getelementptr inbounds float, ptr %input, i32 %mul + %0 = load float, ptr %arrayidx, align 4 + %mul5 = fmul float %0, %C + %mul6 = mul nsw i32 %i.014, %step_out + %arrayidx7 = getelementptr inbounds float, ptr %output, i32 %mul6 + store float %mul5, ptr %arrayidx7, align 4 + %inc = add nuw nsw i32 %i.014, 1 + %exitcond.not = icmp eq i32 %inc, %len + br i1 %exitcond.not, label %return, label %for.body + +return: ; preds = %for.body, 
%for.cond.preheader, %entry + %retval.0 = phi i32 [ 458755, %entry ], [ 0, %for.cond.preheader ], [ 0, %for.body ] + ret i32 %retval.0 +} diff --git a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/sqrt.ll b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/sqrt.ll new file mode 100644 index 0000000000000..414c1dfd43d23 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/sqrt.ll @@ -0,0 +1,55 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=false < %s | FileCheck %s +; Function Attrs: nofree norecurse nosync nounwind memory(argmem: readwrite) +define dso_local noundef i32 @dsps_sqrt_f32_ansi(ptr noundef readonly %input, ptr noundef writeonly %output, i32 noundef %len) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_sqrt_f32_ansi( +; CHECK-SAME: ptr noundef readonly [[INPUT:%.*]], ptr noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[INPUT]], null +; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[OUTPUT]], null +; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] +; CHECK-NEXT: br i1 [[OR_COND]], label [[RETURN:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK: for.cond.preheader: +; CHECK-NEXT: [[CMP411:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP411]], label [[FOR_BODY:%.*]], label [[RETURN]] +; CHECK: for.body: +; CHECK-NEXT: [[I_012:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_012]] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[SHR_I:%.*]] = ashr i32 [[TMP0]], 1 +; CHECK-NEXT: [[ADD_I:%.*]] = add nsw i32 [[SHR_I]], 532365312 +; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 
[[I_012]] +; CHECK-NEXT: store i32 [[ADD_I]], ptr [[ARRAYIDX5]], align 4 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_012]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[RETURN]], label [[FOR_BODY]] +; CHECK: return: +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ] +; CHECK-NEXT: ret i32 [[RETVAL_0]] +; +entry: + %cmp = icmp eq ptr %input, null + %cmp1 = icmp eq ptr %output, null + %or.cond = or i1 %cmp, %cmp1 + br i1 %or.cond, label %return, label %for.cond.preheader + +for.cond.preheader: ; preds = %entry + %cmp411 = icmp sgt i32 %len, 0 + br i1 %cmp411, label %for.body, label %return + +for.body: ; preds = %for.body, %for.cond.preheader + %i.012 = phi i32 [ %inc, %for.body ], [ 0, %for.cond.preheader ] + %arrayidx = getelementptr inbounds float, ptr %input, i32 %i.012 + %0 = load i32, ptr %arrayidx, align 4 + %shr.i = ashr i32 %0, 1 + %add.i = add nsw i32 %shr.i, 532365312 + %arrayidx5 = getelementptr inbounds float, ptr %output, i32 %i.012 + store i32 %add.i, ptr %arrayidx5, align 4 + %inc = add nuw nsw i32 %i.012, 1 + %exitcond.not = icmp eq i32 %inc, %len + br i1 %exitcond.not, label %return, label %for.body + +return: ; preds = %for.body, %for.cond.preheader, %entry + %retval.0 = phi i32 [ 458755, %entry ], [ 0, %for.cond.preheader ], [ 0, %for.body ] + ret i32 %retval.0 +} diff --git a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/sub.ll b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/sub.ll new file mode 100644 index 0000000000000..a473a0e83c7a2 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/sub.ll @@ -0,0 +1,67 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=false < %s | FileCheck %s +; Function Attrs: nofree norecurse nosync nounwind 
memory(argmem: readwrite) +define dso_local noundef i32 @dsps_sub_f32_ansi(ptr noundef readonly %input1, ptr noundef readonly %input2, ptr noundef writeonly %output, i32 noundef %len, i32 noundef %step1, i32 noundef %step2, i32 noundef %step_out) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_sub_f32_ansi( +; CHECK-SAME: ptr noundef readonly [[INPUT1:%.*]], ptr noundef readonly [[INPUT2:%.*]], ptr noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[STEP2:%.*]], i32 noundef [[STEP_OUT:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[INPUT1]], null +; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[INPUT2]], null +; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] +; CHECK-NEXT: [[CMP4:%.*]] = icmp eq ptr [[OUTPUT]], null +; CHECK-NEXT: [[OR_COND19:%.*]] = or i1 [[OR_COND]], [[CMP4]] +; CHECK-NEXT: br i1 [[OR_COND19]], label [[RETURN:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK: for.cond.preheader: +; CHECK-NEXT: [[CMP720:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP720]], label [[FOR_BODY:%.*]], label [[RETURN]] +; CHECK: for.body: +; CHECK-NEXT: [[I_021:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_021]], [[STEP1]] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL]] +; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[MUL8:%.*]] = mul nsw i32 [[I_021]], [[STEP2]] +; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[MUL8]] +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX9]], align 4 +; CHECK-NEXT: [[SUB:%.*]] = fsub float [[TMP0]], [[TMP1]] +; CHECK-NEXT: [[MUL10:%.*]] = mul nsw i32 [[I_021]], [[STEP_OUT]] +; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL10]] +; CHECK-NEXT: store float [[SUB]], ptr 
[[ARRAYIDX11]], align 4 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_021]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[RETURN]], label [[FOR_BODY]] +; CHECK: return: +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ] +; CHECK-NEXT: ret i32 [[RETVAL_0]] +; +entry: + %cmp = icmp eq ptr %input1, null + %cmp1 = icmp eq ptr %input2, null + %or.cond = or i1 %cmp, %cmp1 + %cmp4 = icmp eq ptr %output, null + %or.cond19 = or i1 %or.cond, %cmp4 + br i1 %or.cond19, label %return, label %for.cond.preheader + +for.cond.preheader: ; preds = %entry + %cmp720 = icmp sgt i32 %len, 0 + br i1 %cmp720, label %for.body, label %return + +for.body: ; preds = %for.body, %for.cond.preheader + %i.021 = phi i32 [ %inc, %for.body ], [ 0, %for.cond.preheader ] + %mul = mul nsw i32 %i.021, %step1 + %arrayidx = getelementptr inbounds float, ptr %input1, i32 %mul + %0 = load float, ptr %arrayidx, align 4 + %mul8 = mul nsw i32 %i.021, %step2 + %arrayidx9 = getelementptr inbounds float, ptr %input2, i32 %mul8 + %1 = load float, ptr %arrayidx9, align 4 + %sub = fsub float %0, %1 + %mul10 = mul nsw i32 %i.021, %step_out + %arrayidx11 = getelementptr inbounds float, ptr %output, i32 %mul10 + store float %sub, ptr %arrayidx11, align 4 + %inc = add nuw nsw i32 %i.021, 1 + %exitcond.not = icmp eq i32 %inc, %len + br i1 %exitcond.not, label %return, label %for.body + +return: ; preds = %for.body, %for.cond.preheader, %entry + %retval.0 = phi i32 [ 458755, %entry ], [ 0, %for.cond.preheader ], [ 0, %for.body ] + ret i32 %retval.0 +} From da8bf9d759977aafa852b5fe79657428e3bfcb8c Mon Sep 17 00:00:00 2001 From: "chen.qian" Date: Thu, 7 Nov 2024 14:07:24 +0800 Subject: [PATCH 265/289] [Pass] add SplitLoopByLength Pass --- llvm/lib/Target/RISCV/CMakeLists.txt | 2 + .../Target/RISCV/RISCVSplitLoopByLength.cpp | 636 ++++++++++++++++++ 
.../lib/Target/RISCV/RISCVSplitLoopByLength.h | 39 ++ llvm/lib/Target/RISCV/RISCVTargetMachine.cpp | 26 + .../RISCV/RISCVSplitLoopByLength/add.ll | 28 +- .../RISCV/RISCVSplitLoopByLength/addc.ll | 25 +- .../RISCV/RISCVSplitLoopByLength/biquad.ll | 84 ++- .../RISCV/RISCVSplitLoopByLength/dotprod.ll | 34 +- .../RISCV/RISCVSplitLoopByLength/dotprode.ll | 36 +- .../RISCV/RISCVSplitLoopByLength/fir.ll | 133 +++- .../RISCV/RISCVSplitLoopByLength/mul.ll | 28 +- .../RISCV/RISCVSplitLoopByLength/mulc.ll | 25 +- .../RISCV/RISCVSplitLoopByLength/sqrt.ll | 24 +- .../RISCV/RISCVSplitLoopByLength/sub.ll | 28 +- 14 files changed, 1043 insertions(+), 105 deletions(-) create mode 100644 llvm/lib/Target/RISCV/RISCVSplitLoopByLength.cpp create mode 100644 llvm/lib/Target/RISCV/RISCVSplitLoopByLength.h diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt index 47e4fbb41c822..1bb63ffe6d43f 100644 --- a/llvm/lib/Target/RISCV/CMakeLists.txt +++ b/llvm/lib/Target/RISCV/CMakeLists.txt @@ -36,6 +36,7 @@ add_llvm_target(RISCVCodeGen RISCVExpandPseudoInsts.cpp RISCVFrameLowering.cpp RISCVGatherScatterLowering.cpp + RISCVSplitLoopByLength.cpp RISCVInsertVSETVLI.cpp RISCVInsertReadWriteCSR.cpp RISCVInsertWriteVXRM.cpp @@ -75,6 +76,7 @@ add_llvm_target(RISCVCodeGen MC RISCVDesc RISCVInfo + Passes Scalar SelectionDAG Support diff --git a/llvm/lib/Target/RISCV/RISCVSplitLoopByLength.cpp b/llvm/lib/Target/RISCV/RISCVSplitLoopByLength.cpp new file mode 100644 index 0000000000000..48ecc99720b52 --- /dev/null +++ b/llvm/lib/Target/RISCV/RISCVSplitLoopByLength.cpp @@ -0,0 +1,636 @@ +//===-- RISCVSplitLoopByLength.cpp - Loop splitting optimization +//---------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This pass splits loops into two parts: one for length > 2 and another for +// length <= 2. It's designed to prepare for the esp.lp.setup instruction. +// +// The pass handles several types of functions: +// - Arithmetic operations: add, addc, mulc, sub, mul +// - Dot product operations: dotprod, dotprode +// - Square root calculation: sqrt +// - Biquadratic filter: biquad +// - Finite Impulse Response filter: fir +// +// Main steps of this pass: +// 1. Identify the function type +// 2. Clone the original loop +// 3. Insert an if-else structure to choose between the original and cloned loop +// 4. Update phi nodes and branch instructions accordingly +// +//===----------------------------------------------------------------------===// + +#include "RISCVSplitLoopByLength.h" +#include "llvm/ADT/MapVector.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/Analysis/DomTreeUpdater.h" +#include "llvm/Analysis/LoopInfo.h" +#include "llvm/IR/BasicBlock.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/Dominators.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/IntrinsicsRISCV.h" +#include "llvm/IR/LegacyPassManager.h" +#include "llvm/IR/Verifier.h" +#include "llvm/InitializePasses.h" +#include "llvm/Pass.h" +#include "llvm/Passes/PassBuilder.h" +#include "llvm/Passes/PassPlugin.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" +#include "llvm/Transforms/Utils.h" +#include "llvm/Transforms/Utils/BasicBlockUtils.h" +#include "llvm/Transforms/Utils/Cloning.h" +#include "llvm/Transforms/Utils/LoopSimplify.h" +#include "llvm/Transforms/Utils/LoopUtils.h" + +using namespace llvm; + +#define DEBUG_TYPE 
"riscv-split-loop-by-length" +#define ESP_ERR_DSP_BASE 0x70000 +#define ESP_ERR_DSP_PARAM_OUTOFRANGE (ESP_ERR_DSP_BASE + 3) + +enum class SplitType { DOTPROD, ADDC, BIQUAD_FIR, UNKNOWN }; + +static SplitType currentSplitType = SplitType::UNKNOWN; +// Command line option to enable/disable RISCVSplitLoopByLength optimization +cl::opt llvm::EnableRISCVSplitLoopByLength( + "riscv-split-loop-by-length", cl::init(false), + cl::desc("Enable loop splitting optimization")); + +// Get the length parameter of the function +static Value *getLength(Function *F) { + for (auto &Arg : F->args()) { + for (const User *U : Arg.users()) { + if (const ICmpInst *ICmp = dyn_cast(U)) { + if (ICmp->getPredicate() == ICmpInst::ICMP_EQ && + ICmp->getOperand(1) == &Arg) { + return &Arg; + } + } + } + } + llvm_unreachable("Length parameter not found"); +} + +// Get a basic block by its name +static inline BasicBlock *getBasicBlockByName(Function &F, StringRef Name) { + for (BasicBlock &BB : F) + if (BB.getName() == Name) + return &BB; + return nullptr; +} + +// Move getelementptr instructions from Entry to NewBB +static void moveGEPInstructions(BasicBlock &Entry, BasicBlock *NewBB) { + for (auto I = Entry.begin(); I != Entry.end();) { + if (GetElementPtrInst *GEP = dyn_cast(I)) { + I++; + GEP->moveBefore(NewBB->getTerminator()); + } else { + ++I; + } + } +} + +// Insert if-else structure with cloned basic block (for cases with +// LoopPreHeader) +static void insertIfElseWithClonedBB(Function *F, BasicBlock *ClonedPhBB, + SmallVector &NewBlocks) { + // Rename entry to for.cond.preheader + BasicBlock &Entry = F->getEntryBlock(); + Entry.setName("for.cond.preheader"); + + // Rename for.cond.cleanup to if.end + for (BasicBlock &BB : *F) { + if (BB.getName() == "for.cond.cleanup") + BB.setName("if.end"); + } + + // Create new entry basic block + LLVMContext &Context = F->getContext(); + IRBuilder<> Builder(Context); + BasicBlock *NewEntryBB = + BasicBlock::Create(Context, "entry", F, 
&F->getEntryBlock()); + + Builder.SetInsertPoint(NewEntryBB); + + // Find icmp sgt instruction + ICmpInst *ICmp = nullptr; + for (Instruction &I : Entry) { + if (ICmpInst *CI = dyn_cast(&I)) { + if (CI->getPredicate() == ICmpInst::ICMP_SGT) { + ICmp = CI; + break; + } + } + } + + assert(ICmp && "icmp sgt instruction not found"); + + // Create new comparison and conditional branch + Value *Length = ICmp->getOperand(0); + Value *Cmp = + Builder.CreateICmpSGT(Length, ConstantInt::get(Length->getType(), 2)); + Builder.CreateCondBr(Cmp, &Entry, ClonedPhBB); + moveGEPInstructions(Entry, NewEntryBB); +} + +// Insert if-else structure (for cases without LoopPreHeader) +static void insertIfElse(Function *F, SmallVector &NewBlocks) { + BasicBlock &Entry = F->getEntryBlock(); + Entry.setName("for.cond.preheader"); + + BasicBlock *ForBodyBB = getBasicBlockByName(*F, "for.body"); + for (BasicBlock &BB : *F) { + if (BB.getName() == "for.cond.cleanup") + BB.setName("if.end"); + } + assert(ForBodyBB && "ForBody must exist"); + + BasicBlock *ForBodyCloneBB = nullptr; + Instruction *FMulAdd = nullptr; + for (BasicBlock *BB : NewBlocks) { + if (BB->getName() == "for.body.clone") { + ForBodyCloneBB = BB; + for (Instruction &I : *BB) { + if (RecurrenceDescriptor::isFMulAddIntrinsic(&I)) { + FMulAdd = &I; + break; + } + } + } + } + assert(ForBodyCloneBB && "ForBodyCloneBB must exist"); + assert(FMulAdd && "FMulAdd must exist"); + + LLVMContext &Context = F->getContext(); + IRBuilder<> Builder(Context); + BasicBlock *NewEntryBB = + BasicBlock::Create(Context, "entry", F, &F->getEntryBlock()); + + Builder.SetInsertPoint(NewEntryBB); + Value *Length = getLength(F); + Value *Cmp = + Builder.CreateICmpSGT(Length, ConstantInt::get(Length->getType(), 2)); + Builder.CreateCondBr(Cmp, ForBodyBB, &Entry); + + // Update phi nodes and successors + for (BasicBlock &BB : *F) { + if (BB.getName() == "if.end") { + if (PHINode *IfEndEntryPhiNode = dyn_cast(&BB.front())) { + 
IfEndEntryPhiNode->addIncoming(FMulAdd, ForBodyCloneBB); + } + } + if (BB.getName() == "for.cond.preheader") { + BB.getTerminator()->setSuccessor(0, ForBodyCloneBB); + } + } + + if (PHINode *ForBodyEntryPhiNode = dyn_cast(&ForBodyBB->front())) { + ForBodyEntryPhiNode->setIncomingBlock(1, NewEntryBB); + if (PHINode *ForBodyEntryPhiNode2 = dyn_cast( + ForBodyEntryPhiNode->getNextNonDebugInstruction())) { + ForBodyEntryPhiNode2->setIncomingBlock(1, NewEntryBB); + } + } + moveGEPInstructions(Entry, NewEntryBB); +} + +// Insert if-else structure for addc-like functions +static void insertAddcIfElse(Function *F, + SmallVector &NewBlocks) { + LLVMContext &Context = F->getContext(); + BasicBlock &EntryBB = F->getEntryBlock(); + + BasicBlock *OrigForBB = getBasicBlockByName(*F, "for.body"); + BasicBlock *ForCondBB = getBasicBlockByName(*F, "for.cond.preheader"); + BasicBlock *ForBodyCloneBB = getBasicBlockByName(*F, "for.body.clone"); + BasicBlock *ReturnBB = getBasicBlockByName(*F, "return"); + + assert(OrigForBB && ForCondBB && ForBodyCloneBB && ReturnBB && + "Necessary basic blocks not found"); + + BasicBlock *IfEndBB = BasicBlock::Create(Context, "if.end", F, OrigForBB); + + if (BranchInst *EntryBr = dyn_cast(EntryBB.getTerminator())) { + EntryBr->setSuccessor(1, IfEndBB); + } + + IRBuilder<> Builder(IfEndBB); + Value *Length = getLength(F); + Value *Cmp = Builder.CreateICmpSGT( + Length, ConstantInt::get(Length->getType(), 2), "cmp4"); + Builder.CreateCondBr(Cmp, OrigForBB, ForCondBB); + + // Update cloned for.body's terminator + if (BranchInst *Br = dyn_cast(ForBodyCloneBB->getTerminator())) { + if (Br->isConditional() && Br->getSuccessor(1) == OrigForBB) { + Br->setSuccessor(1, ForCondBB); + } + } + + // Update ForCondBB's terminator + if (BranchInst *Br = dyn_cast(ForCondBB->getTerminator())) { + if (Br->isConditional() && Br->getSuccessor(0) == OrigForBB) { + Br->setSuccessor(0, ForBodyCloneBB); + } + } + + // Update PHI nodes + for (PHINode &Phi : 
ForCondBB->phis()) { + if (Phi.getIncomingBlock(0) == &EntryBB) { + Phi.setIncomingBlock(0, IfEndBB); + } + for (unsigned i = 0; i < Phi.getNumIncomingValues(); ++i) { + if (Phi.getIncomingBlock(i) == OrigForBB) { + Phi.setIncomingBlock(i, ForBodyCloneBB); + } + } + } + + // Update return instruction + if (ReturnInst *Ret = dyn_cast(ReturnBB->getTerminator())) { + if (PHINode *RetPhi = dyn_cast(Ret->getReturnValue())) { + RetPhi->addIncoming(ConstantInt::get(RetPhi->getType(), 0), + ForBodyCloneBB); + } else { + PHINode *NewRetPhi = + PHINode::Create(Ret->getReturnValue()->getType(), 3, "retval.0", Ret); + NewRetPhi->addIncoming(ConstantInt::get(Ret->getReturnValue()->getType(), + ESP_ERR_DSP_PARAM_OUTOFRANGE), + &EntryBB); + NewRetPhi->addIncoming( + ConstantInt::get(Ret->getReturnValue()->getType(), 0), ForCondBB); + NewRetPhi->addIncoming(ConstantInt::get(NewRetPhi->getType(), 0), + ForBodyCloneBB); + Ret->setOperand(0, NewRetPhi); + } + } + + // Update OrigForBB's PHI nodes + for (PHINode &Phi : OrigForBB->phis()) { + if (Phi.getNumIncomingValues() > 1) { + Phi.setIncomingBlock(1, IfEndBB); + } + } + + // Reorder basic blocks + ForBodyCloneBB->moveBefore(ReturnBB); + IfEndBB->moveBefore(ForCondBB); + + LLVM_DEBUG(F->dump()); +} + +// Check if the function is addc-like +static bool isAddcLike(Function *F) { + + if (F->size() != 4) + return false; + + BasicBlock *Entry = getBasicBlockByName(*F, "entry"); + BasicBlock *ForCondPreheader = getBasicBlockByName(*F, "for.cond.preheader"); + BasicBlock *ForBody = getBasicBlockByName(*F, "for.body"); + BasicBlock *Return = getBasicBlockByName(*F, "return"); + + if (!ForBody || !ForCondPreheader || !Entry || !Return) + return false; + + // Check successors + if (Entry->getTerminator()->getSuccessor(0) != Return || + Entry->getTerminator()->getSuccessor(1) != ForCondPreheader) + return false; + + if (ForCondPreheader->getTerminator()->getSuccessor(0) != ForBody || + ForCondPreheader->getTerminator()->getSuccessor(1) != 
Return) + return false; + + if (ForBody->getTerminator()->getSuccessor(0) != Return || + ForBody->getTerminator()->getSuccessor(1) != ForBody) + return false; + + return true; +} + +// Check if the function is dotprod-like +static bool isDotProdLike(Function *F) { + + if (F->size() != 3) + return false; + + BasicBlock *Entry = getBasicBlockByName(*F, "entry"); + BasicBlock *ForCondCleanup = getBasicBlockByName(*F, "for.cond.cleanup"); + BasicBlock *ForBody = getBasicBlockByName(*F, "for.body"); + + if (!ForBody || !ForCondCleanup || !Entry) + return false; + + // Check successors + if (Entry->getTerminator()->getNumSuccessors() != 2 || + Entry->getTerminator()->getSuccessor(0) != ForBody || + Entry->getTerminator()->getSuccessor(1) != ForCondCleanup) + return false; + + if (ForBody->getTerminator()->getNumSuccessors() != 2 || + ForBody->getTerminator()->getSuccessor(0) != ForCondCleanup || + ForBody->getTerminator()->getSuccessor(1) != ForBody) + return false; + + // Check for float PHI node in ForBody + for (PHINode &Phi : ForBody->phis()) { + if (Phi.getType()->isFloatTy()) + return true; + } + + return false; +} + +// Check if the function should use insertAddcIfElse +static bool shouldInsertAddcIfElse(Function *F) { + if (F->empty() || F->arg_empty() || !isAddcLike(F)) + return false; + + BasicBlock &EntryBB = F->getEntryBlock(); + if (!isa(EntryBB.getTerminator()) || + !cast(EntryBB.getTerminator())->isConditional()) { + return false; + } + + BasicBlock *ReturnBB = nullptr; + for (BasicBlock &BB : *F) { + if (isa(BB.getTerminator())) { + ReturnBB = &BB; + break; + } + } + if (!ReturnBB || !isa(ReturnBB->front())) { + return false; + } + + return true; +} + +// Check if the function is biquad +static bool isBiquad(Function *F) { + + if (F->size() != 4) + return false; + + BasicBlock *ForBody = getBasicBlockByName(*F, "for.body"); + BasicBlock *ForBodyLrPh = getBasicBlockByName(*F, "for.body.lr.ph"); + BasicBlock *ForCondCleanup = getBasicBlockByName(*F, 
"for.cond.cleanup"); + BasicBlock *Entry = getBasicBlockByName(*F, "entry"); + + if (!ForBody || !ForBodyLrPh || !ForCondCleanup || !Entry) + return false; + + // Check successors + if (Entry->getTerminator()->getNumSuccessors() != 2 || + Entry->getTerminator()->getSuccessor(0) != ForBodyLrPh || + Entry->getTerminator()->getSuccessor(1) != ForCondCleanup) + return false; + + if (ForBodyLrPh->getTerminator()->getNumSuccessors() != 1 || + ForBodyLrPh->getTerminator()->getSuccessor(0) != ForBody) + return false; + + if (ForBody->getTerminator()->getNumSuccessors() != 2 || + ForBody->getTerminator()->getSuccessor(0) != ForCondCleanup || + ForBody->getTerminator()->getSuccessor(1) != ForBody) + return false; + + // Check PHI nodes in ForBody + int floatPhiCount = 0; + int i32PhiCount = 0; + for (PHINode &Phi : ForBody->phis()) { + if (Phi.getType()->isFloatTy()) + floatPhiCount++; + else if (Phi.getType()->isIntegerTy(32)) + i32PhiCount++; + } + + return (floatPhiCount == 2 && i32PhiCount == 1); +} + +// Check if the function is FIR +static bool isFIR(Function *F) { + + if (F->size() != 10) + return false; + + BasicBlock *ForBody = getBasicBlockByName(*F, "for.body"); + BasicBlock *ForBodyLrPh = getBasicBlockByName(*F, "for.body.lr.ph"); + BasicBlock *ForCondCleanup = getBasicBlockByName(*F, "for.cond.cleanup"); + BasicBlock *Entry = getBasicBlockByName(*F, "entry"); + + if (!ForBody || !ForBodyLrPh || !ForCondCleanup || !Entry) + return false; + + // Check successors + if (Entry->getTerminator()->getNumSuccessors() != 2 || + Entry->getTerminator()->getSuccessor(0) != ForBodyLrPh || + Entry->getTerminator()->getSuccessor(1) != ForCondCleanup) + return false; + + if (ForBodyLrPh->getTerminator()->getNumSuccessors() != 1 || + ForBodyLrPh->getTerminator()->getSuccessor(0) != ForBody) + return false; + + return true; +} + +// Check if the function should use insertIfElseWithClonedBB +static bool shouldInsertIfElseWithClonedBB(Function *F) { + if (F->empty() || 
F->arg_empty() || (!isBiquad(F) && !isFIR(F))) + return false; + + BasicBlock &EntryBB = F->getEntryBlock(); + BranchInst *EntryBr = dyn_cast(EntryBB.getTerminator()); + if (!EntryBr || !EntryBr->isConditional()) + return false; + + // Check for icmp sgt i32 %len, 0 + if (EntryBB.empty()) + return false; + + ICmpInst *ICmp = dyn_cast(&EntryBB.front()); + if (!ICmp || ICmp->getPredicate() != ICmpInst::ICMP_SGT) + return false; + + Value *Op0 = ICmp->getOperand(0); + Value *Op1 = ICmp->getOperand(1); + if (!isa(Op0) || !isa(Op1)) + return false; + + return cast(Op1)->isZero(); + + return false; +} + +// Check if the function should use insertIfElse for dotprod +static bool shouldDotprodInsertIfElse(Function *F) { + if (F->empty() || F->arg_size() < 3 || !isDotProdLike(F)) + return false; + + if (F->size() != 3 && F->size() != 4) + return false; + + bool hasEntry = getBasicBlockByName(*F, "entry") != nullptr; + bool hasForBody = getBasicBlockByName(*F, "for.body") != nullptr; + bool hasForCondPreheader = + getBasicBlockByName(*F, "for.cond.cleanup") != nullptr; + + if (!hasForBody || !hasForCondPreheader || !hasEntry) + return false; + + // Check for fmuladd instruction + bool hasFMulAdd = false; + for (BasicBlock &BB : *F) { + for (Instruction &I : BB) { + if (RecurrenceDescriptor::isFMulAddIntrinsic(&I)) { + hasFMulAdd = true; + // Check operands of fmuladd + if (I.getOperand(0)->getType()->isFloatTy() && + I.getOperand(1)->getType()->isFloatTy()) { + if (!isa(I.getOperand(0)) || + !isa(I.getOperand(1))) { + return false; + } + } + break; + } + } + if (hasFMulAdd) + break; + } + if (!hasFMulAdd) + return false; + + // Check for icmp sgt i32 %len, 0 + BasicBlock &EntryBB = F->getEntryBlock(); + Instruction &FirstInst = EntryBB.front(); + + ICmpInst *ICmp = dyn_cast(&FirstInst); + if (!ICmp) + return false; + + if (ICmp->getPredicate() != ICmpInst::ICMP_SGT) + return false; + + ConstantInt *CI = dyn_cast(ICmp->getOperand(1)); + if (!CI) + return false; + + return 
CI->isZero(); +} + +// Clone the loop +static Loop *CloneLoop(Loop *L, Function *F, LoopInfo &LI) { + ValueToValueMapTy VMap; + SmallVector NewBlocks; + Loop *NewLoop = LI.AllocateLoop(); + + // Clone LoopPreHeader if it exists + BasicBlock *PhBB = L->getLoopPreheader(); + BasicBlock *ClonedPhBB = nullptr; + if (PhBB) { + ClonedPhBB = CloneBasicBlock(PhBB, VMap, ".clone", F); + VMap[PhBB] = ClonedPhBB; + NewBlocks.push_back(ClonedPhBB); + } + + // Clone all blocks in the loop + for (BasicBlock *BB : L->blocks()) { + BasicBlock *NewBB = CloneBasicBlock(BB, VMap, ".clone", F); + VMap[BB] = NewBB; + NewBlocks.push_back(NewBB); + } + + // Add new blocks to the new loop and update LoopInfo + for (BasicBlock *BB : NewBlocks) { + if (LI.getLoopFor(BB->getUniquePredecessor()) == L) { + NewLoop->addBasicBlockToLoop(BB, LI); + } + } + + // Remap instructions and PHI nodes in the new loop + remapInstructionsInBlocks(NewBlocks, VMap); + + // Add the new loop to the top level + LI.addTopLevelLoop(NewLoop); + + // Apply the appropriate transformation based on the function type + if (PhBB && currentSplitType == SplitType::BIQUAD_FIR) { + LLVM_DEBUG(errs() << "Applying RISCVSplitLoopByLength for FIR/Biquad\n"); + insertIfElseWithClonedBB(F, ClonedPhBB, NewBlocks); + } else if (currentSplitType == SplitType::ADDC) { + LLVM_DEBUG(errs() << "Applying RISCVSplitLoopByLength for Addc-like\n"); + insertAddcIfElse(F, NewBlocks); + } else if (currentSplitType == SplitType::DOTPROD) { + LLVM_DEBUG(errs() << "Applying RISCVSplitLoopByLength for Dotprod\n"); + insertIfElse(F, NewBlocks); + } else if (currentSplitType == SplitType::UNKNOWN) { + LLVM_DEBUG(errs() << "Skipping RISCVSplitLoopByLength\n"); + } else { + llvm_unreachable("Unknown function type"); + } + + LLVM_DEBUG(F->dump()); + verifyFunction(*F, &llvm::errs()); + return NewLoop; +} + +// Main function to run the RISCVSplitLoopByLength pass +PreservedAnalyses +llvm::RISCVSplitLoopByLengthPass::run(Function &F, + 
FunctionAnalysisManager &AM) { + if (!EnableRISCVSplitLoopByLength) + return PreservedAnalyses::all(); + + if (verifyFunction(F, &llvm::errs())) { + LLVM_DEBUG(errs() << "Function verification failed!\n"); + } + + // Skip if the function already has a for.body.clone basic block + if (getBasicBlockByName(F, "for.body.clone")) + return PreservedAnalyses::all(); + + auto &LI = AM.getResult(F); + auto &DT = AM.getResult(F); + + if (shouldDotprodInsertIfElse(&F)) { + currentSplitType = SplitType::DOTPROD; + } else if (shouldInsertAddcIfElse(&F)) { + currentSplitType = SplitType::ADDC; + } else if (shouldInsertIfElseWithClonedBB(&F)) { + currentSplitType = SplitType::BIQUAD_FIR; + } else { + currentSplitType = SplitType::UNKNOWN; + return PreservedAnalyses::all(); + } + // Clone the first top-level loop + for (Loop *L : LI) { + if (L->getLoopDepth() == 1) { + CloneLoop(L, &F, LI); + break; + } + } + + LLVM_DEBUG(F.dump()); + + PreservedAnalyses PA; + PA.preserve(); + PA.preserve(); + PA.preserveSet(); + return PA; +} \ No newline at end of file diff --git a/llvm/lib/Target/RISCV/RISCVSplitLoopByLength.h b/llvm/lib/Target/RISCV/RISCVSplitLoopByLength.h new file mode 100644 index 0000000000000..cd23a9109fe56 --- /dev/null +++ b/llvm/lib/Target/RISCV/RISCVSplitLoopByLength.h @@ -0,0 +1,39 @@ +//===- RISCVSplitLoopByLength.h - Function Entry/Exit Instrumentation ------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// RISCVSplitLoopByLength pass - Split loops into two parts: +// 1. Loops with length greater than 2 +// 2. 
Loops for all other cases +// This pass aims to optimize loops of specific lengths to improve performance +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_UTILS_RISCVSPLITLOOPBYLENGTH_H +#define LLVM_TRANSFORMS_UTILS_RISCVSPLITLOOPBYLENGTH_H + +#include "llvm/Analysis/IVDescriptors.h" +#include "llvm/Analysis/LoopInfo.h" +#include "llvm/IR/Dominators.h" +#include "llvm/IR/PassManager.h" + +namespace llvm { +class RecurrenceDescriptor; +extern cl::opt EnableRISCVSplitLoopByLength; +class Function; + +struct RISCVSplitLoopByLengthPass : public PassInfoMixin { + RISCVSplitLoopByLengthPass() {} + + PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); + + static bool isRequired() { return true; } +}; + +} // namespace llvm + +#endif // LLVM_TRANSFORMS_UTILS_RISCVSPLITLOOPBYLENGTH_H diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp index 21fbf47875e68..369f40c35028d 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp @@ -16,6 +16,7 @@ #include "RISCVMachineFunctionInfo.h" #include "RISCVTargetObjectFile.h" #include "RISCVTargetTransformInfo.h" +#include "RISCVSplitLoopByLength.h" #include "TargetInfo/RISCVTargetInfo.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Analysis/TargetTransformInfo.h" @@ -36,6 +37,7 @@ #include "llvm/Passes/PassBuilder.h" #include "llvm/Support/FormattedStream.h" #include "llvm/Target/TargetOptions.h" +#include "llvm/Passes/PassBuilder.h" #include "llvm/Transforms/IPO.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Vectorize/LoopIdiomVectorize.h" @@ -103,6 +105,10 @@ static cl::opt EnableVSETVLIAfterRVVRegAlloc( cl::desc("Insert vsetvls after vector register allocation"), cl::init(true)); +static cl::opt + EnableEsp32P4Optimize("enable-esp32-p4-optimize", cl::init(false), + cl::Hidden, cl::desc("enable esp32 p4 optimize")); + extern "C" 
LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() { RegisterTargetMachine X(getTheRISCV32Target()); RegisterTargetMachine Y(getTheRISCV64Target()); @@ -581,6 +587,26 @@ void RISCVTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) { OptimizationLevel Level) { LPM.addPass(LoopIdiomVectorizePass(LoopIdiomVectorizeStyle::Predicated)); }); + + PB.registerPipelineParsingCallback( + [](StringRef Name, FunctionPassManager &FPM, + ArrayRef) { + if (Name == "riscv-split-loop-by-length") { + FPM.addPass(RISCVSplitLoopByLengthPass()); + return true; + } + return false; + }); + + PB.registerOptimizerLastEPCallback( + [](ModulePassManager &PM, OptimizationLevel Level) { + if(EnableEsp32P4Optimize && (Level == OptimizationLevel::O3 || Level == OptimizationLevel::O2)){ + EnableRISCVSplitLoopByLength = true; + FunctionPassManager FPM; + FPM.addPass(RISCVSplitLoopByLengthPass()); + PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); + } + }); } yaml::MachineFunctionInfo * diff --git a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/add.ll b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/add.ll index 0a7005b098c41..a81ead537e181 100644 --- a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/add.ll +++ b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/add.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=true < %s | FileCheck %s ; Function Attrs: nofree norecurse nosync nounwind memory(argmem: readwrite) define dso_local noundef i32 @dsps_add_f32_ansi(ptr noundef readonly %input1, ptr noundef readonly %input2, ptr noundef writeonly %output, i32 noundef %len, i32 noundef %step1, i32 noundef %step2, i32 noundef %step_out) local_unnamed_addr { ; 
CHECK-LABEL: define dso_local noundef i32 @dsps_add_f32_ansi( @@ -10,12 +10,15 @@ define dso_local noundef i32 @dsps_add_f32_ansi(ptr noundef readonly %input1, pt ; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] ; CHECK-NEXT: [[CMP4:%.*]] = icmp eq ptr [[OUTPUT]], null ; CHECK-NEXT: [[OR_COND19:%.*]] = or i1 [[OR_COND]], [[CMP4]] -; CHECK-NEXT: br i1 [[OR_COND19]], label [[RETURN:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[OR_COND19]], label [[RETURN:%.*]], label [[IF_END:%.*]] +; CHECK: if.end: +; CHECK-NEXT: [[CMP41:%.*]] = icmp sgt i32 [[LEN]], 2 +; CHECK-NEXT: br i1 [[CMP41]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] ; CHECK: for.cond.preheader: ; CHECK-NEXT: [[CMP720:%.*]] = icmp sgt i32 [[LEN]], 0 -; CHECK-NEXT: br i1 [[CMP720]], label [[FOR_BODY:%.*]], label [[RETURN]] +; CHECK-NEXT: br i1 [[CMP720]], label [[FOR_BODY_CLONE:%.*]], label [[RETURN]] ; CHECK: for.body: -; CHECK-NEXT: [[I_021:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[I_021:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[IF_END]] ] ; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_021]], [[STEP1]] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL]] ; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 @@ -29,8 +32,23 @@ define dso_local noundef i32 @dsps_add_f32_ansi(ptr noundef readonly %input1, pt ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_021]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[RETURN]], label [[FOR_BODY]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[I_021_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[MUL_CLONE:%.*]] = mul nsw i32 [[I_021_CLONE]], [[STEP1]] +; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL_CLONE]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr 
[[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[MUL8_CLONE:%.*]] = mul nsw i32 [[I_021_CLONE]], [[STEP2]] +; CHECK-NEXT: [[ARRAYIDX9_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[MUL8_CLONE]] +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX9_CLONE]], align 4 +; CHECK-NEXT: [[ADD_CLONE:%.*]] = fadd float [[TMP2]], [[TMP3]] +; CHECK-NEXT: [[MUL10_CLONE:%.*]] = mul nsw i32 [[I_021_CLONE]], [[STEP_OUT]] +; CHECK-NEXT: [[ARRAYIDX11_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL10_CLONE]] +; CHECK-NEXT: store float [[ADD_CLONE]], ptr [[ARRAYIDX11_CLONE]], align 4 +; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[I_021_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[RETURN]], label [[FOR_BODY_CLONE]] ; CHECK: return: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ] ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: diff --git a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/addc.ll b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/addc.ll index 135522dc5a0a5..423027b40cc2c 100644 --- a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/addc.ll +++ b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/addc.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=true < %s | FileCheck %s ; Function Attrs: nofree norecurse nosync nounwind memory(argmem: readwrite) define dso_local noundef i32 @dsps_addc_f32_ansi(ptr noundef readonly 
%input, ptr noundef writeonly %output, i32 noundef %len, float noundef %C, i32 noundef %step_in, i32 noundef %step_out) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_addc_f32_ansi( @@ -8,12 +8,15 @@ define dso_local noundef i32 @dsps_addc_f32_ansi(ptr noundef readonly %input, pt ; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[INPUT]], null ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[OUTPUT]], null ; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] -; CHECK-NEXT: br i1 [[OR_COND]], label [[RETURN:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[OR_COND]], label [[RETURN:%.*]], label [[IF_END:%.*]] +; CHECK: if.end: +; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[LEN]], 2 +; CHECK-NEXT: br i1 [[CMP4]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] ; CHECK: for.cond.preheader: ; CHECK-NEXT: [[CMP412:%.*]] = icmp sgt i32 [[LEN]], 0 -; CHECK-NEXT: br i1 [[CMP412]], label [[FOR_BODY:%.*]], label [[RETURN]] +; CHECK-NEXT: br i1 [[CMP412]], label [[FOR_BODY_CLONE:%.*]], label [[RETURN]] ; CHECK: for.body: -; CHECK-NEXT: [[I_013:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[I_013:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[IF_END]] ] ; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_013]], [[STEP_IN]] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[MUL]] ; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 @@ -24,8 +27,20 @@ define dso_local noundef i32 @dsps_addc_f32_ansi(ptr noundef readonly %input, pt ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_013]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[RETURN]], label [[FOR_BODY]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[I_013_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[MUL_CLONE:%.*]] = mul nsw i32 [[I_013_CLONE]], [[STEP_IN]] +; CHECK-NEXT: 
[[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[MUL_CLONE]] +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[ADD_CLONE:%.*]] = fadd float [[TMP1]], [[C]] +; CHECK-NEXT: [[MUL5_CLONE:%.*]] = mul nsw i32 [[I_013_CLONE]], [[STEP_OUT]] +; CHECK-NEXT: [[ARRAYIDX6_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL5_CLONE]] +; CHECK-NEXT: store float [[ADD_CLONE]], ptr [[ARRAYIDX6_CLONE]], align 4 +; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[I_013_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[RETURN]], label [[FOR_BODY_CLONE]] ; CHECK: return: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ] ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: diff --git a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/biquad.ll b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/biquad.ll index 171f7b3a475a6..54caa0d9fe5ed 100644 --- a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/biquad.ll +++ b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/biquad.ll @@ -1,12 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=true < %s | FileCheck %s ; Function Attrs: nofree norecurse nosync nounwind memory(argmem: readwrite) define dso_local noundef i32 @dsps_biquad_f32_ansi(ptr nocapture noundef readonly %input, ptr nocapture noundef writeonly %output, i32 noundef %len, ptr nocapture noundef readonly %coef, ptr 
nocapture noundef %w) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_biquad_f32_ansi( ; CHECK-SAME: ptr nocapture noundef readonly [[INPUT:%.*]], ptr nocapture noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], ptr nocapture noundef readonly [[COEF:%.*]], ptr nocapture noundef [[W:%.*]]) local_unnamed_addr { ; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = icmp sgt i32 [[LEN]], 2 +; CHECK-NEXT: br i1 [[TMP0]], label [[FOR_COND_PREHEADER:%.*]], label [[FOR_BODY_LR_PH_CLONE:%.*]] +; CHECK: for.cond.preheader: ; CHECK-NEXT: [[CMP30:%.*]] = icmp sgt i32 [[LEN]], 0 -; CHECK-NEXT: br i1 [[CMP30]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]] +; CHECK-NEXT: br i1 [[CMP30]], label [[FOR_BODY_LR_PH:%.*]], label [[IF_END:%.*]] ; CHECK: for.body.lr.ph: ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[COEF]], i32 3 ; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[COEF]], i32 4 @@ -16,34 +19,69 @@ define dso_local noundef i32 @dsps_biquad_f32_ansi(ptr nocapture noundef readonl ; CHECK-NEXT: [[DOTPRE:%.*]] = load float, ptr [[W]], align 4 ; CHECK-NEXT: [[DOTPRE32:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.cond.cleanup: +; CHECK: if.end: ; CHECK-NEXT: ret i32 0 ; CHECK: for.body: -; CHECK-NEXT: [[TMP0:%.*]] = phi float [ [[DOTPRE32]], [[FOR_BODY_LR_PH]] ], [ [[TMP12:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[TMP1:%.*]] = phi float [ [[DOTPRE]], [[FOR_BODY_LR_PH]] ], [ [[TMP6:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[TMP1:%.*]] = phi float [ [[DOTPRE32]], [[FOR_BODY_LR_PH]] ], [ [[TMP13:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[TMP2:%.*]] = phi float [ [[DOTPRE]], [[FOR_BODY_LR_PH]] ], [ [[TMP7:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[I_031:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_031]] -; CHECK-NEXT: [[TMP2:%.*]] = load float, 
ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 -; CHECK-NEXT: [[NEG:%.*]] = fneg float [[TMP3]] -; CHECK-NEXT: [[TMP4:%.*]] = tail call float @llvm.fmuladd.f32(float [[NEG]], float [[TMP1]], float [[TMP2]]) -; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX3]], align 4 -; CHECK-NEXT: [[NEG5:%.*]] = fneg float [[TMP5]] -; CHECK-NEXT: [[TMP6]] = tail call float @llvm.fmuladd.f32(float [[NEG5]], float [[TMP0]], float [[TMP4]]) -; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[COEF]], align 4 -; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX7]], align 4 -; CHECK-NEXT: [[MUL9:%.*]] = fmul float [[TMP1]], [[TMP8]] -; CHECK-NEXT: [[TMP9:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP7]], float [[TMP6]], float [[MUL9]]) -; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX10]], align 4 -; CHECK-NEXT: [[TMP11:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP10]], float [[TMP0]], float [[TMP9]]) +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 +; CHECK-NEXT: [[NEG:%.*]] = fneg float [[TMP4]] +; CHECK-NEXT: [[TMP5:%.*]] = tail call float @llvm.fmuladd.f32(float [[NEG]], float [[TMP2]], float [[TMP3]]) +; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX3]], align 4 +; CHECK-NEXT: [[NEG5:%.*]] = fneg float [[TMP6]] +; CHECK-NEXT: [[TMP7]] = tail call float @llvm.fmuladd.f32(float [[NEG5]], float [[TMP1]], float [[TMP5]]) +; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[COEF]], align 4 +; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX7]], align 4 +; CHECK-NEXT: [[MUL9:%.*]] = fmul float [[TMP2]], [[TMP9]] +; CHECK-NEXT: [[TMP10:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP8]], float [[TMP7]], float [[MUL9]]) +; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX10]], align 4 +; CHECK-NEXT: [[TMP12:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP11]], float [[TMP1]], float [[TMP10]]) ; 
CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_031]] -; CHECK-NEXT: store float [[TMP11]], ptr [[ARRAYIDX12]], align 4 -; CHECK-NEXT: [[TMP12]] = load float, ptr [[W]], align 4 -; CHECK-NEXT: store float [[TMP12]], ptr [[ARRAYIDX4]], align 4 -; CHECK-NEXT: store float [[TMP6]], ptr [[W]], align 4 +; CHECK-NEXT: store float [[TMP12]], ptr [[ARRAYIDX12]], align 4 +; CHECK-NEXT: [[TMP13]] = load float, ptr [[W]], align 4 +; CHECK-NEXT: store float [[TMP13]], ptr [[ARRAYIDX4]], align 4 +; CHECK-NEXT: store float [[TMP7]], ptr [[W]], align 4 ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_031]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[IF_END]], label [[FOR_BODY]] +; CHECK: for.body.lr.ph.clone: +; CHECK-NEXT: [[ARRAYIDX1_CLONE:%.*]] = getelementptr inbounds float, ptr [[COEF]], i32 3 +; CHECK-NEXT: [[ARRAYIDX3_CLONE:%.*]] = getelementptr inbounds float, ptr [[COEF]], i32 4 +; CHECK-NEXT: [[ARRAYIDX4_CLONE:%.*]] = getelementptr inbounds float, ptr [[W]], i32 1 +; CHECK-NEXT: [[ARRAYIDX7_CLONE:%.*]] = getelementptr inbounds float, ptr [[COEF]], i32 1 +; CHECK-NEXT: [[ARRAYIDX10_CLONE:%.*]] = getelementptr inbounds float, ptr [[COEF]], i32 2 +; CHECK-NEXT: [[DOTPRE_CLONE:%.*]] = load float, ptr [[W]], align 4 +; CHECK-NEXT: [[DOTPRE32_CLONE:%.*]] = load float, ptr [[ARRAYIDX4_CLONE]], align 4 +; CHECK-NEXT: br label [[FOR_BODY_CLONE:%.*]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[TMP14:%.*]] = phi float [ [[DOTPRE32_CLONE]], [[FOR_BODY_LR_PH_CLONE]] ], [ [[TMP26:%.*]], [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: [[TMP15:%.*]] = phi float [ [[DOTPRE_CLONE]], [[FOR_BODY_LR_PH_CLONE]] ], [ [[TMP20:%.*]], [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: [[I_031_CLONE:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH_CLONE]] ], [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = 
getelementptr inbounds float, ptr [[INPUT]], i32 [[I_031_CLONE]] +; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX1_CLONE]], align 4 +; CHECK-NEXT: [[NEG_CLONE:%.*]] = fneg float [[TMP17]] +; CHECK-NEXT: [[TMP18:%.*]] = tail call float @llvm.fmuladd.f32(float [[NEG_CLONE]], float [[TMP15]], float [[TMP16]]) +; CHECK-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX3_CLONE]], align 4 +; CHECK-NEXT: [[NEG5_CLONE:%.*]] = fneg float [[TMP19]] +; CHECK-NEXT: [[TMP20]] = tail call float @llvm.fmuladd.f32(float [[NEG5_CLONE]], float [[TMP14]], float [[TMP18]]) +; CHECK-NEXT: [[TMP21:%.*]] = load float, ptr [[COEF]], align 4 +; CHECK-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX7_CLONE]], align 4 +; CHECK-NEXT: [[MUL9_CLONE:%.*]] = fmul float [[TMP15]], [[TMP22]] +; CHECK-NEXT: [[TMP23:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP21]], float [[TMP20]], float [[MUL9_CLONE]]) +; CHECK-NEXT: [[TMP24:%.*]] = load float, ptr [[ARRAYIDX10_CLONE]], align 4 +; CHECK-NEXT: [[TMP25:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP24]], float [[TMP14]], float [[TMP23]]) +; CHECK-NEXT: [[ARRAYIDX12_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_031_CLONE]] +; CHECK-NEXT: store float [[TMP25]], ptr [[ARRAYIDX12_CLONE]], align 4 +; CHECK-NEXT: [[TMP26]] = load float, ptr [[W]], align 4 +; CHECK-NEXT: store float [[TMP26]], ptr [[ARRAYIDX4_CLONE]], align 4 +; CHECK-NEXT: store float [[TMP20]], ptr [[W]], align 4 +; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[I_031_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[IF_END]], label [[FOR_BODY_CLONE]] ; entry: %cmp30 = icmp sgt i32 %len, 0 diff --git a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/dotprod.ll b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/dotprod.ll index a0211470be14e..5a25fd4b234f7 100644 --- 
a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/dotprod.ll +++ b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/dotprod.ll @@ -1,27 +1,41 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=true < %s | FileCheck %s ; Function Attrs: nofree norecurse nosync nounwind memory(argmem: readwrite) define dso_local noundef i32 @dsps_dotprod_f32_ansi(ptr nocapture noundef readonly %src1, ptr nocapture noundef readonly %src2, ptr nocapture noundef writeonly %dest, i32 noundef %len) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_dotprod_f32_ansi( ; CHECK-SAME: ptr nocapture noundef readonly [[SRC1:%.*]], ptr nocapture noundef readonly [[SRC2:%.*]], ptr nocapture noundef writeonly [[DEST:%.*]], i32 noundef [[LEN:%.*]]) local_unnamed_addr { ; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = icmp sgt i32 [[LEN]], 2 +; CHECK-NEXT: br i1 [[TMP0]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK: for.cond.preheader: ; CHECK-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[LEN]], 0 -; CHECK-NEXT: br i1 [[CMP6]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]] -; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[TMP2:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: br i1 [[CMP6]], label [[FOR_BODY_CLONE:%.*]], label [[IF_END:%.*]] +; CHECK: if.end: +; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_PREHEADER]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ], [ [[TMP6:%.*]], [[FOR_BODY_CLONE]] ] ; CHECK-NEXT: store float [[ACC_0_LCSSA]], ptr [[DEST]], align 4 ; CHECK-NEXT: ret i32 0 ; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ] -; CHECK-NEXT: 
[[ACC_07:%.*]] = phi float [ [[TMP2]], [[FOR_BODY]] ], [ 0.000000e+00, [[ENTRY]] ] +; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[ACC_07:%.*]] = phi float [ [[TMP3]], [[FOR_BODY]] ], [ 0.000000e+00, [[ENTRY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[I_08]] -; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[I_08]] -; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 -; CHECK-NEXT: [[TMP2]] = tail call float @llvm.fmuladd.f32(float [[TMP0]], float [[TMP1]], float [[ACC_07]]) +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 +; CHECK-NEXT: [[TMP3]] = tail call float @llvm.fmuladd.f32(float [[TMP1]], float [[TMP2]], float [[ACC_07]]) ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[IF_END]], label [[FOR_BODY]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[I_08_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[ACC_07_CLONE:%.*]] = phi float [ [[TMP6]], [[FOR_BODY_CLONE]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[I_08_CLONE]] +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[ARRAYIDX1_CLONE:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[I_08_CLONE]] +; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX1_CLONE]], align 4 +; CHECK-NEXT: [[TMP6]] = tail call float @llvm.fmuladd.f32(float [[TMP4]], float [[TMP5]], float [[ACC_07_CLONE]]) +; CHECK-NEXT: [[INC_CLONE]] = add 
nuw nsw i32 [[I_08_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[IF_END]], label [[FOR_BODY_CLONE]] ; entry: %cmp6 = icmp sgt i32 %len, 0 diff --git a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/dotprode.ll b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/dotprode.ll index 38cce6ffa3e03..42f081e17c55c 100644 --- a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/dotprode.ll +++ b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/dotprode.ll @@ -1,29 +1,45 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=true < %s | FileCheck %s ; Function Attrs: nofree norecurse nosync nounwind memory(argmem: readwrite) define dso_local noundef i32 @dsps_dotprode_f32_ansi(ptr nocapture noundef readonly %src1, ptr nocapture noundef readonly %src2, ptr nocapture noundef writeonly %dest, i32 noundef %len, i32 noundef %step1, i32 noundef %step2) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_dotprode_f32_ansi( ; CHECK-SAME: ptr nocapture noundef readonly [[SRC1:%.*]], ptr nocapture noundef readonly [[SRC2:%.*]], ptr nocapture noundef writeonly [[DEST:%.*]], i32 noundef [[LEN:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[STEP2:%.*]]) local_unnamed_addr { ; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = icmp sgt i32 [[LEN]], 2 +; CHECK-NEXT: br i1 [[TMP0]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK: for.cond.preheader: ; CHECK-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[LEN]], 0 -; CHECK-NEXT: br i1 [[CMP8]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]] -; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ 0.000000e+00, 
[[ENTRY:%.*]] ], [ [[TMP2:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: br i1 [[CMP8]], label [[FOR_BODY_CLONE:%.*]], label [[IF_END:%.*]] +; CHECK: if.end: +; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_PREHEADER]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ], [ [[TMP6:%.*]], [[FOR_BODY_CLONE]] ] ; CHECK-NEXT: store float [[ACC_0_LCSSA]], ptr [[DEST]], align 4 ; CHECK-NEXT: ret i32 0 ; CHECK: for.body: -; CHECK-NEXT: [[I_010:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ] -; CHECK-NEXT: [[ACC_09:%.*]] = phi float [ [[TMP2]], [[FOR_BODY]] ], [ 0.000000e+00, [[ENTRY]] ] +; CHECK-NEXT: [[I_010:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[ACC_09:%.*]] = phi float [ [[TMP3]], [[FOR_BODY]] ], [ 0.000000e+00, [[ENTRY]] ] ; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_010]], [[STEP1]] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[MUL]] -; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[MUL1:%.*]] = mul nsw i32 [[I_010]], [[STEP2]] ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[MUL1]] -; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[TMP2]] = tail call float @llvm.fmuladd.f32(float [[TMP0]], float [[TMP1]], float [[ACC_09]]) +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +; CHECK-NEXT: [[TMP3]] = tail call float @llvm.fmuladd.f32(float [[TMP1]], float [[TMP2]], float [[ACC_09]]) ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_010]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[IF_END]], label [[FOR_BODY]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[I_010_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, 
[[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[ACC_09_CLONE:%.*]] = phi float [ [[TMP6]], [[FOR_BODY_CLONE]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[MUL_CLONE:%.*]] = mul nsw i32 [[I_010_CLONE]], [[STEP1]] +; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[MUL_CLONE]] +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[MUL1_CLONE:%.*]] = mul nsw i32 [[I_010_CLONE]], [[STEP2]] +; CHECK-NEXT: [[ARRAYIDX2_CLONE:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[MUL1_CLONE]] +; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX2_CLONE]], align 4 +; CHECK-NEXT: [[TMP6]] = tail call float @llvm.fmuladd.f32(float [[TMP4]], float [[TMP5]], float [[ACC_09_CLONE]]) +; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[I_010_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[IF_END]], label [[FOR_BODY_CLONE]] ; entry: %cmp8 = icmp sgt i32 %len, 0 diff --git a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/fir.ll b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/fir.ll index 3e188c7a4a973..fba750345e84a 100644 --- a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/fir.ll +++ b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/fir.ll @@ -1,81 +1,150 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=true < %s | FileCheck %s %struct.fir_f32_s = type { ptr, ptr, i32, i32, i32, i16 } ; Function Attrs: nofree norecurse nosync nounwind memory(readwrite, inaccessiblemem: none) define dso_local noundef i32 @dsps_fir_f32_ansi(ptr nocapture noundef %fir, ptr nocapture noundef readonly %input, ptr nocapture noundef 
writeonly %output, i32 noundef %len) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_fir_f32_ansi( ; CHECK-SAME: ptr nocapture noundef [[FIR:%.*]], ptr nocapture noundef readonly [[INPUT:%.*]], ptr nocapture noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]]) local_unnamed_addr { ; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = icmp sgt i32 [[LEN]], 2 +; CHECK-NEXT: br i1 [[TMP0]], label [[FOR_COND_PREHEADER:%.*]], label [[FOR_BODY_LR_PH_CLONE:%.*]] +; CHECK: for.cond.preheader: ; CHECK-NEXT: [[CMP67:%.*]] = icmp sgt i32 [[LEN]], 0 -; CHECK-NEXT: br i1 [[CMP67]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]] +; CHECK-NEXT: br i1 [[CMP67]], label [[FOR_BODY_LR_PH:%.*]], label [[IF_END:%.*]] ; CHECK: for.body.lr.ph: ; CHECK-NEXT: [[DELAY:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S:%.*]], ptr [[FIR]], i32 0, i32 1 -; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DELAY]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DELAY]], align 4 ; CHECK-NEXT: [[POS:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S]], ptr [[FIR]], i32 0, i32 3 ; CHECK-NEXT: [[N:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S]], ptr [[FIR]], i32 0, i32 2 -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[N]], align 4 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[N]], align 4 ; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, ptr [[POS]], align 4 ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.cond.cleanup: +; CHECK: if.end: ; CHECK-NEXT: ret i32 0 ; CHECK: for.body: -; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[DOTPRE]], [[FOR_BODY_LR_PH]] ], [ [[SPEC_STORE_SELECT:%.*]], [[FOR_COND_CLEANUP21:%.*]] ] +; CHECK-NEXT: [[TMP3:%.*]] = phi i32 [ [[DOTPRE]], [[FOR_BODY_LR_PH]] ], [ [[SPEC_STORE_SELECT:%.*]], [[FOR_COND_CLEANUP21:%.*]] ] ; CHECK-NEXT: [[I_068:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC33:%.*]], [[FOR_COND_CLEANUP21]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_068]] -; CHECK-NEXT: [[TMP3:%.*]] = 
load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[TMP0]], i32 [[TMP2]] -; CHECK-NEXT: store float [[TMP3]], ptr [[ARRAYIDX1]], align 4 -; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP2]], 1 -; CHECK-NEXT: [[CMP4_NOT:%.*]] = icmp slt i32 [[INC]], [[TMP1]] +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 [[TMP3]] +; CHECK-NEXT: store float [[TMP4]], ptr [[ARRAYIDX1]], align 4 +; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP3]], 1 +; CHECK-NEXT: [[CMP4_NOT:%.*]] = icmp slt i32 [[INC]], [[TMP2]] ; CHECK-NEXT: [[SPEC_STORE_SELECT]] = select i1 [[CMP4_NOT]], i32 [[INC]], i32 0 ; CHECK-NEXT: store i32 [[SPEC_STORE_SELECT]], ptr [[POS]], align 4 -; CHECK-NEXT: [[CMP957:%.*]] = icmp slt i32 [[SPEC_STORE_SELECT]], [[TMP1]] +; CHECK-NEXT: [[CMP957:%.*]] = icmp slt i32 [[SPEC_STORE_SELECT]], [[TMP2]] ; CHECK-NEXT: br i1 [[CMP957]], label [[FOR_BODY11_LR_PH:%.*]], label [[FOR_COND18_PREHEADER:%.*]] ; CHECK: for.body11.lr.ph: -; CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[FIR]], align 4 -; CHECK-NEXT: [[TMP5:%.*]] = sub i32 [[TMP1]], [[SPEC_STORE_SELECT]] +; CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[FIR]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = sub i32 [[TMP2]], [[SPEC_STORE_SELECT]] ; CHECK-NEXT: br label [[FOR_BODY11:%.*]] ; CHECK: for.cond18.preheader: -; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP9:%.*]], [[FOR_BODY11]] ] -; CHECK-NEXT: [[COEFF_POS_0_LCSSA:%.*]] = phi i32 [ 0, [[FOR_BODY]] ], [ [[TMP5]], [[FOR_BODY11]] ] +; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP10:%.*]], [[FOR_BODY11]] ] +; CHECK-NEXT: [[COEFF_POS_0_LCSSA:%.*]] = phi i32 [ 0, [[FOR_BODY]] ], [ [[TMP6]], [[FOR_BODY11]] ] ; CHECK-NEXT: [[CMP2062:%.*]] = icmp sgt i32 [[SPEC_STORE_SELECT]], 0 ; CHECK-NEXT: br i1 [[CMP2062]], label [[FOR_BODY22_LR_PH:%.*]], label 
[[FOR_COND_CLEANUP21]] ; CHECK: for.body22.lr.ph: -; CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[FIR]], align 4 +; CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[FIR]], align 4 ; CHECK-NEXT: br label [[FOR_BODY22:%.*]] ; CHECK: for.body11: ; CHECK-NEXT: [[N_060:%.*]] = phi i32 [ [[SPEC_STORE_SELECT]], [[FOR_BODY11_LR_PH]] ], [ [[INC16:%.*]], [[FOR_BODY11]] ] ; CHECK-NEXT: [[COEFF_POS_059:%.*]] = phi i32 [ 0, [[FOR_BODY11_LR_PH]] ], [ [[INC12:%.*]], [[FOR_BODY11]] ] -; CHECK-NEXT: [[ACC_058:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY11_LR_PH]] ], [ [[TMP9]], [[FOR_BODY11]] ] +; CHECK-NEXT: [[ACC_058:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY11_LR_PH]] ], [ [[TMP10]], [[FOR_BODY11]] ] ; CHECK-NEXT: [[INC12]] = add nuw i32 [[COEFF_POS_059]], 1 -; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i32 [[COEFF_POS_059]] -; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX13]], align 4 -; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds float, ptr [[TMP0]], i32 [[N_060]] -; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX15]], align 4 -; CHECK-NEXT: [[TMP9]] = tail call float @llvm.fmuladd.f32(float [[TMP7]], float [[TMP8]], float [[ACC_058]]) +; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i32 [[COEFF_POS_059]] +; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX13]], align 4 +; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 [[N_060]] +; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX15]], align 4 +; CHECK-NEXT: [[TMP10]] = tail call float @llvm.fmuladd.f32(float [[TMP8]], float [[TMP9]], float [[ACC_058]]) ; CHECK-NEXT: [[INC16]] = add nsw i32 [[N_060]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC12]], [[TMP5]] +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC12]], [[TMP6]] ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND18_PREHEADER]], label [[FOR_BODY11]] ; CHECK: for.cond.cleanup21: -; CHECK-NEXT: [[ACC_1_LCSSA:%.*]] = phi 
float [ [[ACC_0_LCSSA]], [[FOR_COND18_PREHEADER]] ], [ [[TMP12:%.*]], [[FOR_BODY22]] ] +; CHECK-NEXT: [[ACC_1_LCSSA:%.*]] = phi float [ [[ACC_0_LCSSA]], [[FOR_COND18_PREHEADER]] ], [ [[TMP13:%.*]], [[FOR_BODY22]] ] ; CHECK-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_068]] ; CHECK-NEXT: store float [[ACC_1_LCSSA]], ptr [[ARRAYIDX31]], align 4 ; CHECK-NEXT: [[INC33]] = add nuw nsw i32 [[I_068]], 1 ; CHECK-NEXT: [[EXITCOND71_NOT:%.*]] = icmp eq i32 [[INC33]], [[LEN]] -; CHECK-NEXT: br i1 [[EXITCOND71_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] +; CHECK-NEXT: br i1 [[EXITCOND71_NOT]], label [[IF_END]], label [[FOR_BODY]] ; CHECK: for.body22: ; CHECK-NEXT: [[N17_065:%.*]] = phi i32 [ 0, [[FOR_BODY22_LR_PH]] ], [ [[INC29:%.*]], [[FOR_BODY22]] ] ; CHECK-NEXT: [[COEFF_POS_164:%.*]] = phi i32 [ [[COEFF_POS_0_LCSSA]], [[FOR_BODY22_LR_PH]] ], [ [[INC24:%.*]], [[FOR_BODY22]] ] -; CHECK-NEXT: [[ACC_163:%.*]] = phi float [ [[ACC_0_LCSSA]], [[FOR_BODY22_LR_PH]] ], [ [[TMP12]], [[FOR_BODY22]] ] +; CHECK-NEXT: [[ACC_163:%.*]] = phi float [ [[ACC_0_LCSSA]], [[FOR_BODY22_LR_PH]] ], [ [[TMP13]], [[FOR_BODY22]] ] ; CHECK-NEXT: [[INC24]] = add nuw nsw i32 [[COEFF_POS_164]], 1 -; CHECK-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i32 [[COEFF_POS_164]] -; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX25]], align 4 -; CHECK-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds float, ptr [[TMP0]], i32 [[N17_065]] -; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX27]], align 4 -; CHECK-NEXT: [[TMP12]] = tail call float @llvm.fmuladd.f32(float [[TMP10]], float [[TMP11]], float [[ACC_163]]) +; CHECK-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 [[COEFF_POS_164]] +; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX25]], align 4 +; CHECK-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 [[N17_065]] +; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr 
[[ARRAYIDX27]], align 4 +; CHECK-NEXT: [[TMP13]] = tail call float @llvm.fmuladd.f32(float [[TMP11]], float [[TMP12]], float [[ACC_163]]) ; CHECK-NEXT: [[INC29]] = add nuw nsw i32 [[N17_065]], 1 ; CHECK-NEXT: [[EXITCOND70_NOT:%.*]] = icmp eq i32 [[INC29]], [[SPEC_STORE_SELECT]] ; CHECK-NEXT: br i1 [[EXITCOND70_NOT]], label [[FOR_COND_CLEANUP21]], label [[FOR_BODY22]] +; CHECK: for.body.lr.ph.clone: +; CHECK-NEXT: [[DELAY_CLONE:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S]], ptr [[FIR]], i32 0, i32 1 +; CHECK-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DELAY_CLONE]], align 4 +; CHECK-NEXT: [[POS_CLONE:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S]], ptr [[FIR]], i32 0, i32 3 +; CHECK-NEXT: [[N_CLONE:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S]], ptr [[FIR]], i32 0, i32 2 +; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[N_CLONE]], align 4 +; CHECK-NEXT: [[DOTPRE_CLONE:%.*]] = load i32, ptr [[POS_CLONE]], align 4 +; CHECK-NEXT: br label [[FOR_BODY_CLONE:%.*]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[TMP16:%.*]] = phi i32 [ [[DOTPRE_CLONE]], [[FOR_BODY_LR_PH_CLONE]] ], [ [[SPEC_STORE_SELECT_CLONE:%.*]], [[FOR_COND_CLEANUP21_CLONE:%.*]] ] +; CHECK-NEXT: [[I_068_CLONE:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH_CLONE]] ], [ [[INC33_CLONE:%.*]], [[FOR_COND_CLEANUP21_CLONE]] ] +; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_068_CLONE]] +; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[ARRAYIDX1_CLONE:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[TMP16]] +; CHECK-NEXT: store float [[TMP17]], ptr [[ARRAYIDX1_CLONE]], align 4 +; CHECK-NEXT: [[INC_CLONE:%.*]] = add nsw i32 [[TMP16]], 1 +; CHECK-NEXT: [[CMP4_NOT_CLONE:%.*]] = icmp slt i32 [[INC_CLONE]], [[TMP15]] +; CHECK-NEXT: [[SPEC_STORE_SELECT_CLONE]] = select i1 [[CMP4_NOT_CLONE]], i32 [[INC_CLONE]], i32 0 +; CHECK-NEXT: store i32 [[SPEC_STORE_SELECT_CLONE]], ptr [[POS_CLONE]], align 4 +; CHECK-NEXT: [[CMP957_CLONE:%.*]] 
= icmp slt i32 [[SPEC_STORE_SELECT_CLONE]], [[TMP15]] +; CHECK-NEXT: br i1 [[CMP957_CLONE]], label [[FOR_BODY11_LR_PH_CLONE:%.*]], label [[FOR_COND18_PREHEADER_CLONE:%.*]] +; CHECK: for.body11.lr.ph.clone: +; CHECK-NEXT: [[TMP18:%.*]] = load ptr, ptr [[FIR]], align 4 +; CHECK-NEXT: [[TMP19:%.*]] = sub i32 [[TMP15]], [[SPEC_STORE_SELECT_CLONE]] +; CHECK-NEXT: br label [[FOR_BODY11_CLONE:%.*]] +; CHECK: for.body11.clone: +; CHECK-NEXT: [[N_060_CLONE:%.*]] = phi i32 [ [[SPEC_STORE_SELECT_CLONE]], [[FOR_BODY11_LR_PH_CLONE]] ], [ [[INC16_CLONE:%.*]], [[FOR_BODY11_CLONE]] ] +; CHECK-NEXT: [[COEFF_POS_059_CLONE:%.*]] = phi i32 [ 0, [[FOR_BODY11_LR_PH_CLONE]] ], [ [[INC12_CLONE:%.*]], [[FOR_BODY11_CLONE]] ] +; CHECK-NEXT: [[ACC_058_CLONE:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY11_LR_PH_CLONE]] ], [ [[TMP22:%.*]], [[FOR_BODY11_CLONE]] ] +; CHECK-NEXT: [[INC12_CLONE]] = add nuw i32 [[COEFF_POS_059_CLONE]], 1 +; CHECK-NEXT: [[ARRAYIDX13_CLONE:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i32 [[COEFF_POS_059_CLONE]] +; CHECK-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX13_CLONE]], align 4 +; CHECK-NEXT: [[ARRAYIDX15_CLONE:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[N_060_CLONE]] +; CHECK-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX15_CLONE]], align 4 +; CHECK-NEXT: [[TMP22]] = tail call float @llvm.fmuladd.f32(float [[TMP20]], float [[TMP21]], float [[ACC_058_CLONE]]) +; CHECK-NEXT: [[INC16_CLONE]] = add nsw i32 [[N_060_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC12_CLONE]], [[TMP19]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[FOR_COND18_PREHEADER_CLONE]], label [[FOR_BODY11_CLONE]] +; CHECK: for.cond18.preheader.clone: +; CHECK-NEXT: [[ACC_0_LCSSA_CLONE:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY_CLONE]] ], [ [[TMP22]], [[FOR_BODY11_CLONE]] ] +; CHECK-NEXT: [[COEFF_POS_0_LCSSA_CLONE:%.*]] = phi i32 [ 0, [[FOR_BODY_CLONE]] ], [ [[TMP19]], [[FOR_BODY11_CLONE]] ] +; CHECK-NEXT: [[CMP2062_CLONE:%.*]] = icmp 
sgt i32 [[SPEC_STORE_SELECT_CLONE]], 0 +; CHECK-NEXT: br i1 [[CMP2062_CLONE]], label [[FOR_BODY22_LR_PH_CLONE:%.*]], label [[FOR_COND_CLEANUP21_CLONE]] +; CHECK: for.body22.lr.ph.clone: +; CHECK-NEXT: [[TMP23:%.*]] = load ptr, ptr [[FIR]], align 4 +; CHECK-NEXT: br label [[FOR_BODY22_CLONE:%.*]] +; CHECK: for.body22.clone: +; CHECK-NEXT: [[N17_065_CLONE:%.*]] = phi i32 [ 0, [[FOR_BODY22_LR_PH_CLONE]] ], [ [[INC29_CLONE:%.*]], [[FOR_BODY22_CLONE]] ] +; CHECK-NEXT: [[COEFF_POS_164_CLONE:%.*]] = phi i32 [ [[COEFF_POS_0_LCSSA_CLONE]], [[FOR_BODY22_LR_PH_CLONE]] ], [ [[INC24_CLONE:%.*]], [[FOR_BODY22_CLONE]] ] +; CHECK-NEXT: [[ACC_163_CLONE:%.*]] = phi float [ [[ACC_0_LCSSA_CLONE]], [[FOR_BODY22_LR_PH_CLONE]] ], [ [[TMP26:%.*]], [[FOR_BODY22_CLONE]] ] +; CHECK-NEXT: [[INC24_CLONE]] = add nuw nsw i32 [[COEFF_POS_164_CLONE]], 1 +; CHECK-NEXT: [[ARRAYIDX25_CLONE:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i32 [[COEFF_POS_164_CLONE]] +; CHECK-NEXT: [[TMP24:%.*]] = load float, ptr [[ARRAYIDX25_CLONE]], align 4 +; CHECK-NEXT: [[ARRAYIDX27_CLONE:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[N17_065_CLONE]] +; CHECK-NEXT: [[TMP25:%.*]] = load float, ptr [[ARRAYIDX27_CLONE]], align 4 +; CHECK-NEXT: [[TMP26]] = tail call float @llvm.fmuladd.f32(float [[TMP24]], float [[TMP25]], float [[ACC_163_CLONE]]) +; CHECK-NEXT: [[INC29_CLONE]] = add nuw nsw i32 [[N17_065_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND70_NOT_CLONE:%.*]] = icmp eq i32 [[INC29_CLONE]], [[SPEC_STORE_SELECT_CLONE]] +; CHECK-NEXT: br i1 [[EXITCOND70_NOT_CLONE]], label [[FOR_COND_CLEANUP21_CLONE]], label [[FOR_BODY22_CLONE]] +; CHECK: for.cond.cleanup21.clone: +; CHECK-NEXT: [[ACC_1_LCSSA_CLONE:%.*]] = phi float [ [[ACC_0_LCSSA_CLONE]], [[FOR_COND18_PREHEADER_CLONE]] ], [ [[TMP26]], [[FOR_BODY22_CLONE]] ] +; CHECK-NEXT: [[ARRAYIDX31_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_068_CLONE]] +; CHECK-NEXT: store float [[ACC_1_LCSSA_CLONE]], ptr [[ARRAYIDX31_CLONE]], align 4 +; 
CHECK-NEXT: [[INC33_CLONE]] = add nuw nsw i32 [[I_068_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND71_NOT_CLONE:%.*]] = icmp eq i32 [[INC33_CLONE]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND71_NOT_CLONE]], label [[IF_END]], label [[FOR_BODY_CLONE]] ; entry: %cmp67 = icmp sgt i32 %len, 0 diff --git a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/mul.ll b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/mul.ll index 79c7754e70151..419848ce9ce95 100644 --- a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/mul.ll +++ b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/mul.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=true < %s | FileCheck %s ; Function Attrs: nofree norecurse nosync nounwind memory(argmem: readwrite) define dso_local noundef i32 @dsps_mul_f32_ansi(ptr noundef readonly %input1, ptr noundef readonly %input2, ptr noundef writeonly %output, i32 noundef %len, i32 noundef %step1, i32 noundef %step2, i32 noundef %step_out) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_mul_f32_ansi( @@ -10,12 +10,15 @@ define dso_local noundef i32 @dsps_mul_f32_ansi(ptr noundef readonly %input1, pt ; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] ; CHECK-NEXT: [[CMP4:%.*]] = icmp eq ptr [[OUTPUT]], null ; CHECK-NEXT: [[OR_COND20:%.*]] = or i1 [[OR_COND]], [[CMP4]] -; CHECK-NEXT: br i1 [[OR_COND20]], label [[RETURN:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[OR_COND20]], label [[RETURN:%.*]], label [[IF_END:%.*]] +; CHECK: if.end: +; CHECK-NEXT: [[CMP41:%.*]] = icmp sgt i32 [[LEN]], 2 +; CHECK-NEXT: br i1 [[CMP41]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] ; CHECK: for.cond.preheader: ; CHECK-NEXT: [[CMP721:%.*]] = 
icmp sgt i32 [[LEN]], 0 -; CHECK-NEXT: br i1 [[CMP721]], label [[FOR_BODY:%.*]], label [[RETURN]] +; CHECK-NEXT: br i1 [[CMP721]], label [[FOR_BODY_CLONE:%.*]], label [[RETURN]] ; CHECK: for.body: -; CHECK-NEXT: [[I_022:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[I_022:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[IF_END]] ] ; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_022]], [[STEP1]] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL]] ; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 @@ -29,8 +32,23 @@ define dso_local noundef i32 @dsps_mul_f32_ansi(ptr noundef readonly %input1, pt ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_022]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[RETURN]], label [[FOR_BODY]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[I_022_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[MUL_CLONE:%.*]] = mul nsw i32 [[I_022_CLONE]], [[STEP1]] +; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL_CLONE]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[MUL8_CLONE:%.*]] = mul nsw i32 [[I_022_CLONE]], [[STEP2]] +; CHECK-NEXT: [[ARRAYIDX9_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[MUL8_CLONE]] +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX9_CLONE]], align 4 +; CHECK-NEXT: [[MUL10_CLONE:%.*]] = fmul float [[TMP2]], [[TMP3]] +; CHECK-NEXT: [[MUL11_CLONE:%.*]] = mul nsw i32 [[I_022_CLONE]], [[STEP_OUT]] +; CHECK-NEXT: [[ARRAYIDX12_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL11_CLONE]] +; CHECK-NEXT: store float [[MUL10_CLONE]], ptr [[ARRAYIDX12_CLONE]], align 4 +; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[I_022_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq 
i32 [[INC_CLONE]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[RETURN]], label [[FOR_BODY_CLONE]] ; CHECK: return: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ] ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: diff --git a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/mulc.ll b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/mulc.ll index ff58e8447f53e..cf3409b7faaa9 100644 --- a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/mulc.ll +++ b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/mulc.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=true < %s | FileCheck %s ; Function Attrs: nofree norecurse nosync nounwind memory(argmem: readwrite) define dso_local noundef i32 @dsps_mulc_f32_ansi(ptr noalias noundef readonly %input, ptr noalias noundef writeonly %output, i32 noundef %len, float noundef %C, i32 noundef %step_in, i32 noundef %step_out) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_mulc_f32_ansi( @@ -8,12 +8,15 @@ define dso_local noundef i32 @dsps_mulc_f32_ansi(ptr noalias noundef readonly %i ; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[INPUT]], null ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[OUTPUT]], null ; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] -; CHECK-NEXT: br i1 [[OR_COND]], label [[RETURN:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[OR_COND]], label [[RETURN:%.*]], label [[IF_END:%.*]] +; CHECK: if.end: +; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[LEN]], 2 +; 
CHECK-NEXT: br i1 [[CMP4]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] ; CHECK: for.cond.preheader: ; CHECK-NEXT: [[CMP413:%.*]] = icmp sgt i32 [[LEN]], 0 -; CHECK-NEXT: br i1 [[CMP413]], label [[FOR_BODY:%.*]], label [[RETURN]] +; CHECK-NEXT: br i1 [[CMP413]], label [[FOR_BODY_CLONE:%.*]], label [[RETURN]] ; CHECK: for.body: -; CHECK-NEXT: [[I_014:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[I_014:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[IF_END]] ] ; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_014]], [[STEP_IN]] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[MUL]] ; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 @@ -24,8 +27,20 @@ define dso_local noundef i32 @dsps_mulc_f32_ansi(ptr noalias noundef readonly %i ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_014]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[RETURN]], label [[FOR_BODY]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[I_014_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[MUL_CLONE:%.*]] = mul nsw i32 [[I_014_CLONE]], [[STEP_IN]] +; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[MUL_CLONE]] +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[MUL5_CLONE:%.*]] = fmul float [[TMP1]], [[C]] +; CHECK-NEXT: [[MUL6_CLONE:%.*]] = mul nsw i32 [[I_014_CLONE]], [[STEP_OUT]] +; CHECK-NEXT: [[ARRAYIDX7_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL6_CLONE]] +; CHECK-NEXT: store float [[MUL5_CLONE]], ptr [[ARRAYIDX7_CLONE]], align 4 +; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[I_014_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[RETURN]], label [[FOR_BODY_CLONE]] ; 
CHECK: return: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ] ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: diff --git a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/sqrt.ll b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/sqrt.ll index 414c1dfd43d23..2d76d9990bdfd 100644 --- a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/sqrt.ll +++ b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/sqrt.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=true < %s | FileCheck %s ; Function Attrs: nofree norecurse nosync nounwind memory(argmem: readwrite) define dso_local noundef i32 @dsps_sqrt_f32_ansi(ptr noundef readonly %input, ptr noundef writeonly %output, i32 noundef %len) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_sqrt_f32_ansi( @@ -8,12 +8,15 @@ define dso_local noundef i32 @dsps_sqrt_f32_ansi(ptr noundef readonly %input, pt ; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[INPUT]], null ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[OUTPUT]], null ; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] -; CHECK-NEXT: br i1 [[OR_COND]], label [[RETURN:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[OR_COND]], label [[RETURN:%.*]], label [[IF_END:%.*]] +; CHECK: if.end: +; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[LEN]], 2 +; CHECK-NEXT: br i1 [[CMP4]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] ; CHECK: for.cond.preheader: ; CHECK-NEXT: [[CMP411:%.*]] = icmp sgt i32 [[LEN]], 0 -; CHECK-NEXT: br i1 
[[CMP411]], label [[FOR_BODY:%.*]], label [[RETURN]] +; CHECK-NEXT: br i1 [[CMP411]], label [[FOR_BODY_CLONE:%.*]], label [[RETURN]] ; CHECK: for.body: -; CHECK-NEXT: [[I_012:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[I_012:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[IF_END]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_012]] ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[SHR_I:%.*]] = ashr i32 [[TMP0]], 1 @@ -23,8 +26,19 @@ define dso_local noundef i32 @dsps_sqrt_f32_ansi(ptr noundef readonly %input, pt ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_012]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[RETURN]], label [[FOR_BODY]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[I_012_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_012_CLONE]] +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[SHR_I_CLONE:%.*]] = ashr i32 [[TMP1]], 1 +; CHECK-NEXT: [[ADD_I_CLONE:%.*]] = add nsw i32 [[SHR_I_CLONE]], 532365312 +; CHECK-NEXT: [[ARRAYIDX5_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_012_CLONE]] +; CHECK-NEXT: store i32 [[ADD_I_CLONE]], ptr [[ARRAYIDX5_CLONE]], align 4 +; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[I_012_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[RETURN]], label [[FOR_BODY_CLONE]] ; CHECK: return: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ] ; 
CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: diff --git a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/sub.ll b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/sub.ll index a473a0e83c7a2..f8d0852af686b 100644 --- a/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/sub.ll +++ b/llvm/test/CodeGen/RISCV/RISCVSplitLoopByLength/sub.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-split-loop-by-length -riscv-split-loop-by-length=true < %s | FileCheck %s ; Function Attrs: nofree norecurse nosync nounwind memory(argmem: readwrite) define dso_local noundef i32 @dsps_sub_f32_ansi(ptr noundef readonly %input1, ptr noundef readonly %input2, ptr noundef writeonly %output, i32 noundef %len, i32 noundef %step1, i32 noundef %step2, i32 noundef %step_out) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_sub_f32_ansi( @@ -10,12 +10,15 @@ define dso_local noundef i32 @dsps_sub_f32_ansi(ptr noundef readonly %input1, pt ; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] ; CHECK-NEXT: [[CMP4:%.*]] = icmp eq ptr [[OUTPUT]], null ; CHECK-NEXT: [[OR_COND19:%.*]] = or i1 [[OR_COND]], [[CMP4]] -; CHECK-NEXT: br i1 [[OR_COND19]], label [[RETURN:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[OR_COND19]], label [[RETURN:%.*]], label [[IF_END:%.*]] +; CHECK: if.end: +; CHECK-NEXT: [[CMP41:%.*]] = icmp sgt i32 [[LEN]], 2 +; CHECK-NEXT: br i1 [[CMP41]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] ; CHECK: for.cond.preheader: ; CHECK-NEXT: [[CMP720:%.*]] = icmp sgt i32 [[LEN]], 0 -; CHECK-NEXT: br i1 [[CMP720]], label [[FOR_BODY:%.*]], label [[RETURN]] +; CHECK-NEXT: br i1 [[CMP720]], label [[FOR_BODY_CLONE:%.*]], label [[RETURN]] ; CHECK: for.body: -; CHECK-NEXT: [[I_021:%.*]] = phi i32 
[ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[I_021:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[IF_END]] ] ; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_021]], [[STEP1]] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL]] ; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 @@ -29,8 +32,23 @@ define dso_local noundef i32 @dsps_sub_f32_ansi(ptr noundef readonly %input1, pt ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_021]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[RETURN]], label [[FOR_BODY]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[I_021_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[MUL_CLONE:%.*]] = mul nsw i32 [[I_021_CLONE]], [[STEP1]] +; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL_CLONE]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[MUL8_CLONE:%.*]] = mul nsw i32 [[I_021_CLONE]], [[STEP2]] +; CHECK-NEXT: [[ARRAYIDX9_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[MUL8_CLONE]] +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX9_CLONE]], align 4 +; CHECK-NEXT: [[SUB_CLONE:%.*]] = fsub float [[TMP2]], [[TMP3]] +; CHECK-NEXT: [[MUL10_CLONE:%.*]] = mul nsw i32 [[I_021_CLONE]], [[STEP_OUT]] +; CHECK-NEXT: [[ARRAYIDX11_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL10_CLONE]] +; CHECK-NEXT: store float [[SUB_CLONE]], ptr [[ARRAYIDX11_CLONE]], align 4 +; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[I_021_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[RETURN]], label [[FOR_BODY_CLONE]] ; CHECK: return: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, 
[[FOR_BODY]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ] ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: From dedb8cc1deaff434bfeeaf4a7a5d2f8cc4c428ec Mon Sep 17 00:00:00 2001 From: "chen.qian" Date: Tue, 15 Oct 2024 10:28:44 +0800 Subject: [PATCH 266/289] [Test] add biquad test case for CustomLICM Pass --- .../CodeGen/RISCV/RISCVCustomLICM/biquad.ll | 169 ++++++++++++++++++ 1 file changed, 169 insertions(+) create mode 100644 llvm/test/CodeGen/RISCV/RISCVCustomLICM/biquad.ll diff --git a/llvm/test/CodeGen/RISCV/RISCVCustomLICM/biquad.ll b/llvm/test/CodeGen/RISCV/RISCVCustomLICM/biquad.ll new file mode 100644 index 0000000000000..a1ae7645abf84 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVCustomLICM/biquad.ll @@ -0,0 +1,169 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-custom-licm -riscv-custom-licm=false < %s | FileCheck %s +define dso_local noundef i32 @dsps_biquad_f32_ansi(ptr nocapture noundef readonly %input, ptr nocapture noundef writeonly %output, i32 noundef %len, ptr nocapture noundef readonly %coef, ptr nocapture noundef %w) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_biquad_f32_ansi( +; CHECK-SAME: ptr nocapture noundef readonly [[INPUT:%.*]], ptr nocapture noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], ptr nocapture noundef readonly [[COEF:%.*]], ptr nocapture noundef [[W:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = icmp sgt i32 [[LEN]], 2 +; CHECK-NEXT: br i1 [[TMP0]], label [[FOR_COND_PREHEADER:%.*]], label [[FOR_BODY_LR_PH_CLONE:%.*]] +; CHECK: for.cond.preheader: +; CHECK-NEXT: [[CMP30:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP30]], label [[FOR_BODY_LR_PH:%.*]], label [[IF_END:%.*]] +; CHECK: for.body.lr.ph: +; CHECK-NEXT: [[ARRAYIDX1:%.*]] = 
getelementptr inbounds float, ptr [[COEF]], i32 3 +; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[COEF]], i32 4 +; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[W]], i32 1 +; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[COEF]], i32 1 +; CHECK-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[COEF]], i32 2 +; CHECK-NEXT: [[DOTPRE:%.*]] = load float, ptr [[W]], align 4 +; CHECK-NEXT: [[DOTPRE32:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: if.end: +; CHECK-NEXT: ret i32 0 +; CHECK: for.body: +; CHECK-NEXT: [[TMP1:%.*]] = phi float [ [[DOTPRE32]], [[FOR_BODY_LR_PH]] ], [ [[TMP13:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[TMP2:%.*]] = phi float [ [[DOTPRE]], [[FOR_BODY_LR_PH]] ], [ [[TMP7:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[I_031:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_031]] +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 +; CHECK-NEXT: [[NEG:%.*]] = fneg float [[TMP4]] +; CHECK-NEXT: [[TMP5:%.*]] = tail call float @llvm.fmuladd.f32(float [[NEG]], float [[TMP2]], float [[TMP3]]) +; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX3]], align 4 +; CHECK-NEXT: [[NEG5:%.*]] = fneg float [[TMP6]] +; CHECK-NEXT: [[TMP7]] = tail call float @llvm.fmuladd.f32(float [[NEG5]], float [[TMP1]], float [[TMP5]]) +; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[COEF]], align 4 +; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX7]], align 4 +; CHECK-NEXT: [[MUL9:%.*]] = fmul float [[TMP2]], [[TMP9]] +; CHECK-NEXT: [[TMP10:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP8]], float [[TMP7]], float [[MUL9]]) +; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX10]], align 4 +; CHECK-NEXT: [[TMP12:%.*]] = tail call float 
@llvm.fmuladd.f32(float [[TMP11]], float [[TMP1]], float [[TMP10]]) +; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_031]] +; CHECK-NEXT: store float [[TMP12]], ptr [[ARRAYIDX12]], align 4 +; CHECK-NEXT: [[TMP13]] = load float, ptr [[W]], align 4 +; CHECK-NEXT: store float [[TMP13]], ptr [[ARRAYIDX4]], align 4 +; CHECK-NEXT: store float [[TMP7]], ptr [[W]], align 4 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_031]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[IF_END]], label [[FOR_BODY]] +; CHECK: for.body.lr.ph.clone: +; CHECK-NEXT: [[ARRAYIDX1_CLONE:%.*]] = getelementptr inbounds float, ptr [[COEF]], i32 3 +; CHECK-NEXT: [[ARRAYIDX3_CLONE:%.*]] = getelementptr inbounds float, ptr [[COEF]], i32 4 +; CHECK-NEXT: [[ARRAYIDX4_CLONE:%.*]] = getelementptr inbounds float, ptr [[W]], i32 1 +; CHECK-NEXT: [[ARRAYIDX7_CLONE:%.*]] = getelementptr inbounds float, ptr [[COEF]], i32 1 +; CHECK-NEXT: [[ARRAYIDX10_CLONE:%.*]] = getelementptr inbounds float, ptr [[COEF]], i32 2 +; CHECK-NEXT: [[DOTPRE_CLONE:%.*]] = load float, ptr [[W]], align 4 +; CHECK-NEXT: [[DOTPRE32_CLONE:%.*]] = load float, ptr [[ARRAYIDX4_CLONE]], align 4 +; CHECK-NEXT: br label [[FOR_BODY_CLONE:%.*]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[TMP14:%.*]] = phi float [ [[DOTPRE32_CLONE]], [[FOR_BODY_LR_PH_CLONE]] ], [ [[TMP26:%.*]], [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: [[TMP15:%.*]] = phi float [ [[DOTPRE_CLONE]], [[FOR_BODY_LR_PH_CLONE]] ], [ [[TMP20:%.*]], [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: [[I_031_CLONE:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH_CLONE]] ], [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_031_CLONE]] +; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX1_CLONE]], align 4 +; CHECK-NEXT: [[NEG_CLONE:%.*]] = fneg float 
[[TMP17]] +; CHECK-NEXT: [[TMP18:%.*]] = tail call float @llvm.fmuladd.f32(float [[NEG_CLONE]], float [[TMP15]], float [[TMP16]]) +; CHECK-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX3_CLONE]], align 4 +; CHECK-NEXT: [[NEG5_CLONE:%.*]] = fneg float [[TMP19]] +; CHECK-NEXT: [[TMP20]] = tail call float @llvm.fmuladd.f32(float [[NEG5_CLONE]], float [[TMP14]], float [[TMP18]]) +; CHECK-NEXT: [[TMP21:%.*]] = load float, ptr [[COEF]], align 4 +; CHECK-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX7_CLONE]], align 4 +; CHECK-NEXT: [[MUL9_CLONE:%.*]] = fmul float [[TMP15]], [[TMP22]] +; CHECK-NEXT: [[TMP23:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP21]], float [[TMP20]], float [[MUL9_CLONE]]) +; CHECK-NEXT: [[TMP24:%.*]] = load float, ptr [[ARRAYIDX10_CLONE]], align 4 +; CHECK-NEXT: [[TMP25:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP24]], float [[TMP14]], float [[TMP23]]) +; CHECK-NEXT: [[ARRAYIDX12_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_031_CLONE]] +; CHECK-NEXT: store float [[TMP25]], ptr [[ARRAYIDX12_CLONE]], align 4 +; CHECK-NEXT: [[TMP26]] = load float, ptr [[W]], align 4 +; CHECK-NEXT: store float [[TMP26]], ptr [[ARRAYIDX4_CLONE]], align 4 +; CHECK-NEXT: store float [[TMP20]], ptr [[W]], align 4 +; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[I_031_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[IF_END]], label [[FOR_BODY_CLONE]] +; +entry: + %0 = icmp sgt i32 %len, 2 + br i1 %0, label %for.cond.preheader, label %for.body.lr.ph.clone + +for.cond.preheader: ; preds = %entry + %cmp30 = icmp sgt i32 %len, 0 + br i1 %cmp30, label %for.body.lr.ph, label %if.end + +for.body.lr.ph: ; preds = %for.cond.preheader + %arrayidx1 = getelementptr inbounds float, ptr %coef, i32 3 + %arrayidx3 = getelementptr inbounds float, ptr %coef, i32 4 + %arrayidx4 = getelementptr inbounds float, ptr %w, i32 1 + %arrayidx7 = getelementptr 
inbounds float, ptr %coef, i32 1 + %arrayidx10 = getelementptr inbounds float, ptr %coef, i32 2 + %.pre = load float, ptr %w, align 4 + %.pre32 = load float, ptr %arrayidx4, align 4 + br label %for.body + +if.end: ; preds = %for.body.clone, %for.body, %for.cond.preheader + ret i32 0 + +for.body: ; preds = %for.body, %for.body.lr.ph + %1 = phi float [ %.pre32, %for.body.lr.ph ], [ %13, %for.body ] + %2 = phi float [ %.pre, %for.body.lr.ph ], [ %7, %for.body ] + %i.031 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ] + %arrayidx = getelementptr inbounds float, ptr %input, i32 %i.031 + %3 = load float, ptr %arrayidx, align 4 + %4 = load float, ptr %arrayidx1, align 4 + %neg = fneg float %4 + %5 = tail call float @llvm.fmuladd.f32(float %neg, float %2, float %3) + %6 = load float, ptr %arrayidx3, align 4 + %neg5 = fneg float %6 + %7 = tail call float @llvm.fmuladd.f32(float %neg5, float %1, float %5) + %8 = load float, ptr %coef, align 4 + %9 = load float, ptr %arrayidx7, align 4 + %mul9 = fmul float %2, %9 + %10 = tail call float @llvm.fmuladd.f32(float %8, float %7, float %mul9) + %11 = load float, ptr %arrayidx10, align 4 + %12 = tail call float @llvm.fmuladd.f32(float %11, float %1, float %10) + %arrayidx12 = getelementptr inbounds float, ptr %output, i32 %i.031 + store float %12, ptr %arrayidx12, align 4 + %13 = load float, ptr %w, align 4 + store float %13, ptr %arrayidx4, align 4 + store float %7, ptr %w, align 4 + %inc = add nuw nsw i32 %i.031, 1 + %exitcond.not = icmp eq i32 %inc, %len + br i1 %exitcond.not, label %if.end, label %for.body + +for.body.lr.ph.clone: ; preds = %entry + %arrayidx1.clone = getelementptr inbounds float, ptr %coef, i32 3 + %arrayidx3.clone = getelementptr inbounds float, ptr %coef, i32 4 + %arrayidx4.clone = getelementptr inbounds float, ptr %w, i32 1 + %arrayidx7.clone = getelementptr inbounds float, ptr %coef, i32 1 + %arrayidx10.clone = getelementptr inbounds float, ptr %coef, i32 2 + %.pre.clone = load float, ptr %w, align 4 
+ %.pre32.clone = load float, ptr %arrayidx4.clone, align 4 + br label %for.body.clone + +for.body.clone: ; preds = %for.body.clone, %for.body.lr.ph.clone + %14 = phi float [ %.pre32.clone, %for.body.lr.ph.clone ], [ %26, %for.body.clone ] + %15 = phi float [ %.pre.clone, %for.body.lr.ph.clone ], [ %20, %for.body.clone ] + %i.031.clone = phi i32 [ 0, %for.body.lr.ph.clone ], [ %inc.clone, %for.body.clone ] + %arrayidx.clone = getelementptr inbounds float, ptr %input, i32 %i.031.clone + %16 = load float, ptr %arrayidx.clone, align 4 + %17 = load float, ptr %arrayidx1.clone, align 4 + %neg.clone = fneg float %17 + %18 = tail call float @llvm.fmuladd.f32(float %neg.clone, float %15, float %16) + %19 = load float, ptr %arrayidx3.clone, align 4 + %neg5.clone = fneg float %19 + %20 = tail call float @llvm.fmuladd.f32(float %neg5.clone, float %14, float %18) + %21 = load float, ptr %coef, align 4 + %22 = load float, ptr %arrayidx7.clone, align 4 + %mul9.clone = fmul float %15, %22 + %23 = tail call float @llvm.fmuladd.f32(float %21, float %20, float %mul9.clone) + %24 = load float, ptr %arrayidx10.clone, align 4 + %25 = tail call float @llvm.fmuladd.f32(float %24, float %14, float %23) + %arrayidx12.clone = getelementptr inbounds float, ptr %output, i32 %i.031.clone + store float %25, ptr %arrayidx12.clone, align 4 + %26 = load float, ptr %w, align 4 + store float %26, ptr %arrayidx4.clone, align 4 + store float %20, ptr %w, align 4 + %inc.clone = add nuw nsw i32 %i.031.clone, 1 + %exitcond.not.clone = icmp eq i32 %inc.clone, %len + br i1 %exitcond.not.clone, label %if.end, label %for.body.clone +} From 916259bf8fa1dd11096615626aa076176f83711c Mon Sep 17 00:00:00 2001 From: "chen.qian" Date: Tue, 12 Nov 2024 15:01:45 +0800 Subject: [PATCH 267/289] [Pass] add CustomLICM Pass --- llvm/lib/Target/RISCV/CMakeLists.txt | 1 + llvm/lib/Target/RISCV/RISCVCustomLICM.cpp | 404 ++++++++++++++++++ llvm/lib/Target/RISCV/RISCVCustomLICM.h | 53 +++ 
llvm/lib/Target/RISCV/RISCVTargetMachine.cpp | 10 +- .../CodeGen/RISCV/RISCVCustomLICM/biquad.ll | 82 ++-- 5 files changed, 508 insertions(+), 42 deletions(-) create mode 100644 llvm/lib/Target/RISCV/RISCVCustomLICM.cpp create mode 100644 llvm/lib/Target/RISCV/RISCVCustomLICM.h diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt index 1bb63ffe6d43f..e3558d689a0cd 100644 --- a/llvm/lib/Target/RISCV/CMakeLists.txt +++ b/llvm/lib/Target/RISCV/CMakeLists.txt @@ -37,6 +37,7 @@ add_llvm_target(RISCVCodeGen RISCVFrameLowering.cpp RISCVGatherScatterLowering.cpp RISCVSplitLoopByLength.cpp + RISCVCustomLICM.cpp RISCVInsertVSETVLI.cpp RISCVInsertReadWriteCSR.cpp RISCVInsertWriteVXRM.cpp diff --git a/llvm/lib/Target/RISCV/RISCVCustomLICM.cpp b/llvm/lib/Target/RISCV/RISCVCustomLICM.cpp new file mode 100644 index 0000000000000..4839efc87cb0f --- /dev/null +++ b/llvm/lib/Target/RISCV/RISCVCustomLICM.cpp @@ -0,0 +1,404 @@ +//===- RISCVCustomLICM.cpp - Custom Loop Invariant Code Motion ------*- C++ +//-*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// RISCVCustomLICM pass - Custom Loop Invariant Code Motion +// This pass aims to optimize loops by moving invariant code out of loops +// Main optimizations: +// 1. Hoisted loop-invariant loads out of the main loop: +// Loaded coef[0-4] values before the loop and stored them in local variables +// for use inside the loop +// 2. 
Pre-computed negations of some coefficients: +// Created %7 = fneg float %1 (negation of coef[3]) +// Created %8 = fneg float %2 (negation of coef[4]) +/* +Before optimization: +for.body: + %1 = load float, ptr %arrayidx1, align 4 + %2 = load float, ptr %arrayidx3, align 4 + %3 = load float, ptr %coef, align 4 + %4 = load float, ptr %arrayidx7, align 4 + %5 = load float, ptr %arrayidx10, align 4 + %6 = load float, ptr %w, align 4 + ... + %neg = fneg float %4 + ... + %neg5 = fneg float %6 + +After optimization: +for.body.lr.ph: + %1 = load float, ptr %arrayidx1, align 4 + %2 = load float, ptr %arrayidx3, align 4 + %3 = load float, ptr %coef, align 4 + %4 = load float, ptr %arrayidx7, align 4 + %5 = load float, ptr %arrayidx10, align 4 + %6 = load float, ptr %w, align 4 + ... + %neg = fneg float %4 + %neg5 = fneg float %6 + br label %for.body +for.body: + ... +*/ + +#include "RISCVCustomLICM.h" +#include "llvm/ADT/PriorityWorklist.h" +#include "llvm/ADT/SetOperations.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/Analysis/AliasAnalysis.h" +#include "llvm/Analysis/AliasSetTracker.h" +#include "llvm/Analysis/AssumptionCache.h" +#include "llvm/Analysis/CaptureTracking.h" +#include "llvm/Analysis/GuardUtils.h" +#include "llvm/Analysis/LazyBlockFrequencyInfo.h" +#include "llvm/Analysis/Loads.h" +#include "llvm/Analysis/LoopInfo.h" +#include "llvm/Analysis/LoopIterator.h" +#include "llvm/Analysis/LoopNestAnalysis.h" +#include "llvm/Analysis/LoopPass.h" +#include "llvm/Analysis/MemorySSA.h" +#include "llvm/Analysis/MemorySSAUpdater.h" +#include "llvm/Analysis/MustExecute.h" +#include "llvm/Analysis/OptimizationRemarkEmitter.h" +#include "llvm/Analysis/ScalarEvolution.h" +#include "llvm/Analysis/TargetLibraryInfo.h" +#include "llvm/Analysis/TargetTransformInfo.h" +#include "llvm/Analysis/ValueTracking.h" +#include "llvm/IR/CFG.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/DebugInfoMetadata.h" 
+#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Dominators.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/LegacyPassManager.h" +#include "llvm/IR/Metadata.h" +#include "llvm/IR/PassManager.h" +#include "llvm/IR/PatternMatch.h" +#include "llvm/IR/PredIteratorCache.h" +#include "llvm/InitializePasses.h" +#include "llvm/Passes/PassBuilder.h" +#include "llvm/Passes/PassPlugin.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Target/TargetOptions.h" +#include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Scalar/LICM.h" +#include "llvm/Transforms/Utils.h" +#include "llvm/Transforms/Utils/AssumeBundleBuilder.h" +#include "llvm/Transforms/Utils/BasicBlockUtils.h" +#include "llvm/Transforms/Utils/Local.h" +#include "llvm/Transforms/Utils/LoopUtils.h" +#include "llvm/Transforms/Utils/SSAUpdater.h" +#include +#include + +using namespace llvm; + +#define DEBUG_TYPE "riscv-custom-licm" + +// Command line option to enable/disable RISCVCustomLICM +cl::opt llvm::EnableRISCVCustomLICM( + "riscv-custom-licm", cl::init(false), + cl::desc("enable custom licm for specific loop")); +/* +for.body: + ... + %neg = fneg float %4 + ... + %neg5 = fneg float %6 + +=> +for.body.lr.ph: + ... + %neg = fneg float %4 + %neg5 = fneg float %6 + br label %for.body + +for.body: + ... + +// Since the input and output of fneg remain constant throughout the loop, +// extracting it to the preheader can improve performance. +// This optimization moves fneg instructions out of the loop body +// to reduce redundant computations in each iteration. 
+ +*/ +// Function to move fneg instructions out of the loop +void RISCVCustomLICMPass::moveFnegOutOfLoop(BasicBlock *Preheader, + BasicBlock &BB, LLVMContext &Ctx) { + IRBuilder<> Builder(Preheader->getTerminator()); + SmallVector toRemove; + + for (auto &I : BB) { + if (auto *FNeg = dyn_cast(&I)) { + if (FNeg->getOpcode() == Instruction::FNeg) { + Value *Operand = FNeg->getOperand(0); + Value *NewFNeg = Builder.CreateFNeg(Operand); + FNeg->replaceAllUsesWith(NewFNeg); + toRemove.push_back(FNeg); + } + } + } + + // Remove the old fneg instructions + for (auto *I : toRemove) { + I->eraseFromParent(); + } +} + +// Helper function to get a basic block by name +static inline BasicBlock *getBasicBlockByName(Function &F, StringRef Name) { + for (BasicBlock &BB : F) + if (BB.getName() == Name) + return &BB; + return nullptr; +} + +// Function to adjust PHI nodes in the loop +void RISCVCustomLICMPass::adjustPhiNodes(BasicBlock &BB) { + SmallVector Phis; + for (auto &I : BB.phis()) { + Phis.push_back(&I); + } + + if (Phis.size() >= 2) { + PHINode *Phi1 = Phis[0]; + PHINode *Phi2 = Phis[1]; + + // Swap the positions of two PHI nodes + Phi2->moveBefore(Phi1); + + // Update the loop entry value of the second PHI node + Value *LoopValue = Phi2; + for (unsigned i = 0; i < Phi1->getNumIncomingValues(); ++i) { + if (Phi1->getIncomingBlock(i) == &BB) { + Phi1->setIncomingValue(i, LoopValue); + break; + } + } + } +} + +// Function to create a cleanup block for the loop +void RISCVCustomLICMPass::createCleanupBlock(Function &F, BasicBlock &LoopBB) { + LLVMContext &Ctx = F.getContext(); + BasicBlock *CleanupBB = + BasicBlock::Create(Ctx, "for.cond.cleanup", &F, LoopBB.getNextNode()); + IRBuilder<> Builder(CleanupBB); + + // Create a branch to if.end + BasicBlock *IfEndBB = getBasicBlockByName(F, "if.end"); + assert(IfEndBB && "if.end basic block not found"); + Builder.CreateBr(IfEndBB); + + // Update the loop's branch + Instruction *LoopTerminator = LoopBB.getTerminator(); + 
assert(LoopTerminator && "Loop terminator not found"); + + if (BranchInst *BI = dyn_cast(LoopTerminator)) { + if (BI->isConditional() && BI->getSuccessor(1) == &LoopBB) { + BI->setSuccessor(0, CleanupBB); + } + } +} + +// Function to move store instructions out of the loop +void RISCVCustomLICMPass::moveStoreOutOfLoop(BasicBlock &BB) { + BasicBlock *CleanupBB = BB.getNextNode(); + if (!CleanupBB || CleanupBB->getName() != "for.cond.cleanup") + return; + + IRBuilder<> Builder(CleanupBB->getFirstNonPHI()); + SmallVector toRemove; + + for (auto &I : BB) { + if (auto *Store = dyn_cast(&I)) { + Value *Val1 = Store->getOperand(1); + Value *Val0 = Store->getOperand(0); + // Check if the stored value is defined in the current basic block + if (!isa(Val1) || + cast(Val1)->getParent() != &BB) { + Value *Ptr = Store->getPointerOperand(); + Builder.CreateStore(Val0, Ptr); + toRemove.push_back(Store); + } + } + } + + // Remove the old store instructions + for (auto *I : toRemove) { + I->eraseFromParent(); + } +} + +// Check the number of basic blocks and parameters of the function +static bool checkBasicBlocksAndParams(Function &F) { + // Check number of basic blocks + if (F.size() != 7) + return false; + + // Check number of parameters + if (F.arg_size() != 5) + return false; + + return true; +} + +// Check loop nesting depth +static bool checkLoopNesting(Function &F, LoopInfo &LI) { + // Check maximum loop depth + unsigned int maxLoopDepth = 0; + for (auto &BB : F) { + maxLoopDepth = std::max(maxLoopDepth, LI.getLoopDepth(&BB)); + } + if (maxLoopDepth != 1) + return false; + + // Check outer and inner loop counts + int outerLoopCount = 0; + int innerLoopCount = 0; + for (Loop *L : LI.getLoopsInPreorder()) { + if (L->getLoopDepth() == 1) { + outerLoopCount++; + if (L->getSubLoops().size() > 0) { + innerLoopCount++; + } + } + } + + return (outerLoopCount == 2 && innerLoopCount == 0); +} + +// Check if fmuladd.f32 intrinsic is used +static bool checkFMulAddUsage(Function &F) { + 
for (auto &BB : F) { + for (auto &I : BB) { + if (RecurrenceDescriptor::isFMulAddIntrinsic(&I)) { + return true; + } + } + } + return false; +} + +// Check existence of basic blocks and control flow +static bool checkBasicBlocksAndControlFlow(Function &F) { + // Get all required basic blocks + BasicBlock *Entry = getBasicBlockByName(F, "entry"); + BasicBlock *ForCondPreheader = getBasicBlockByName(F, "for.cond.preheader"); + BasicBlock *ForBodyLrPh = getBasicBlockByName(F, "for.body.lr.ph"); + BasicBlock *IfEnd = getBasicBlockByName(F, "if.end"); + BasicBlock *ForBody = getBasicBlockByName(F, "for.body"); + BasicBlock *ForBodyLrPhClone = getBasicBlockByName(F, "for.body.lr.ph.clone"); + BasicBlock *ForBodyClone = getBasicBlockByName(F, "for.body.clone"); + + // Check if all basic blocks exist + if (!Entry || !ForCondPreheader || !ForBodyLrPh || !IfEnd || !ForBody || + !ForBodyLrPhClone || !ForBodyClone) + return false; + + // Check control flow + if (Entry->getTerminator()->getSuccessor(0) != ForCondPreheader || + Entry->getTerminator()->getSuccessor(1) != ForBodyLrPhClone || + ForCondPreheader->getTerminator()->getSuccessor(0) != ForBodyLrPh || + ForCondPreheader->getTerminator()->getSuccessor(1) != IfEnd || + ForBodyLrPh->getSingleSuccessor() != ForBody || + ForBody->getTerminator()->getSuccessor(0) != IfEnd || + ForBody->getTerminator()->getSuccessor(1) != ForBody || + ForBodyLrPhClone->getSingleSuccessor() != ForBodyClone || + ForBodyClone->getTerminator()->getSuccessor(0) != IfEnd || + ForBodyClone->getTerminator()->getSuccessor(1) != ForBodyClone) + return false; + + return true; +} + +// Main check function +static bool isSafeToOptimizeBiquadType(Function &F, LoopInfo &LI) { + return checkBasicBlocksAndParams(F) && checkLoopNesting(F, LI) && + checkFMulAddUsage(F) && checkBasicBlocksAndControlFlow(F); +} + +// Main function to run the CustomLICM pass +PreservedAnalyses RISCVCustomLICMPass::run(Function &F, + FunctionAnalysisManager &FAM) { + if 
(!EnableRISCVCustomLICM) + return PreservedAnalyses::all(); + + DominatorTree &DT = FAM.getResult(F); + LoopInfo &LI = FAM.getResult(F); + + if (!isSafeToOptimizeBiquadType(F, LI)) { + return PreservedAnalyses::all(); + } + + bool Changed = false; + + for (auto &L : LI) { + if (L->getLoopDepth() != 1 || L->getBlocks().empty()) + continue; // Only process the outermost non-empty loops + + BasicBlock *Preheader = L->getLoopPreheader(); + if (!Preheader) { + Preheader = InsertPreheaderForLoop(L, &DT, &LI, nullptr, true); + if (!Preheader) + continue; + } + + Changed |= optimizeLoop(L, Preheader, F); + } + + LLVM_DEBUG(F.dump()); + + return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all(); +} + +// Function to optimize a single loop +bool RISCVCustomLICMPass::optimizeLoop(Loop *L, BasicBlock *Preheader, + Function &F) { + SmallVector InvariantInsts; + + for (auto &BB : L->blocks()) { + if (BB->getName() != "for.body") + continue; + + for (auto &I : *BB) { + if (L->hasLoopInvariantOperands(&I) && !isMustTailCall(&I)) { + InvariantInsts.push_back(&I); + } + } + + // Move loop invariant instructions + for (auto *I : InvariantInsts) { + I->moveBefore(Preheader->getTerminator()); + } + + // Execute other optimizations + moveFnegOutOfLoop(Preheader, *BB, F.getContext()); + adjustPhiNodes(*BB); + createCleanupBlock(F, *BB); + moveStoreOutOfLoop(*BB); + } + + return !InvariantInsts.empty(); +} + +// Helper function to check if an instruction is a must-tail call +bool RISCVCustomLICMPass::isMustTailCall(Instruction *I) { + if (CallInst *CI = dyn_cast(I)) { + return CI->isMustTailCall(); + } + return false; +} diff --git a/llvm/lib/Target/RISCV/RISCVCustomLICM.h b/llvm/lib/Target/RISCV/RISCVCustomLICM.h new file mode 100644 index 0000000000000..d6ce2a4ee3c2e --- /dev/null +++ b/llvm/lib/Target/RISCV/RISCVCustomLICM.h @@ -0,0 +1,53 @@ +//===- RISCVcustomLICM.h - Function Entry/Exit Instrumentation ------===// +// +// Part of the LLVM Project, under the Apache 
License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// RISCVCustomLICM pass - Custom Loop Invariant Code Motion +// This pass aims to optimize loops by moving invariant code out of loops +// +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_UTILS_RISCVCUSTOMLICM_H +#define LLVM_TRANSFORMS_UTILS_RISCVCUSTOMLICM_H + +#include "llvm/Support/CommandLine.h" +#include "llvm/Analysis/LoopInfo.h" +#include "llvm/IR/Dominators.h" +#include "llvm/IR/PassManager.h" + +namespace llvm { +extern cl::opt EnableRISCVCustomLICM; +class Function; + +struct RISCVCustomLICMPass : public PassInfoMixin { + RISCVCustomLICMPass() {} + + PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); + + static bool isRequired() { return true; } + // Implement logic to move fneg operations out of the loop + void moveFnegOutOfLoop(BasicBlock *Preheader, BasicBlock &BB, + LLVMContext &Ctx); + + // Implement logic to adjust phi nodes + void adjustPhiNodes(BasicBlock &BB); + + // Implement logic to create for.cond.cleanup basic block + void createCleanupBlock(Function &F, BasicBlock &LoopBB); + + // Implement logic to move store operations after the loop ends + void moveStoreOutOfLoop(BasicBlock &BB); + + bool optimizeLoop(Loop *L, BasicBlock *Preheader, Function &F); + + bool isMustTailCall(Instruction *I); +}; + +} // namespace llvm + +#endif // LLVM_TRANSFORMS_UTILS_RISCVCUSTOMLICM_H diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp index 369f40c35028d..f92fd1a06be38 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp @@ -13,10 +13,11 @@ #include "RISCVTargetMachine.h" #include "MCTargetDesc/RISCVBaseInfo.h" #include 
"RISCV.h" +#include "RISCVCustomLICM.h" #include "RISCVMachineFunctionInfo.h" +#include "RISCVSplitLoopByLength.h" #include "RISCVTargetObjectFile.h" #include "RISCVTargetTransformInfo.h" -#include "RISCVSplitLoopByLength.h" #include "TargetInfo/RISCVTargetInfo.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Analysis/TargetTransformInfo.h" @@ -37,7 +38,6 @@ #include "llvm/Passes/PassBuilder.h" #include "llvm/Support/FormattedStream.h" #include "llvm/Target/TargetOptions.h" -#include "llvm/Passes/PassBuilder.h" #include "llvm/Transforms/IPO.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Vectorize/LoopIdiomVectorize.h" @@ -595,6 +595,10 @@ void RISCVTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) { FPM.addPass(RISCVSplitLoopByLengthPass()); return true; } + if (Name == "riscv-custom-licm") { + FPM.addPass(RISCVCustomLICMPass()); + return true; + } return false; }); @@ -602,8 +606,10 @@ void RISCVTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) { [](ModulePassManager &PM, OptimizationLevel Level) { if(EnableEsp32P4Optimize && (Level == OptimizationLevel::O3 || Level == OptimizationLevel::O2)){ EnableRISCVSplitLoopByLength = true; + EnableRISCVCustomLICM = true; FunctionPassManager FPM; FPM.addPass(RISCVSplitLoopByLengthPass()); + FPM.addPass(RISCVCustomLICMPass()); PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); } }); diff --git a/llvm/test/CodeGen/RISCV/RISCVCustomLICM/biquad.ll b/llvm/test/CodeGen/RISCV/RISCVCustomLICM/biquad.ll index a1ae7645abf84..0be55d497e11e 100644 --- a/llvm/test/CodeGen/RISCV/RISCVCustomLICM/biquad.ll +++ b/llvm/test/CodeGen/RISCV/RISCVCustomLICM/biquad.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-custom-licm -riscv-custom-licm=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-custom-licm -riscv-custom-licm=true < %s 
| FileCheck %s define dso_local noundef i32 @dsps_biquad_f32_ansi(ptr nocapture noundef readonly %input, ptr nocapture noundef writeonly %output, i32 noundef %len, ptr nocapture noundef readonly %coef, ptr nocapture noundef %w) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_biquad_f32_ansi( ; CHECK-SAME: ptr nocapture noundef readonly [[INPUT:%.*]], ptr nocapture noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], ptr nocapture noundef readonly [[COEF:%.*]], ptr nocapture noundef [[W:%.*]]) local_unnamed_addr { @@ -17,35 +17,37 @@ define dso_local noundef i32 @dsps_biquad_f32_ansi(ptr nocapture noundef readonl ; CHECK-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[COEF]], i32 2 ; CHECK-NEXT: [[DOTPRE:%.*]] = load float, ptr [[W]], align 4 ; CHECK-NEXT: [[DOTPRE32:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX3]], align 4 +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[COEF]], align 4 +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX7]], align 4 +; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX10]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[W]], align 4 +; CHECK-NEXT: [[TMP7:%.*]] = fneg float [[TMP1]] +; CHECK-NEXT: [[TMP8:%.*]] = fneg float [[TMP2]] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: if.end: ; CHECK-NEXT: ret i32 0 ; CHECK: for.body: -; CHECK-NEXT: [[TMP1:%.*]] = phi float [ [[DOTPRE32]], [[FOR_BODY_LR_PH]] ], [ [[TMP13:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[TMP2:%.*]] = phi float [ [[DOTPRE]], [[FOR_BODY_LR_PH]] ], [ [[TMP7:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[TMP9:%.*]] = phi float [ [[DOTPRE]], [[FOR_BODY_LR_PH]] ], [ [[TMP13:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[TMP10:%.*]] = phi float [ [[DOTPRE32]], [[FOR_BODY_LR_PH]] ], [ [[TMP9]], [[FOR_BODY]] ] ; CHECK-NEXT: [[I_031:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] ; 
CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_031]] -; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 -; CHECK-NEXT: [[NEG:%.*]] = fneg float [[TMP4]] -; CHECK-NEXT: [[TMP5:%.*]] = tail call float @llvm.fmuladd.f32(float [[NEG]], float [[TMP2]], float [[TMP3]]) -; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX3]], align 4 -; CHECK-NEXT: [[NEG5:%.*]] = fneg float [[TMP6]] -; CHECK-NEXT: [[TMP7]] = tail call float @llvm.fmuladd.f32(float [[NEG5]], float [[TMP1]], float [[TMP5]]) -; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[COEF]], align 4 -; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX7]], align 4 -; CHECK-NEXT: [[MUL9:%.*]] = fmul float [[TMP2]], [[TMP9]] -; CHECK-NEXT: [[TMP10:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP8]], float [[TMP7]], float [[MUL9]]) -; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX10]], align 4 -; CHECK-NEXT: [[TMP12:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP11]], float [[TMP1]], float [[TMP10]]) +; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[TMP12:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP7]], float [[TMP9]], float [[TMP11]]) +; CHECK-NEXT: [[TMP13]] = tail call float @llvm.fmuladd.f32(float [[TMP8]], float [[TMP10]], float [[TMP12]]) +; CHECK-NEXT: [[MUL9:%.*]] = fmul float [[TMP9]], [[TMP4]] +; CHECK-NEXT: [[TMP14:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP3]], float [[TMP13]], float [[MUL9]]) +; CHECK-NEXT: [[TMP15:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP5]], float [[TMP10]], float [[TMP14]]) ; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_031]] -; CHECK-NEXT: store float [[TMP12]], ptr [[ARRAYIDX12]], align 4 -; CHECK-NEXT: [[TMP13]] = load float, ptr [[W]], align 4 -; CHECK-NEXT: store float [[TMP13]], ptr [[ARRAYIDX4]], align 4 -; CHECK-NEXT: store float 
[[TMP7]], ptr [[W]], align 4 +; CHECK-NEXT: store float [[TMP15]], ptr [[ARRAYIDX12]], align 4 ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_031]], 1 ; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[IF_END]], label [[FOR_BODY]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: store float [[TMP6]], ptr [[ARRAYIDX4]], align 4 +; CHECK-NEXT: store float [[TMP13]], ptr [[W]], align 4 +; CHECK-NEXT: br label [[IF_END]] ; CHECK: for.body.lr.ph.clone: ; CHECK-NEXT: [[ARRAYIDX1_CLONE:%.*]] = getelementptr inbounds float, ptr [[COEF]], i32 3 ; CHECK-NEXT: [[ARRAYIDX3_CLONE:%.*]] = getelementptr inbounds float, ptr [[COEF]], i32 4 @@ -56,28 +58,28 @@ define dso_local noundef i32 @dsps_biquad_f32_ansi(ptr nocapture noundef readonl ; CHECK-NEXT: [[DOTPRE32_CLONE:%.*]] = load float, ptr [[ARRAYIDX4_CLONE]], align 4 ; CHECK-NEXT: br label [[FOR_BODY_CLONE:%.*]] ; CHECK: for.body.clone: -; CHECK-NEXT: [[TMP14:%.*]] = phi float [ [[DOTPRE32_CLONE]], [[FOR_BODY_LR_PH_CLONE]] ], [ [[TMP26:%.*]], [[FOR_BODY_CLONE]] ] -; CHECK-NEXT: [[TMP15:%.*]] = phi float [ [[DOTPRE_CLONE]], [[FOR_BODY_LR_PH_CLONE]] ], [ [[TMP20:%.*]], [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: [[TMP16:%.*]] = phi float [ [[DOTPRE32_CLONE]], [[FOR_BODY_LR_PH_CLONE]] ], [ [[TMP28:%.*]], [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: [[TMP17:%.*]] = phi float [ [[DOTPRE_CLONE]], [[FOR_BODY_LR_PH_CLONE]] ], [ [[TMP22:%.*]], [[FOR_BODY_CLONE]] ] ; CHECK-NEXT: [[I_031_CLONE:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH_CLONE]] ], [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ] ; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_031_CLONE]] -; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 -; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX1_CLONE]], align 4 -; CHECK-NEXT: [[NEG_CLONE:%.*]] = fneg float [[TMP17]] -; CHECK-NEXT: 
[[TMP18:%.*]] = tail call float @llvm.fmuladd.f32(float [[NEG_CLONE]], float [[TMP15]], float [[TMP16]]) -; CHECK-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX3_CLONE]], align 4 -; CHECK-NEXT: [[NEG5_CLONE:%.*]] = fneg float [[TMP19]] -; CHECK-NEXT: [[TMP20]] = tail call float @llvm.fmuladd.f32(float [[NEG5_CLONE]], float [[TMP14]], float [[TMP18]]) -; CHECK-NEXT: [[TMP21:%.*]] = load float, ptr [[COEF]], align 4 -; CHECK-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX7_CLONE]], align 4 -; CHECK-NEXT: [[MUL9_CLONE:%.*]] = fmul float [[TMP15]], [[TMP22]] -; CHECK-NEXT: [[TMP23:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP21]], float [[TMP20]], float [[MUL9_CLONE]]) -; CHECK-NEXT: [[TMP24:%.*]] = load float, ptr [[ARRAYIDX10_CLONE]], align 4 -; CHECK-NEXT: [[TMP25:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP24]], float [[TMP14]], float [[TMP23]]) +; CHECK-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX1_CLONE]], align 4 +; CHECK-NEXT: [[NEG_CLONE:%.*]] = fneg float [[TMP19]] +; CHECK-NEXT: [[TMP20:%.*]] = tail call float @llvm.fmuladd.f32(float [[NEG_CLONE]], float [[TMP17]], float [[TMP18]]) +; CHECK-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX3_CLONE]], align 4 +; CHECK-NEXT: [[NEG5_CLONE:%.*]] = fneg float [[TMP21]] +; CHECK-NEXT: [[TMP22]] = tail call float @llvm.fmuladd.f32(float [[NEG5_CLONE]], float [[TMP16]], float [[TMP20]]) +; CHECK-NEXT: [[TMP23:%.*]] = load float, ptr [[COEF]], align 4 +; CHECK-NEXT: [[TMP24:%.*]] = load float, ptr [[ARRAYIDX7_CLONE]], align 4 +; CHECK-NEXT: [[MUL9_CLONE:%.*]] = fmul float [[TMP17]], [[TMP24]] +; CHECK-NEXT: [[TMP25:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP23]], float [[TMP22]], float [[MUL9_CLONE]]) +; CHECK-NEXT: [[TMP26:%.*]] = load float, ptr [[ARRAYIDX10_CLONE]], align 4 +; CHECK-NEXT: [[TMP27:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP26]], float [[TMP16]], float [[TMP25]]) ; CHECK-NEXT: 
[[ARRAYIDX12_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_031_CLONE]] -; CHECK-NEXT: store float [[TMP25]], ptr [[ARRAYIDX12_CLONE]], align 4 -; CHECK-NEXT: [[TMP26]] = load float, ptr [[W]], align 4 -; CHECK-NEXT: store float [[TMP26]], ptr [[ARRAYIDX4_CLONE]], align 4 -; CHECK-NEXT: store float [[TMP20]], ptr [[W]], align 4 +; CHECK-NEXT: store float [[TMP27]], ptr [[ARRAYIDX12_CLONE]], align 4 +; CHECK-NEXT: [[TMP28]] = load float, ptr [[W]], align 4 +; CHECK-NEXT: store float [[TMP28]], ptr [[ARRAYIDX4_CLONE]], align 4 +; CHECK-NEXT: store float [[TMP22]], ptr [[W]], align 4 ; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[I_031_CLONE]], 1 ; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] ; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[IF_END]], label [[FOR_BODY_CLONE]] From 5ad8bbd7e5a276caf76bd02731d4aaa8434f676d Mon Sep 17 00:00:00 2001 From: "chen.qian" Date: Tue, 15 Oct 2024 10:53:54 +0800 Subject: [PATCH 268/289] [Test] add LoopUnrollAndRemainder test cases --- .../RISCV/RISCVLoopUnrollAndRemainder/add.ll | 104 ++++++ .../RISCV/RISCVLoopUnrollAndRemainder/addc.ll | 88 +++++ .../RISCVLoopUnrollAndRemainder/ccorr.ll | 241 ++++++++++++++ .../RISCV/RISCVLoopUnrollAndRemainder/conv.ll | 237 ++++++++++++++ .../RISCV/RISCVLoopUnrollAndRemainder/corr.ll | 97 ++++++ .../RISCVLoopUnrollAndRemainder/dotprod.ll | 75 +++++ .../dotprod_template_complex.ll | 49 +++ .../RISCVLoopUnrollAndRemainder/dotprode.ll | 83 +++++ .../RISCV/RISCVLoopUnrollAndRemainder/fir.ll | 306 ++++++++++++++++++ .../RISCV/RISCVLoopUnrollAndRemainder/fird.ll | 207 ++++++++++++ .../loopsecvconstant.ll | 39 +++ .../RISCV/RISCVLoopUnrollAndRemainder/mul.ll | 104 ++++++ .../RISCV/RISCVLoopUnrollAndRemainder/mulc.ll | 88 +++++ .../RISCV/RISCVLoopUnrollAndRemainder/sqrt.ll | 84 +++++ .../RISCV/RISCVLoopUnrollAndRemainder/sub.ll | 104 ++++++ 15 files changed, 1906 insertions(+) create mode 100644 
llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/add.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/addc.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/ccorr.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/conv.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/corr.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod_template_complex.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprode.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fir.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fird.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/loopsecvconstant.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/mul.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/mulc.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sqrt.ll create mode 100644 llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sub.ll diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/add.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/add.ll new file mode 100644 index 0000000000000..3960501c6ff11 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/add.ll @@ -0,0 +1,104 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +define dso_local noundef i32 @dsps_add_f32_ansi(ptr noundef readonly %input1, ptr noundef readonly %input2, ptr noundef writeonly %output, i32 noundef %len, i32 noundef %step1, i32 noundef %step2, i32 noundef %step_out) local_unnamed_addr { +; CHECK-LABEL: define 
dso_local noundef i32 @dsps_add_f32_ansi( +; CHECK-SAME: ptr noundef readonly [[INPUT1:%.*]], ptr noundef readonly [[INPUT2:%.*]], ptr noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[STEP2:%.*]], i32 noundef [[STEP_OUT:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[INPUT1]], null +; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[INPUT2]], null +; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] +; CHECK-NEXT: [[CMP4:%.*]] = icmp eq ptr [[OUTPUT]], null +; CHECK-NEXT: [[OR_COND19:%.*]] = or i1 [[OR_COND]], [[CMP4]] +; CHECK-NEXT: br i1 [[OR_COND19]], label [[RETURN:%.*]], label [[IF_END:%.*]] +; CHECK: if.end: +; CHECK-NEXT: [[CMP41:%.*]] = icmp sgt i32 [[LEN]], 2 +; CHECK-NEXT: br i1 [[CMP41]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK: for.cond.preheader: +; CHECK-NEXT: [[CMP720:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP720]], label [[FOR_BODY_CLONE:%.*]], label [[RETURN]] +; CHECK: for.body: +; CHECK-NEXT: [[I_021:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[IF_END]] ] +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_021]], [[STEP1]] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL]] +; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[MUL8:%.*]] = mul nsw i32 [[I_021]], [[STEP2]] +; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[MUL8]] +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX9]], align 4 +; CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[TMP1]] +; CHECK-NEXT: [[MUL10:%.*]] = mul nsw i32 [[I_021]], [[STEP_OUT]] +; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL10]] +; CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX11]], align 4 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_021]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] +; 
CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[RETURN]], label [[FOR_BODY]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[I_021_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[MUL_CLONE:%.*]] = mul nsw i32 [[I_021_CLONE]], [[STEP1]] +; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL_CLONE]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[MUL8_CLONE:%.*]] = mul nsw i32 [[I_021_CLONE]], [[STEP2]] +; CHECK-NEXT: [[ARRAYIDX9_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[MUL8_CLONE]] +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX9_CLONE]], align 4 +; CHECK-NEXT: [[ADD_CLONE:%.*]] = fadd float [[TMP2]], [[TMP3]] +; CHECK-NEXT: [[MUL10_CLONE:%.*]] = mul nsw i32 [[I_021_CLONE]], [[STEP_OUT]] +; CHECK-NEXT: [[ARRAYIDX11_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL10_CLONE]] +; CHECK-NEXT: store float [[ADD_CLONE]], ptr [[ARRAYIDX11_CLONE]], align 4 +; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[I_021_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[RETURN]], label [[FOR_BODY_CLONE]] +; CHECK: return: +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: ret i32 [[RETVAL_0]] +; +entry: + %cmp = icmp eq ptr %input1, null + %cmp1 = icmp eq ptr %input2, null + %or.cond = or i1 %cmp, %cmp1 + %cmp4 = icmp eq ptr %output, null + %or.cond19 = or i1 %or.cond, %cmp4 + br i1 %or.cond19, label %return, label %if.end + +if.end: ; preds = %entry + %cmp41 = icmp sgt i32 %len, 2 + br i1 %cmp41, label %for.body, label %for.cond.preheader + +for.cond.preheader: ; preds = %if.end + %cmp720 = icmp sgt i32 %len, 0 + br i1 %cmp720, label %for.body.clone, label %return + +for.body: ; preds = 
%for.body, %if.end + %i.021 = phi i32 [ %inc, %for.body ], [ 0, %if.end ] + %mul = mul nsw i32 %i.021, %step1 + %arrayidx = getelementptr inbounds float, ptr %input1, i32 %mul + %0 = load float, ptr %arrayidx, align 4 + %mul8 = mul nsw i32 %i.021, %step2 + %arrayidx9 = getelementptr inbounds float, ptr %input2, i32 %mul8 + %1 = load float, ptr %arrayidx9, align 4 + %add = fadd float %0, %1 + %mul10 = mul nsw i32 %i.021, %step_out + %arrayidx11 = getelementptr inbounds float, ptr %output, i32 %mul10 + store float %add, ptr %arrayidx11, align 4 + %inc = add nuw nsw i32 %i.021, 1 + %exitcond.not = icmp eq i32 %inc, %len + br i1 %exitcond.not, label %return, label %for.body + +for.body.clone: ; preds = %for.body.clone, %for.cond.preheader + %i.021.clone = phi i32 [ %inc.clone, %for.body.clone ], [ 0, %for.cond.preheader ] + %mul.clone = mul nsw i32 %i.021.clone, %step1 + %arrayidx.clone = getelementptr inbounds float, ptr %input1, i32 %mul.clone + %2 = load float, ptr %arrayidx.clone, align 4 + %mul8.clone = mul nsw i32 %i.021.clone, %step2 + %arrayidx9.clone = getelementptr inbounds float, ptr %input2, i32 %mul8.clone + %3 = load float, ptr %arrayidx9.clone, align 4 + %add.clone = fadd float %2, %3 + %mul10.clone = mul nsw i32 %i.021.clone, %step_out + %arrayidx11.clone = getelementptr inbounds float, ptr %output, i32 %mul10.clone + store float %add.clone, ptr %arrayidx11.clone, align 4 + %inc.clone = add nuw nsw i32 %i.021.clone, 1 + %exitcond.not.clone = icmp eq i32 %inc.clone, %len + br i1 %exitcond.not.clone, label %return, label %for.body.clone + +return: ; preds = %for.body.clone, %for.body, %for.cond.preheader, %entry + %retval.0 = phi i32 [ 458755, %entry ], [ 0, %for.cond.preheader ], [ 0, %for.body ], [ 0, %for.body.clone ] + ret i32 %retval.0 +} diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/addc.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/addc.ll new file mode 100644 index 0000000000000..dd35ce0373fc6 --- /dev/null +++ 
b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/addc.ll @@ -0,0 +1,88 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +define dso_local noundef i32 @dsps_addc_f32_ansi(ptr noundef readonly %input, ptr noundef writeonly %output, i32 noundef %len, float noundef %C, i32 noundef %step_in, i32 noundef %step_out) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_addc_f32_ansi( +; CHECK-SAME: ptr noundef readonly [[INPUT:%.*]], ptr noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], float noundef [[C:%.*]], i32 noundef [[STEP_IN:%.*]], i32 noundef [[STEP_OUT:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[INPUT]], null +; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[OUTPUT]], null +; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] +; CHECK-NEXT: br i1 [[OR_COND]], label [[RETURN:%.*]], label [[IF_END:%.*]] +; CHECK: if.end: +; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[LEN]], 2 +; CHECK-NEXT: br i1 [[CMP4]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK: for.cond.preheader: +; CHECK-NEXT: [[CMP412:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP412]], label [[FOR_BODY_CLONE:%.*]], label [[RETURN]] +; CHECK: for.body: +; CHECK-NEXT: [[I_013:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[IF_END]] ] +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_013]], [[STEP_IN]] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[MUL]] +; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[C]] +; CHECK-NEXT: [[MUL5:%.*]] = mul nsw i32 [[I_013]], [[STEP_OUT]] +; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL5]] +; CHECK-NEXT: store float 
[[ADD]], ptr [[ARRAYIDX6]], align 4 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_013]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[RETURN]], label [[FOR_BODY]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[I_013_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[MUL_CLONE:%.*]] = mul nsw i32 [[I_013_CLONE]], [[STEP_IN]] +; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[MUL_CLONE]] +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[ADD_CLONE:%.*]] = fadd float [[TMP1]], [[C]] +; CHECK-NEXT: [[MUL5_CLONE:%.*]] = mul nsw i32 [[I_013_CLONE]], [[STEP_OUT]] +; CHECK-NEXT: [[ARRAYIDX6_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL5_CLONE]] +; CHECK-NEXT: store float [[ADD_CLONE]], ptr [[ARRAYIDX6_CLONE]], align 4 +; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[I_013_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[RETURN]], label [[FOR_BODY_CLONE]] +; CHECK: return: +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: ret i32 [[RETVAL_0]] +; +entry: + %cmp = icmp eq ptr %input, null + %cmp1 = icmp eq ptr %output, null + %or.cond = or i1 %cmp, %cmp1 + br i1 %or.cond, label %return, label %if.end + +if.end: ; preds = %entry + %cmp4 = icmp sgt i32 %len, 2 + br i1 %cmp4, label %for.body, label %for.cond.preheader + +for.cond.preheader: ; preds = %if.end + %cmp412 = icmp sgt i32 %len, 0 + br i1 %cmp412, label %for.body.clone, label %return + +for.body: ; preds = %for.body, %if.end + %i.013 = phi i32 [ %inc, %for.body ], [ 0, %if.end ] + %mul = mul nsw i32 %i.013, %step_in + %arrayidx = getelementptr inbounds float, ptr %input, i32 %mul + %0 = 
load float, ptr %arrayidx, align 4 + %add = fadd float %0, %C + %mul5 = mul nsw i32 %i.013, %step_out + %arrayidx6 = getelementptr inbounds float, ptr %output, i32 %mul5 + store float %add, ptr %arrayidx6, align 4 + %inc = add nuw nsw i32 %i.013, 1 + %exitcond.not = icmp eq i32 %inc, %len + br i1 %exitcond.not, label %return, label %for.body + +for.body.clone: ; preds = %for.body.clone, %for.cond.preheader + %i.013.clone = phi i32 [ %inc.clone, %for.body.clone ], [ 0, %for.cond.preheader ] + %mul.clone = mul nsw i32 %i.013.clone, %step_in + %arrayidx.clone = getelementptr inbounds float, ptr %input, i32 %mul.clone + %1 = load float, ptr %arrayidx.clone, align 4 + %add.clone = fadd float %1, %C + %mul5.clone = mul nsw i32 %i.013.clone, %step_out + %arrayidx6.clone = getelementptr inbounds float, ptr %output, i32 %mul5.clone + store float %add.clone, ptr %arrayidx6.clone, align 4 + %inc.clone = add nuw nsw i32 %i.013.clone, 1 + %exitcond.not.clone = icmp eq i32 %inc.clone, %len + br i1 %exitcond.not.clone, label %return, label %for.body.clone + +return: ; preds = %for.body.clone, %for.body, %for.cond.preheader, %entry + %retval.0 = phi i32 [ 458755, %entry ], [ 0, %for.cond.preheader ], [ 0, %for.body ], [ 0, %for.body.clone ] + ret i32 %retval.0 +} diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/ccorr.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/ccorr.ll new file mode 100644 index 0000000000000..11c9c556d526e --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/ccorr.ll @@ -0,0 +1,241 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +define dso_local noundef i32 @dsps_ccorr_f32_ansi(ptr noundef readonly %Signal, i32 noundef %siglen, ptr noundef readonly %Kernel, i32 noundef %kernlen, ptr noundef writeonly %corrvout) 
local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_ccorr_f32_ansi( +; CHECK-SAME: ptr noundef readonly [[SIGNAL:%.*]], i32 noundef [[SIGLEN:%.*]], ptr noundef readonly [[KERNEL:%.*]], i32 noundef [[KERNLEN:%.*]], ptr noundef writeonly [[CORRVOUT:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[SIGNAL]], null +; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[KERNEL]], null +; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] +; CHECK-NEXT: [[CMP4:%.*]] = icmp eq ptr [[CORRVOUT]], null +; CHECK-NEXT: [[OR_COND122:%.*]] = or i1 [[OR_COND]], [[CMP4]] +; CHECK-NEXT: br i1 [[OR_COND122]], label [[RETURN:%.*]], label [[IF_END6:%.*]] +; CHECK: if.end6: +; CHECK-NEXT: [[CMP7:%.*]] = icmp slt i32 [[SIGLEN]], [[KERNLEN]] +; CHECK-NEXT: br i1 [[CMP7]], label [[IF_THEN8:%.*]], label [[IF_END9:%.*]] +; CHECK: if.then8: +; CHECK-NEXT: br label [[IF_END9]] +; CHECK: if.end9: +; CHECK-NEXT: [[LKERN_0:%.*]] = phi i32 [ [[SIGLEN]], [[IF_THEN8]] ], [ [[KERNLEN]], [[IF_END6]] ] +; CHECK-NEXT: [[LSIG_0:%.*]] = phi i32 [ [[KERNLEN]], [[IF_THEN8]] ], [ [[SIGLEN]], [[IF_END6]] ] +; CHECK-NEXT: [[KERN_0:%.*]] = phi ptr [ [[SIGNAL]], [[IF_THEN8]] ], [ [[KERNEL]], [[IF_END6]] ] +; CHECK-NEXT: [[SIG_0:%.*]] = phi ptr [ [[KERNEL]], [[IF_THEN8]] ], [ [[SIGNAL]], [[IF_END6]] ] +; CHECK-NEXT: [[CMP10124:%.*]] = icmp sgt i32 [[LKERN_0]], 0 +; CHECK-NEXT: br i1 [[CMP10124]], label [[FOR_BODY:%.*]], label [[FOR_COND22_PREHEADER:%.*]] +; CHECK: for.cond22.preheader: +; CHECK-NEXT: [[CMP23128:%.*]] = icmp slt i32 [[LKERN_0]], [[LSIG_0]] +; CHECK-NEXT: br i1 [[CMP23128]], label [[FOR_BODY25:%.*]], label [[FOR_COND45_PREHEADER:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_END:%.*]] ], [ 1, [[IF_END9]] ] +; CHECK-NEXT: [[N_0125:%.*]] = phi i32 [ [[INC19:%.*]], [[FOR_END]] ], [ 0, [[IF_END9]] ] +; CHECK-NEXT: [[TMP0:%.*]] = xor i32 [[N_0125]], -1 +; CHECK-NEXT: [[SUB11:%.*]] = add nsw i32 
[[LKERN_0]], [[TMP0]] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[CORRVOUT]], i32 [[N_0125]] +; CHECK-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: br label [[FOR_BODY14:%.*]] +; CHECK: for.body14: +; CHECK-NEXT: [[K_0123:%.*]] = phi i32 [ 0, [[FOR_BODY]] ], [ [[INC:%.*]], [[FOR_BODY14]] ] +; CHECK-NEXT: [[TMP1:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP4:%.*]], [[FOR_BODY14]] ] +; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K_0123]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX15]], align 4 +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[SUB11]], [[K_0123]] +; CHECK-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[ADD]] +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX16]], align 4 +; CHECK-NEXT: [[TMP4]] = tail call float @llvm.fmuladd.f32(float [[TMP2]], float [[TMP3]], float [[TMP1]]) +; CHECK-NEXT: store float [[TMP4]], ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[K_0123]], 1 +; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[INDVARS_IV]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY14]] +; CHECK: for.end: +; CHECK-NEXT: [[INC19]] = add nuw nsw i32 [[N_0125]], 1 +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw i32 [[INDVARS_IV]], 1 +; CHECK-NEXT: [[EXITCOND134_NOT:%.*]] = icmp eq i32 [[INC19]], [[LKERN_0]] +; CHECK-NEXT: br i1 [[EXITCOND134_NOT]], label [[FOR_COND22_PREHEADER]], label [[FOR_BODY]] +; CHECK: for.cond45.preheader: +; CHECK-NEXT: [[ADD46:%.*]] = add i32 [[SIGLEN]], -1 +; CHECK-NEXT: [[SUB47:%.*]] = add i32 [[ADD46]], [[KERNLEN]] +; CHECK-NEXT: [[CMP48132:%.*]] = icmp slt i32 [[LSIG_0]], [[SUB47]] +; CHECK-NEXT: br i1 [[CMP48132]], label [[FOR_BODY50_LR_PH:%.*]], label [[RETURN]] +; CHECK: for.body50.lr.ph: +; CHECK-NEXT: [[SUB57:%.*]] = add nsw i32 [[LSIG_0]], -1 +; CHECK-NEXT: br label [[FOR_BODY50:%.*]] +; CHECK: for.body25: +; CHECK-NEXT: 
[[N21_0129:%.*]] = phi i32 [ [[INC42:%.*]], [[FOR_END40:%.*]] ], [ [[LKERN_0]], [[FOR_COND22_PREHEADER]] ] +; CHECK-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds float, ptr [[CORRVOUT]], i32 [[N21_0129]] +; CHECK-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX28]], align 4 +; CHECK-NEXT: [[SUB29:%.*]] = sub nuw nsw i32 [[N21_0129]], [[LKERN_0]] +; CHECK-NEXT: [[ADD30:%.*]] = add nsw i32 [[SUB29]], 1 +; CHECK-NEXT: [[CMP32_NOT126:%.*]] = icmp ugt i32 [[ADD30]], [[N21_0129]] +; CHECK-NEXT: br i1 [[CMP32_NOT126]], label [[FOR_END40]], label [[FOR_BODY33:%.*]] +; CHECK: for.body33: +; CHECK-NEXT: [[TMP5:%.*]] = phi float [ [[TMP8:%.*]], [[FOR_BODY33]] ], [ 0.000000e+00, [[FOR_BODY25]] ] +; CHECK-NEXT: [[K27_0127:%.*]] = phi i32 [ [[INC39:%.*]], [[FOR_BODY33]] ], [ [[ADD30]], [[FOR_BODY25]] ] +; CHECK-NEXT: [[ARRAYIDX34:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K27_0127]] +; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX34]], align 4 +; CHECK-NEXT: [[SUB35:%.*]] = sub i32 [[K27_0127]], [[ADD30]] +; CHECK-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB35]] +; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX36]], align 4 +; CHECK-NEXT: [[TMP8]] = tail call float @llvm.fmuladd.f32(float [[TMP6]], float [[TMP7]], float [[TMP5]]) +; CHECK-NEXT: store float [[TMP8]], ptr [[ARRAYIDX28]], align 4 +; CHECK-NEXT: [[INC39]] = add i32 [[K27_0127]], 1 +; CHECK-NEXT: [[CMP32_NOT:%.*]] = icmp ugt i32 [[INC39]], [[N21_0129]] +; CHECK-NEXT: br i1 [[CMP32_NOT]], label [[FOR_END40]], label [[FOR_BODY33]] +; CHECK: for.end40: +; CHECK-NEXT: [[INC42]] = add nuw nsw i32 [[N21_0129]], 1 +; CHECK-NEXT: [[EXITCOND135_NOT:%.*]] = icmp eq i32 [[INC42]], [[LSIG_0]] +; CHECK-NEXT: br i1 [[EXITCOND135_NOT]], label [[FOR_COND45_PREHEADER]], label [[FOR_BODY25]] +; CHECK: for.body50: +; CHECK-NEXT: [[N44_0133:%.*]] = phi i32 [ [[LSIG_0]], [[FOR_BODY50_LR_PH]] ], [ [[INC69:%.*]], [[FOR_END67:%.*]] ] +; CHECK-NEXT: [[ARRAYIDX54:%.*]] = 
getelementptr inbounds float, ptr [[CORRVOUT]], i32 [[N44_0133]] +; CHECK-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX54]], align 4 +; CHECK-NEXT: [[SUB55:%.*]] = sub nsw i32 [[N44_0133]], [[LKERN_0]] +; CHECK-NEXT: [[ADD56:%.*]] = add nsw i32 [[SUB55]], 1 +; CHECK-NEXT: [[CMP59_NOT130:%.*]] = icmp ugt i32 [[ADD56]], [[SUB57]] +; CHECK-NEXT: br i1 [[CMP59_NOT130]], label [[FOR_END67]], label [[FOR_BODY60:%.*]] +; CHECK: for.body60: +; CHECK-NEXT: [[TMP9:%.*]] = phi float [ [[TMP12:%.*]], [[FOR_BODY60]] ], [ 0.000000e+00, [[FOR_BODY50]] ] +; CHECK-NEXT: [[K53_0131:%.*]] = phi i32 [ [[INC66:%.*]], [[FOR_BODY60]] ], [ [[ADD56]], [[FOR_BODY50]] ] +; CHECK-NEXT: [[ARRAYIDX61:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K53_0131]] +; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX61]], align 4 +; CHECK-NEXT: [[SUB62:%.*]] = sub i32 [[K53_0131]], [[ADD56]] +; CHECK-NEXT: [[ARRAYIDX63:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB62]] +; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX63]], align 4 +; CHECK-NEXT: [[TMP12]] = tail call float @llvm.fmuladd.f32(float [[TMP10]], float [[TMP11]], float [[TMP9]]) +; CHECK-NEXT: store float [[TMP12]], ptr [[ARRAYIDX54]], align 4 +; CHECK-NEXT: [[INC66]] = add i32 [[K53_0131]], 1 +; CHECK-NEXT: [[CMP59_NOT:%.*]] = icmp ugt i32 [[INC66]], [[SUB57]] +; CHECK-NEXT: br i1 [[CMP59_NOT]], label [[FOR_END67]], label [[FOR_BODY60]] +; CHECK: for.end67: +; CHECK-NEXT: [[INC69]] = add nsw i32 [[N44_0133]], 1 +; CHECK-NEXT: [[EXITCOND136_NOT:%.*]] = icmp eq i32 [[INC69]], [[SUB47]] +; CHECK-NEXT: br i1 [[EXITCOND136_NOT]], label [[RETURN]], label [[FOR_BODY50]] +; CHECK: return: +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND45_PREHEADER]] ], [ 0, [[FOR_END67]] ] +; CHECK-NEXT: ret i32 [[RETVAL_0]] +; +entry: + %cmp = icmp eq ptr %Signal, null + %cmp1 = icmp eq ptr %Kernel, null + %or.cond = or i1 %cmp, %cmp1 + %cmp4 = icmp eq ptr %corrvout, null + %or.cond122 
= or i1 %or.cond, %cmp4 + br i1 %or.cond122, label %return, label %if.end6 + +if.end6: ; preds = %entry + %cmp7 = icmp slt i32 %siglen, %kernlen + br i1 %cmp7, label %if.then8, label %if.end9 + +if.then8: ; preds = %if.end6 + br label %if.end9 + +if.end9: ; preds = %if.then8, %if.end6 + %lkern.0 = phi i32 [ %siglen, %if.then8 ], [ %kernlen, %if.end6 ] + %lsig.0 = phi i32 [ %kernlen, %if.then8 ], [ %siglen, %if.end6 ] + %kern.0 = phi ptr [ %Signal, %if.then8 ], [ %Kernel, %if.end6 ] + %sig.0 = phi ptr [ %Kernel, %if.then8 ], [ %Signal, %if.end6 ] + %cmp10124 = icmp sgt i32 %lkern.0, 0 + br i1 %cmp10124, label %for.body, label %for.cond22.preheader + +for.cond22.preheader: ; preds = %for.end, %if.end9 + %cmp23128 = icmp slt i32 %lkern.0, %lsig.0 + br i1 %cmp23128, label %for.body25, label %for.cond45.preheader + +for.body: ; preds = %for.end, %if.end9 + %indvars.iv = phi i32 [ %indvars.iv.next, %for.end ], [ 1, %if.end9 ] + %n.0125 = phi i32 [ %inc19, %for.end ], [ 0, %if.end9 ] + %0 = xor i32 %n.0125, -1 + %sub11 = add nsw i32 %lkern.0, %0 + %arrayidx = getelementptr inbounds float, ptr %corrvout, i32 %n.0125 + store float 0.000000e+00, ptr %arrayidx, align 4 + br label %for.body14 + +for.body14: ; preds = %for.body14, %for.body + %k.0123 = phi i32 [ 0, %for.body ], [ %inc, %for.body14 ] + %1 = phi float [ 0.000000e+00, %for.body ], [ %4, %for.body14 ] + %arrayidx15 = getelementptr inbounds float, ptr %sig.0, i32 %k.0123 + %2 = load float, ptr %arrayidx15, align 4 + %add = add i32 %sub11, %k.0123 + %arrayidx16 = getelementptr inbounds float, ptr %kern.0, i32 %add + %3 = load float, ptr %arrayidx16, align 4 + %4 = tail call float @llvm.fmuladd.f32(float %2, float %3, float %1) + store float %4, ptr %arrayidx, align 4 + %inc = add nuw nsw i32 %k.0123, 1 + %exitcond = icmp eq i32 %inc, %indvars.iv + br i1 %exitcond, label %for.end, label %for.body14 + +for.end: ; preds = %for.body14 + %inc19 = add nuw nsw i32 %n.0125, 1 + %indvars.iv.next = add nuw i32 %indvars.iv, 1 + 
%exitcond134.not = icmp eq i32 %inc19, %lkern.0 + br i1 %exitcond134.not, label %for.cond22.preheader, label %for.body + +for.cond45.preheader: ; preds = %for.end40, %for.cond22.preheader + %add46 = add i32 %siglen, -1 + %sub47 = add i32 %add46, %kernlen + %cmp48132 = icmp slt i32 %lsig.0, %sub47 + br i1 %cmp48132, label %for.body50.lr.ph, label %return + +for.body50.lr.ph: ; preds = %for.cond45.preheader + %sub57 = add nsw i32 %lsig.0, -1 + br label %for.body50 + +for.body25: ; preds = %for.end40, %for.cond22.preheader + %n21.0129 = phi i32 [ %inc42, %for.end40 ], [ %lkern.0, %for.cond22.preheader ] + %arrayidx28 = getelementptr inbounds float, ptr %corrvout, i32 %n21.0129 + store float 0.000000e+00, ptr %arrayidx28, align 4 + %sub29 = sub nuw nsw i32 %n21.0129, %lkern.0 + %add30 = add nsw i32 %sub29, 1 + %cmp32.not126 = icmp ugt i32 %add30, %n21.0129 + br i1 %cmp32.not126, label %for.end40, label %for.body33 + +for.body33: ; preds = %for.body33, %for.body25 + %5 = phi float [ %8, %for.body33 ], [ 0.000000e+00, %for.body25 ] + %k27.0127 = phi i32 [ %inc39, %for.body33 ], [ %add30, %for.body25 ] + %arrayidx34 = getelementptr inbounds float, ptr %sig.0, i32 %k27.0127 + %6 = load float, ptr %arrayidx34, align 4 + %sub35 = sub i32 %k27.0127, %add30 + %arrayidx36 = getelementptr inbounds float, ptr %kern.0, i32 %sub35 + %7 = load float, ptr %arrayidx36, align 4 + %8 = tail call float @llvm.fmuladd.f32(float %6, float %7, float %5) + store float %8, ptr %arrayidx28, align 4 + %inc39 = add i32 %k27.0127, 1 + %cmp32.not = icmp ugt i32 %inc39, %n21.0129 + br i1 %cmp32.not, label %for.end40, label %for.body33 + +for.end40: ; preds = %for.body33, %for.body25 + %inc42 = add nuw nsw i32 %n21.0129, 1 + %exitcond135.not = icmp eq i32 %inc42, %lsig.0 + br i1 %exitcond135.not, label %for.cond45.preheader, label %for.body25 + +for.body50: ; preds = %for.end67, %for.body50.lr.ph + %n44.0133 = phi i32 [ %lsig.0, %for.body50.lr.ph ], [ %inc69, %for.end67 ] + %arrayidx54 = 
getelementptr inbounds float, ptr %corrvout, i32 %n44.0133 + store float 0.000000e+00, ptr %arrayidx54, align 4 + %sub55 = sub nsw i32 %n44.0133, %lkern.0 + %add56 = add nsw i32 %sub55, 1 + %cmp59.not130 = icmp ugt i32 %add56, %sub57 + br i1 %cmp59.not130, label %for.end67, label %for.body60 + +for.body60: ; preds = %for.body60, %for.body50 + %9 = phi float [ %12, %for.body60 ], [ 0.000000e+00, %for.body50 ] + %k53.0131 = phi i32 [ %inc66, %for.body60 ], [ %add56, %for.body50 ] + %arrayidx61 = getelementptr inbounds float, ptr %sig.0, i32 %k53.0131 + %10 = load float, ptr %arrayidx61, align 4 + %sub62 = sub i32 %k53.0131, %add56 + %arrayidx63 = getelementptr inbounds float, ptr %kern.0, i32 %sub62 + %11 = load float, ptr %arrayidx63, align 4 + %12 = tail call float @llvm.fmuladd.f32(float %10, float %11, float %9) + store float %12, ptr %arrayidx54, align 4 + %inc66 = add i32 %k53.0131, 1 + %cmp59.not = icmp ugt i32 %inc66, %sub57 + br i1 %cmp59.not, label %for.end67, label %for.body60 + +for.end67: ; preds = %for.body60, %for.body50 + %inc69 = add nsw i32 %n44.0133, 1 + %exitcond136.not = icmp eq i32 %inc69, %sub47 + br i1 %exitcond136.not, label %return, label %for.body50 + +return: ; preds = %for.end67, %for.cond45.preheader, %entry + %retval.0 = phi i32 [ 458755, %entry ], [ 0, %for.cond45.preheader ], [ 0, %for.end67 ] + ret i32 %retval.0 +} diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/conv.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/conv.ll new file mode 100644 index 0000000000000..33a08dfbf9df1 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/conv.ll @@ -0,0 +1,237 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +define dso_local noundef i32 @dsps_conv_f32_ansi(ptr noundef readonly %Signal, i32 noundef 
%siglen, ptr noundef readonly %Kernel, i32 noundef %kernlen, ptr noundef writeonly %convout) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_conv_f32_ansi( +; CHECK-SAME: ptr noundef readonly [[SIGNAL:%.*]], i32 noundef [[SIGLEN:%.*]], ptr noundef readonly [[KERNEL:%.*]], i32 noundef [[KERNLEN:%.*]], ptr noundef writeonly [[CONVOUT:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[SIGNAL]], null +; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[KERNEL]], null +; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] +; CHECK-NEXT: [[CMP4:%.*]] = icmp eq ptr [[CONVOUT]], null +; CHECK-NEXT: [[OR_COND118:%.*]] = or i1 [[OR_COND]], [[CMP4]] +; CHECK-NEXT: br i1 [[OR_COND118]], label [[RETURN:%.*]], label [[IF_END6:%.*]] +; CHECK: if.end6: +; CHECK-NEXT: [[CMP7:%.*]] = icmp slt i32 [[SIGLEN]], [[KERNLEN]] +; CHECK-NEXT: br i1 [[CMP7]], label [[IF_THEN8:%.*]], label [[IF_END9:%.*]] +; CHECK: if.then8: +; CHECK-NEXT: br label [[IF_END9]] +; CHECK: if.end9: +; CHECK-NEXT: [[LKERN_0:%.*]] = phi i32 [ [[SIGLEN]], [[IF_THEN8]] ], [ [[KERNLEN]], [[IF_END6]] ] +; CHECK-NEXT: [[LSIG_0:%.*]] = phi i32 [ [[KERNLEN]], [[IF_THEN8]] ], [ [[SIGLEN]], [[IF_END6]] ] +; CHECK-NEXT: [[KERN_0:%.*]] = phi ptr [ [[SIGNAL]], [[IF_THEN8]] ], [ [[KERNEL]], [[IF_END6]] ] +; CHECK-NEXT: [[SIG_0:%.*]] = phi ptr [ [[KERNEL]], [[IF_THEN8]] ], [ [[SIGNAL]], [[IF_END6]] ] +; CHECK-NEXT: [[CMP10120:%.*]] = icmp sgt i32 [[LKERN_0]], 0 +; CHECK-NEXT: br i1 [[CMP10120]], label [[FOR_BODY:%.*]], label [[FOR_COND21_PREHEADER:%.*]] +; CHECK: for.cond21.preheader: +; CHECK-NEXT: [[CMP22125:%.*]] = icmp slt i32 [[LKERN_0]], [[LSIG_0]] +; CHECK-NEXT: br i1 [[CMP22125]], label [[FOR_BODY24:%.*]], label [[FOR_COND42_PREHEADER:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_END:%.*]] ], [ 1, [[IF_END9]] ] +; CHECK-NEXT: [[N_0121:%.*]] = phi i32 [ [[INC18:%.*]], [[FOR_END]] ], [ 0, [[IF_END9]] ] +; 
CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[CONVOUT]], i32 [[N_0121]] +; CHECK-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: br label [[FOR_BODY13:%.*]] +; CHECK: for.body13: +; CHECK-NEXT: [[K_0119:%.*]] = phi i32 [ 0, [[FOR_BODY]] ], [ [[INC:%.*]], [[FOR_BODY13]] ] +; CHECK-NEXT: [[TMP0:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP3:%.*]], [[FOR_BODY13]] ] +; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K_0119]] +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX14]], align 4 +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[N_0121]], [[K_0119]] +; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX15]], align 4 +; CHECK-NEXT: [[TMP3]] = tail call float @llvm.fmuladd.f32(float [[TMP1]], float [[TMP2]], float [[TMP0]]) +; CHECK-NEXT: store float [[TMP3]], ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[K_0119]], 1 +; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[INDVARS_IV]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY13]] +; CHECK: for.end: +; CHECK-NEXT: [[INC18]] = add nuw nsw i32 [[N_0121]], 1 +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw i32 [[INDVARS_IV]], 1 +; CHECK-NEXT: [[EXITCOND132_NOT:%.*]] = icmp eq i32 [[INC18]], [[LKERN_0]] +; CHECK-NEXT: br i1 [[EXITCOND132_NOT]], label [[FOR_COND21_PREHEADER]], label [[FOR_BODY]] +; CHECK: for.cond42.preheader: +; CHECK-NEXT: [[ADD43:%.*]] = add i32 [[SIGLEN]], -1 +; CHECK-NEXT: [[SUB44:%.*]] = add i32 [[ADD43]], [[KERNLEN]] +; CHECK-NEXT: [[CMP45130:%.*]] = icmp slt i32 [[LSIG_0]], [[SUB44]] +; CHECK-NEXT: br i1 [[CMP45130]], label [[FOR_BODY47_LR_PH:%.*]], label [[RETURN]] +; CHECK: for.body47.lr.ph: +; CHECK-NEXT: [[SUB54:%.*]] = add nsw i32 [[LSIG_0]], -1 +; CHECK-NEXT: br label [[FOR_BODY47:%.*]] +; CHECK: for.body24: +; CHECK-NEXT: [[N20_0126:%.*]] = phi i32 [ 
[[INC39:%.*]], [[FOR_END37:%.*]] ], [ [[LKERN_0]], [[FOR_COND21_PREHEADER]] ] +; CHECK-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds float, ptr [[CONVOUT]], i32 [[N20_0126]] +; CHECK-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX26]], align 4 +; CHECK-NEXT: [[SUB27:%.*]] = sub nuw nsw i32 [[N20_0126]], [[LKERN_0]] +; CHECK-NEXT: [[K25_0122:%.*]] = add i32 [[SUB27]], 1 +; CHECK-NEXT: [[CMP29_NOT123:%.*]] = icmp ugt i32 [[K25_0122]], [[N20_0126]] +; CHECK-NEXT: br i1 [[CMP29_NOT123]], label [[FOR_END37]], label [[FOR_BODY30:%.*]] +; CHECK: for.body30: +; CHECK-NEXT: [[TMP4:%.*]] = phi float [ [[TMP7:%.*]], [[FOR_BODY30]] ], [ 0.000000e+00, [[FOR_BODY24]] ] +; CHECK-NEXT: [[K25_0124:%.*]] = phi i32 [ [[K25_0:%.*]], [[FOR_BODY30]] ], [ [[K25_0122]], [[FOR_BODY24]] ] +; CHECK-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K25_0124]] +; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX31]], align 4 +; CHECK-NEXT: [[SUB32:%.*]] = sub i32 [[N20_0126]], [[K25_0124]] +; CHECK-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB32]] +; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX33]], align 4 +; CHECK-NEXT: [[TMP7]] = tail call float @llvm.fmuladd.f32(float [[TMP5]], float [[TMP6]], float [[TMP4]]) +; CHECK-NEXT: store float [[TMP7]], ptr [[ARRAYIDX26]], align 4 +; CHECK-NEXT: [[K25_0]] = add i32 [[K25_0124]], 1 +; CHECK-NEXT: [[CMP29_NOT:%.*]] = icmp ugt i32 [[K25_0]], [[N20_0126]] +; CHECK-NEXT: br i1 [[CMP29_NOT]], label [[FOR_END37]], label [[FOR_BODY30]] +; CHECK: for.end37: +; CHECK-NEXT: [[INC39]] = add nuw nsw i32 [[N20_0126]], 1 +; CHECK-NEXT: [[EXITCOND133_NOT:%.*]] = icmp eq i32 [[INC39]], [[LSIG_0]] +; CHECK-NEXT: br i1 [[EXITCOND133_NOT]], label [[FOR_COND42_PREHEADER]], label [[FOR_BODY24]] +; CHECK: for.body47: +; CHECK-NEXT: [[N41_0131:%.*]] = phi i32 [ [[LSIG_0]], [[FOR_BODY47_LR_PH]] ], [ [[INC66:%.*]], [[FOR_END64:%.*]] ] +; CHECK-NEXT: [[ARRAYIDX51:%.*]] = getelementptr inbounds 
float, ptr [[CONVOUT]], i32 [[N41_0131]] +; CHECK-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX51]], align 4 +; CHECK-NEXT: [[SUB52:%.*]] = sub nsw i32 [[N41_0131]], [[LKERN_0]] +; CHECK-NEXT: [[K50_0127:%.*]] = add i32 [[SUB52]], 1 +; CHECK-NEXT: [[CMP56_NOT128:%.*]] = icmp ugt i32 [[K50_0127]], [[SUB54]] +; CHECK-NEXT: br i1 [[CMP56_NOT128]], label [[FOR_END64]], label [[FOR_BODY57:%.*]] +; CHECK: for.body57: +; CHECK-NEXT: [[TMP8:%.*]] = phi float [ [[TMP11:%.*]], [[FOR_BODY57]] ], [ 0.000000e+00, [[FOR_BODY47]] ] +; CHECK-NEXT: [[K50_0129:%.*]] = phi i32 [ [[K50_0:%.*]], [[FOR_BODY57]] ], [ [[K50_0127]], [[FOR_BODY47]] ] +; CHECK-NEXT: [[ARRAYIDX58:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K50_0129]] +; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX58]], align 4 +; CHECK-NEXT: [[SUB59:%.*]] = sub i32 [[N41_0131]], [[K50_0129]] +; CHECK-NEXT: [[ARRAYIDX60:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB59]] +; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX60]], align 4 +; CHECK-NEXT: [[TMP11]] = tail call float @llvm.fmuladd.f32(float [[TMP9]], float [[TMP10]], float [[TMP8]]) +; CHECK-NEXT: store float [[TMP11]], ptr [[ARRAYIDX51]], align 4 +; CHECK-NEXT: [[K50_0]] = add i32 [[K50_0129]], 1 +; CHECK-NEXT: [[CMP56_NOT:%.*]] = icmp ugt i32 [[K50_0]], [[SUB54]] +; CHECK-NEXT: br i1 [[CMP56_NOT]], label [[FOR_END64]], label [[FOR_BODY57]] +; CHECK: for.end64: +; CHECK-NEXT: [[INC66]] = add nsw i32 [[N41_0131]], 1 +; CHECK-NEXT: [[EXITCOND134_NOT:%.*]] = icmp eq i32 [[INC66]], [[SUB44]] +; CHECK-NEXT: br i1 [[EXITCOND134_NOT]], label [[RETURN]], label [[FOR_BODY47]] +; CHECK: return: +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND42_PREHEADER]] ], [ 0, [[FOR_END64]] ] +; CHECK-NEXT: ret i32 [[RETVAL_0]] +; +entry: + %cmp = icmp eq ptr %Signal, null + %cmp1 = icmp eq ptr %Kernel, null + %or.cond = or i1 %cmp, %cmp1 + %cmp4 = icmp eq ptr %convout, null + %or.cond118 = or i1 %or.cond, 
%cmp4 + br i1 %or.cond118, label %return, label %if.end6 + +if.end6: ; preds = %entry + %cmp7 = icmp slt i32 %siglen, %kernlen + br i1 %cmp7, label %if.then8, label %if.end9 + +if.then8: ; preds = %if.end6 + br label %if.end9 + +if.end9: ; preds = %if.then8, %if.end6 + %lkern.0 = phi i32 [ %siglen, %if.then8 ], [ %kernlen, %if.end6 ] + %lsig.0 = phi i32 [ %kernlen, %if.then8 ], [ %siglen, %if.end6 ] + %kern.0 = phi ptr [ %Signal, %if.then8 ], [ %Kernel, %if.end6 ] + %sig.0 = phi ptr [ %Kernel, %if.then8 ], [ %Signal, %if.end6 ] + %cmp10120 = icmp sgt i32 %lkern.0, 0 + br i1 %cmp10120, label %for.body, label %for.cond21.preheader + +for.cond21.preheader: ; preds = %for.end, %if.end9 + %cmp22125 = icmp slt i32 %lkern.0, %lsig.0 + br i1 %cmp22125, label %for.body24, label %for.cond42.preheader + +for.body: ; preds = %for.end, %if.end9 + %indvars.iv = phi i32 [ %indvars.iv.next, %for.end ], [ 1, %if.end9 ] + %n.0121 = phi i32 [ %inc18, %for.end ], [ 0, %if.end9 ] + %arrayidx = getelementptr inbounds float, ptr %convout, i32 %n.0121 + store float 0.000000e+00, ptr %arrayidx, align 4 + br label %for.body13 + +for.body13: ; preds = %for.body13, %for.body + %k.0119 = phi i32 [ 0, %for.body ], [ %inc, %for.body13 ] + %0 = phi float [ 0.000000e+00, %for.body ], [ %3, %for.body13 ] + %arrayidx14 = getelementptr inbounds float, ptr %sig.0, i32 %k.0119 + %1 = load float, ptr %arrayidx14, align 4 + %sub = sub nsw i32 %n.0121, %k.0119 + %arrayidx15 = getelementptr inbounds float, ptr %kern.0, i32 %sub + %2 = load float, ptr %arrayidx15, align 4 + %3 = tail call float @llvm.fmuladd.f32(float %1, float %2, float %0) + store float %3, ptr %arrayidx, align 4 + %inc = add nuw nsw i32 %k.0119, 1 + %exitcond = icmp eq i32 %inc, %indvars.iv + br i1 %exitcond, label %for.end, label %for.body13 + +for.end: ; preds = %for.body13 + %inc18 = add nuw nsw i32 %n.0121, 1 + %indvars.iv.next = add nuw i32 %indvars.iv, 1 + %exitcond132.not = icmp eq i32 %inc18, %lkern.0 + br i1 %exitcond132.not, 
label %for.cond21.preheader, label %for.body + +for.cond42.preheader: ; preds = %for.end37, %for.cond21.preheader + %add43 = add i32 %siglen, -1 + %sub44 = add i32 %add43, %kernlen + %cmp45130 = icmp slt i32 %lsig.0, %sub44 + br i1 %cmp45130, label %for.body47.lr.ph, label %return + +for.body47.lr.ph: ; preds = %for.cond42.preheader + %sub54 = add nsw i32 %lsig.0, -1 + br label %for.body47 + +for.body24: ; preds = %for.end37, %for.cond21.preheader + %n20.0126 = phi i32 [ %inc39, %for.end37 ], [ %lkern.0, %for.cond21.preheader ] + %arrayidx26 = getelementptr inbounds float, ptr %convout, i32 %n20.0126 + store float 0.000000e+00, ptr %arrayidx26, align 4 + %sub27 = sub nuw nsw i32 %n20.0126, %lkern.0 + %k25.0122 = add i32 %sub27, 1 + %cmp29.not123 = icmp ugt i32 %k25.0122, %n20.0126 + br i1 %cmp29.not123, label %for.end37, label %for.body30 + +for.body30: ; preds = %for.body30, %for.body24 + %4 = phi float [ %7, %for.body30 ], [ 0.000000e+00, %for.body24 ] + %k25.0124 = phi i32 [ %k25.0, %for.body30 ], [ %k25.0122, %for.body24 ] + %arrayidx31 = getelementptr inbounds float, ptr %sig.0, i32 %k25.0124 + %5 = load float, ptr %arrayidx31, align 4 + %sub32 = sub i32 %n20.0126, %k25.0124 + %arrayidx33 = getelementptr inbounds float, ptr %kern.0, i32 %sub32 + %6 = load float, ptr %arrayidx33, align 4 + %7 = tail call float @llvm.fmuladd.f32(float %5, float %6, float %4) + store float %7, ptr %arrayidx26, align 4 + %k25.0 = add i32 %k25.0124, 1 + %cmp29.not = icmp ugt i32 %k25.0, %n20.0126 + br i1 %cmp29.not, label %for.end37, label %for.body30 + +for.end37: ; preds = %for.body30, %for.body24 + %inc39 = add nuw nsw i32 %n20.0126, 1 + %exitcond133.not = icmp eq i32 %inc39, %lsig.0 + br i1 %exitcond133.not, label %for.cond42.preheader, label %for.body24 + +for.body47: ; preds = %for.end64, %for.body47.lr.ph + %n41.0131 = phi i32 [ %lsig.0, %for.body47.lr.ph ], [ %inc66, %for.end64 ] + %arrayidx51 = getelementptr inbounds float, ptr %convout, i32 %n41.0131 + store float 
0.000000e+00, ptr %arrayidx51, align 4 + %sub52 = sub nsw i32 %n41.0131, %lkern.0 + %k50.0127 = add i32 %sub52, 1 + %cmp56.not128 = icmp ugt i32 %k50.0127, %sub54 + br i1 %cmp56.not128, label %for.end64, label %for.body57 + +for.body57: ; preds = %for.body57, %for.body47 + %8 = phi float [ %11, %for.body57 ], [ 0.000000e+00, %for.body47 ] + %k50.0129 = phi i32 [ %k50.0, %for.body57 ], [ %k50.0127, %for.body47 ] + %arrayidx58 = getelementptr inbounds float, ptr %sig.0, i32 %k50.0129 + %9 = load float, ptr %arrayidx58, align 4 + %sub59 = sub i32 %n41.0131, %k50.0129 + %arrayidx60 = getelementptr inbounds float, ptr %kern.0, i32 %sub59 + %10 = load float, ptr %arrayidx60, align 4 + %11 = tail call float @llvm.fmuladd.f32(float %9, float %10, float %8) + store float %11, ptr %arrayidx51, align 4 + %k50.0 = add i32 %k50.0129, 1 + %cmp56.not = icmp ugt i32 %k50.0, %sub54 + br i1 %cmp56.not, label %for.end64, label %for.body57 + +for.end64: ; preds = %for.body57, %for.body47 + %inc66 = add nsw i32 %n41.0131, 1 + %exitcond134.not = icmp eq i32 %inc66, %sub44 + br i1 %exitcond134.not, label %return, label %for.body47 + +return: ; preds = %for.end64, %for.cond42.preheader, %entry + %retval.0 = phi i32 [ 458755, %entry ], [ 0, %for.cond42.preheader ], [ 0, %for.end64 ] + ret i32 %retval.0 +} diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/corr.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/corr.ll new file mode 100644 index 0000000000000..cd8f939112a54 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/corr.ll @@ -0,0 +1,97 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +define dso_local noundef i32 @dsps_corr_f32_ansi(ptr noundef readonly %Signal, i32 noundef %siglen, ptr noundef readonly %Pattern, i32 noundef %patlen, ptr noundef 
writeonly %dest) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_corr_f32_ansi( +; CHECK-SAME: ptr noundef readonly [[SIGNAL:%.*]], i32 noundef [[SIGLEN:%.*]], ptr noundef readonly [[PATTERN:%.*]], i32 noundef [[PATLEN:%.*]], ptr noundef writeonly [[DEST:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[SIGNAL]], null +; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[PATTERN]], null +; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] +; CHECK-NEXT: [[CMP4:%.*]] = icmp eq ptr [[DEST]], null +; CHECK-NEXT: [[OR_COND33:%.*]] = or i1 [[OR_COND]], [[CMP4]] +; CHECK-NEXT: [[CMP7:%.*]] = icmp slt i32 [[SIGLEN]], [[PATLEN]] +; CHECK-NEXT: [[OR_COND34:%.*]] = or i1 [[CMP7]], [[OR_COND33]] +; CHECK-NEXT: br i1 [[OR_COND34]], label [[RETURN:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK: for.cond.preheader: +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[SIGLEN]], [[PATLEN]] +; CHECK-NEXT: [[CMP1235_NOT:%.*]] = icmp eq i32 [[PATLEN]], 0 +; CHECK-NEXT: br i1 [[CMP1235_NOT]], label [[FOR_COND11_PREHEADER_PREHEADER:%.*]], label [[FOR_COND11_PREHEADER_US:%.*]] +; CHECK: for.cond11.preheader.preheader: +; CHECK-NEXT: [[TMP0:%.*]] = shl i32 [[SIGLEN]], 2 +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[TMP0]], 4 +; CHECK-NEXT: tail call void @llvm.memset.p0.i32(ptr nonnull align 4 [[DEST]], i8 0, i32 [[TMP1]], i1 false) +; CHECK-NEXT: br label [[RETURN]] +; CHECK: for.cond11.preheader.us: +; CHECK-NEXT: [[N_038_US:%.*]] = phi i32 [ [[INC18_US:%.*]], [[FOR_COND11_FOR_COND_CLEANUP13_CRIT_EDGE_US:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[SIGNAL]], i32 [[N_038_US]] +; CHECK-NEXT: br label [[FOR_BODY14_US:%.*]] +; CHECK: for.body14.us: +; CHECK-NEXT: [[M_037_US:%.*]] = phi i32 [ 0, [[FOR_COND11_PREHEADER_US]] ], [ [[INC_US:%.*]], [[FOR_BODY14_US]] ] +; CHECK-NEXT: [[K_CORR_036_US:%.*]] = phi float [ 0.000000e+00, [[FOR_COND11_PREHEADER_US]] ], [ [[TMP5:%.*]], [[FOR_BODY14_US]] ] 
+; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr float, ptr [[TMP2]], i32 [[M_037_US]] +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX_US]], align 4 +; CHECK-NEXT: [[ARRAYIDX15_US:%.*]] = getelementptr inbounds float, ptr [[PATTERN]], i32 [[M_037_US]] +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX15_US]], align 4 +; CHECK-NEXT: [[TMP5]] = tail call float @llvm.fmuladd.f32(float [[TMP3]], float [[TMP4]], float [[K_CORR_036_US]]) +; CHECK-NEXT: [[INC_US]] = add nuw i32 [[M_037_US]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC_US]], [[PATLEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND11_FOR_COND_CLEANUP13_CRIT_EDGE_US]], label [[FOR_BODY14_US]] +; CHECK: for.cond11.for.cond.cleanup13_crit_edge.us: +; CHECK-NEXT: [[ARRAYIDX16_US:%.*]] = getelementptr inbounds float, ptr [[DEST]], i32 [[N_038_US]] +; CHECK-NEXT: store float [[TMP5]], ptr [[ARRAYIDX16_US]], align 4 +; CHECK-NEXT: [[INC18_US]] = add nuw i32 [[N_038_US]], 1 +; CHECK-NEXT: [[CMP10_NOT_US_NOT:%.*]] = icmp ult i32 [[N_038_US]], [[SUB]] +; CHECK-NEXT: br i1 [[CMP10_NOT_US_NOT]], label [[FOR_COND11_PREHEADER_US]], label [[RETURN]] +; CHECK: return: +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND11_PREHEADER_PREHEADER]] ], [ 0, [[FOR_COND11_FOR_COND_CLEANUP13_CRIT_EDGE_US]] ] +; CHECK-NEXT: ret i32 [[RETVAL_0]] +; +entry: + %cmp = icmp eq ptr %Signal, null + %cmp1 = icmp eq ptr %Pattern, null + %or.cond = or i1 %cmp, %cmp1 + %cmp4 = icmp eq ptr %dest, null + %or.cond33 = or i1 %or.cond, %cmp4 + %cmp7 = icmp slt i32 %siglen, %patlen + %or.cond34 = or i1 %cmp7, %or.cond33 + br i1 %or.cond34, label %return, label %for.cond.preheader + +for.cond.preheader: ; preds = %entry + %sub = sub nsw i32 %siglen, %patlen + %cmp1235.not = icmp eq i32 %patlen, 0 + br i1 %cmp1235.not, label %for.cond11.preheader.preheader, label %for.cond11.preheader.us + +for.cond11.preheader.preheader: ; preds = %for.cond.preheader + %0 = shl i32 %siglen, 2 + 
%1 = add i32 %0, 4 + tail call void @llvm.memset.p0.i32(ptr nonnull align 4 %dest, i8 0, i32 %1, i1 false) + br label %return + +for.cond11.preheader.us: ; preds = %for.cond11.for.cond.cleanup13_crit_edge.us, %for.cond.preheader + %n.038.us = phi i32 [ %inc18.us, %for.cond11.for.cond.cleanup13_crit_edge.us ], [ 0, %for.cond.preheader ] + %2 = getelementptr float, ptr %Signal, i32 %n.038.us + br label %for.body14.us + +for.body14.us: ; preds = %for.body14.us, %for.cond11.preheader.us + %m.037.us = phi i32 [ 0, %for.cond11.preheader.us ], [ %inc.us, %for.body14.us ] + %k_corr.036.us = phi float [ 0.000000e+00, %for.cond11.preheader.us ], [ %5, %for.body14.us ] + %arrayidx.us = getelementptr float, ptr %2, i32 %m.037.us + %3 = load float, ptr %arrayidx.us, align 4 + %arrayidx15.us = getelementptr inbounds float, ptr %Pattern, i32 %m.037.us + %4 = load float, ptr %arrayidx15.us, align 4 + %5 = tail call float @llvm.fmuladd.f32(float %3, float %4, float %k_corr.036.us) + %inc.us = add nuw i32 %m.037.us, 1 + %exitcond.not = icmp eq i32 %inc.us, %patlen + br i1 %exitcond.not, label %for.cond11.for.cond.cleanup13_crit_edge.us, label %for.body14.us + +for.cond11.for.cond.cleanup13_crit_edge.us: ; preds = %for.body14.us + %arrayidx16.us = getelementptr inbounds float, ptr %dest, i32 %n.038.us + store float %5, ptr %arrayidx16.us, align 4 + %inc18.us = add nuw i32 %n.038.us, 1 + %cmp10.not.us.not = icmp ult i32 %n.038.us, %sub + br i1 %cmp10.not.us.not, label %for.cond11.preheader.us, label %return + +return: ; preds = %for.cond11.for.cond.cleanup13_crit_edge.us, %for.cond11.preheader.preheader, %entry + %retval.0 = phi i32 [ 458755, %entry ], [ 0, %for.cond11.preheader.preheader ], [ 0, %for.cond11.for.cond.cleanup13_crit_edge.us ] + ret i32 %retval.0 +} diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod.ll new file mode 100644 index 0000000000000..2fe5f8edd108c --- /dev/null +++ 
b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod.ll @@ -0,0 +1,75 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +define dso_local noundef i32 @dsps_dotprod_f32_ansi(ptr nocapture noundef readonly %src1, ptr nocapture noundef readonly %src2, ptr nocapture noundef writeonly %dest, i32 noundef %len) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_dotprod_f32_ansi( +; CHECK-SAME: ptr nocapture noundef readonly [[SRC1:%.*]], ptr nocapture noundef readonly [[SRC2:%.*]], ptr nocapture noundef writeonly [[DEST:%.*]], i32 noundef [[LEN:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = icmp sgt i32 [[LEN]], 2 +; CHECK-NEXT: br i1 [[TMP0]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK: for.cond.preheader: +; CHECK-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP6]], label [[FOR_BODY_CLONE:%.*]], label [[IF_END:%.*]] +; CHECK: if.end: +; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_PREHEADER]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ], [ [[TMP6:%.*]], [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: store float [[ACC_0_LCSSA]], ptr [[DEST]], align 4 +; CHECK-NEXT: ret i32 0 +; CHECK: for.body: +; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[ACC_07:%.*]] = phi float [ [[TMP3]], [[FOR_BODY]] ], [ 0.000000e+00, [[ENTRY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[I_08]] +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[I_08]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 +; CHECK-NEXT: [[TMP3]] = tail call float @llvm.fmuladd.f32(float [[TMP1]], 
float [[TMP2]], float [[ACC_07]]) +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[IF_END]], label [[FOR_BODY]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[I_08_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[ACC_07_CLONE:%.*]] = phi float [ [[TMP6]], [[FOR_BODY_CLONE]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[I_08_CLONE]] +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[ARRAYIDX1_CLONE:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[I_08_CLONE]] +; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX1_CLONE]], align 4 +; CHECK-NEXT: [[TMP6]] = tail call float @llvm.fmuladd.f32(float [[TMP4]], float [[TMP5]], float [[ACC_07_CLONE]]) +; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[I_08_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[IF_END]], label [[FOR_BODY_CLONE]] +; +entry: + %0 = icmp sgt i32 %len, 2 + br i1 %0, label %for.body, label %for.cond.preheader + +for.cond.preheader: ; preds = %entry + %cmp6 = icmp sgt i32 %len, 0 + br i1 %cmp6, label %for.body.clone, label %if.end + +if.end: ; preds = %for.body.clone, %for.body, %for.cond.preheader + %acc.0.lcssa = phi float [ 0.000000e+00, %for.cond.preheader ], [ %3, %for.body ], [ %6, %for.body.clone ] + store float %acc.0.lcssa, ptr %dest, align 4 + ret i32 0 + +for.body: ; preds = %for.body, %entry + %i.08 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %acc.07 = phi float [ %3, %for.body ], [ 0.000000e+00, %entry ] + %arrayidx = getelementptr inbounds float, ptr %src1, i32 %i.08 + %1 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds float, ptr %src2, i32 %i.08 + %2 = load 
float, ptr %arrayidx1, align 4 + %3 = tail call float @llvm.fmuladd.f32(float %1, float %2, float %acc.07) + %inc = add nuw nsw i32 %i.08, 1 + %exitcond.not = icmp eq i32 %inc, %len + br i1 %exitcond.not, label %if.end, label %for.body + +for.body.clone: ; preds = %for.body.clone, %for.cond.preheader + %i.08.clone = phi i32 [ %inc.clone, %for.body.clone ], [ 0, %for.cond.preheader ] + %acc.07.clone = phi float [ %6, %for.body.clone ], [ 0.000000e+00, %for.cond.preheader ] + %arrayidx.clone = getelementptr inbounds float, ptr %src1, i32 %i.08.clone + %4 = load float, ptr %arrayidx.clone, align 4 + %arrayidx1.clone = getelementptr inbounds float, ptr %src2, i32 %i.08.clone + %5 = load float, ptr %arrayidx1.clone, align 4 + %6 = tail call float @llvm.fmuladd.f32(float %4, float %5, float %acc.07.clone) + %inc.clone = add nuw nsw i32 %i.08.clone, 1 + %exitcond.not.clone = icmp eq i32 %inc.clone, %len + br i1 %exitcond.not.clone, label %if.end, label %for.body.clone +} diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod_template_complex.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod_template_complex.ll new file mode 100644 index 0000000000000..8db7f9dd4c788 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod_template_complex.ll @@ -0,0 +1,49 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +define dso_local float @test_loop(ptr nocapture noundef readonly %data1, ptr nocapture noundef readonly %data2, i32 noundef %start_index, i32 noundef %end_index, i32 noundef %update1, i32 noundef %update2, float noundef %offset) local_unnamed_addr { +; CHECK-LABEL: define dso_local float @test_loop( +; CHECK-SAME: ptr nocapture noundef readonly [[DATA1:%.*]], ptr nocapture noundef readonly [[DATA2:%.*]], i32 noundef 
[[START_INDEX:%.*]], i32 noundef [[END_INDEX:%.*]], i32 noundef [[UPDATE1:%.*]], i32 noundef [[UPDATE2:%.*]], float noundef [[OFFSET:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr float, ptr [[DATA1]], i32 [[UPDATE1]] +; CHECK-NEXT: [[INVARIANT_GEP8:%.*]] = getelementptr float, ptr [[DATA2]], i32 [[UPDATE2]] +; CHECK-NEXT: [[CMP10:%.*]] = icmp slt i32 [[START_INDEX]], [[END_INDEX]] +; CHECK-NEXT: br i1 [[CMP10]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[ADD3:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: ret float [[RESULT_0_LCSSA]] +; CHECK: for.body: +; CHECK-NEXT: [[I_012:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[START_INDEX]], [[ENTRY]] ] +; CHECK-NEXT: [[RESULT_011:%.*]] = phi float [ [[ADD3]], [[FOR_BODY]] ], [ 0.000000e+00, [[ENTRY]] ] +; CHECK-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[INVARIANT_GEP]], i32 [[I_012]] +; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP]], align 4 +; CHECK-NEXT: [[GEP9:%.*]] = getelementptr float, ptr [[INVARIANT_GEP8]], i32 [[I_012]] +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[GEP9]], align 4 +; CHECK-NEXT: [[TMP2:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP0]], float [[TMP1]], float [[OFFSET]]) +; CHECK-NEXT: [[ADD3]] = fadd float [[RESULT_011]], [[TMP2]] +; CHECK-NEXT: [[INC]] = add nsw i32 [[I_012]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[END_INDEX]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] +; +entry: + %invariant.gep = getelementptr float, ptr %data1, i32 %update1 + %invariant.gep8 = getelementptr float, ptr %data2, i32 %update2 + %cmp10 = icmp slt i32 %start_index, %end_index + br i1 %cmp10, label %for.body, label %for.cond.cleanup + +for.cond.cleanup: ; preds = %for.body, %entry + %result.0.lcssa = phi float [ 0.000000e+00, %entry ], [ %add3, %for.body ] 
+ ret float %result.0.lcssa + +for.body: ; preds = %for.body, %entry + %i.012 = phi i32 [ %inc, %for.body ], [ %start_index, %entry ] + %result.011 = phi float [ %add3, %for.body ], [ 0.000000e+00, %entry ] + %gep = getelementptr float, ptr %invariant.gep, i32 %i.012 + %0 = load float, ptr %gep, align 4 + %gep9 = getelementptr float, ptr %invariant.gep8, i32 %i.012 + %1 = load float, ptr %gep9, align 4 + %2 = tail call float @llvm.fmuladd.f32(float %0, float %1, float %offset) + %add3 = fadd float %result.011, %2 + %inc = add nsw i32 %i.012, 1 + %exitcond.not = icmp eq i32 %inc, %end_index + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprode.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprode.ll new file mode 100644 index 0000000000000..78ea995d2a297 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprode.ll @@ -0,0 +1,83 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +define dso_local noundef i32 @dsps_dotprode_f32_ansi(ptr nocapture noundef readonly %src1, ptr nocapture noundef readonly %src2, ptr nocapture noundef writeonly %dest, i32 noundef %len, i32 noundef %step1, i32 noundef %step2) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_dotprode_f32_ansi( +; CHECK-SAME: ptr nocapture noundef readonly [[SRC1:%.*]], ptr nocapture noundef readonly [[SRC2:%.*]], ptr nocapture noundef writeonly [[DEST:%.*]], i32 noundef [[LEN:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[STEP2:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = icmp sgt i32 [[LEN]], 2 +; CHECK-NEXT: br i1 [[TMP0]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK: for.cond.preheader: +; CHECK-NEXT: [[CMP8:%.*]] 
= icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP8]], label [[FOR_BODY_CLONE:%.*]], label [[IF_END:%.*]] +; CHECK: if.end: +; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_PREHEADER]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ], [ [[TMP6:%.*]], [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: store float [[ACC_0_LCSSA]], ptr [[DEST]], align 4 +; CHECK-NEXT: ret i32 0 +; CHECK: for.body: +; CHECK-NEXT: [[I_010:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[ACC_09:%.*]] = phi float [ [[TMP3]], [[FOR_BODY]] ], [ 0.000000e+00, [[ENTRY]] ] +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_010]], [[STEP1]] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[MUL]] +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[MUL1:%.*]] = mul nsw i32 [[I_010]], [[STEP2]] +; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[MUL1]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +; CHECK-NEXT: [[TMP3]] = tail call float @llvm.fmuladd.f32(float [[TMP1]], float [[TMP2]], float [[ACC_09]]) +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_010]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[IF_END]], label [[FOR_BODY]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[I_010_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[ACC_09_CLONE:%.*]] = phi float [ [[TMP6]], [[FOR_BODY_CLONE]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[MUL_CLONE:%.*]] = mul nsw i32 [[I_010_CLONE]], [[STEP1]] +; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[MUL_CLONE]] +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[MUL1_CLONE:%.*]] = mul nsw i32 [[I_010_CLONE]], [[STEP2]] +; CHECK-NEXT: [[ARRAYIDX2_CLONE:%.*]] = getelementptr inbounds float, 
ptr [[SRC2]], i32 [[MUL1_CLONE]] +; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX2_CLONE]], align 4 +; CHECK-NEXT: [[TMP6]] = tail call float @llvm.fmuladd.f32(float [[TMP4]], float [[TMP5]], float [[ACC_09_CLONE]]) +; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[I_010_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[IF_END]], label [[FOR_BODY_CLONE]] +; +entry: + %0 = icmp sgt i32 %len, 2 + br i1 %0, label %for.body, label %for.cond.preheader + +for.cond.preheader: ; preds = %entry + %cmp8 = icmp sgt i32 %len, 0 + br i1 %cmp8, label %for.body.clone, label %if.end + +if.end: ; preds = %for.body.clone, %for.body, %for.cond.preheader + %acc.0.lcssa = phi float [ 0.000000e+00, %for.cond.preheader ], [ %3, %for.body ], [ %6, %for.body.clone ] + store float %acc.0.lcssa, ptr %dest, align 4 + ret i32 0 + +for.body: ; preds = %for.body, %entry + %i.010 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %acc.09 = phi float [ %3, %for.body ], [ 0.000000e+00, %entry ] + %mul = mul nsw i32 %i.010, %step1 + %arrayidx = getelementptr inbounds float, ptr %src1, i32 %mul + %1 = load float, ptr %arrayidx, align 4 + %mul1 = mul nsw i32 %i.010, %step2 + %arrayidx2 = getelementptr inbounds float, ptr %src2, i32 %mul1 + %2 = load float, ptr %arrayidx2, align 4 + %3 = tail call float @llvm.fmuladd.f32(float %1, float %2, float %acc.09) + %inc = add nuw nsw i32 %i.010, 1 + %exitcond.not = icmp eq i32 %inc, %len + br i1 %exitcond.not, label %if.end, label %for.body + +for.body.clone: ; preds = %for.body.clone, %for.cond.preheader + %i.010.clone = phi i32 [ %inc.clone, %for.body.clone ], [ 0, %for.cond.preheader ] + %acc.09.clone = phi float [ %6, %for.body.clone ], [ 0.000000e+00, %for.cond.preheader ] + %mul.clone = mul nsw i32 %i.010.clone, %step1 + %arrayidx.clone = getelementptr inbounds float, ptr %src1, i32 %mul.clone + %4 = load float, ptr %arrayidx.clone, align 4 + %mul1.clone = mul 
nsw i32 %i.010.clone, %step2 + %arrayidx2.clone = getelementptr inbounds float, ptr %src2, i32 %mul1.clone + %5 = load float, ptr %arrayidx2.clone, align 4 + %6 = tail call float @llvm.fmuladd.f32(float %4, float %5, float %acc.09.clone) + %inc.clone = add nuw nsw i32 %i.010.clone, 1 + %exitcond.not.clone = icmp eq i32 %inc.clone, %len + br i1 %exitcond.not.clone, label %if.end, label %for.body.clone +} diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fir.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fir.ll new file mode 100644 index 0000000000000..6a8cb4868b7ea --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fir.ll @@ -0,0 +1,306 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +%struct.fir_f32_s = type { ptr, ptr, i32, i32, i32, i16 } +define dso_local noundef i32 @dsps_fir_f32_ansi(ptr nocapture noundef %fir, ptr nocapture noundef readonly %input, ptr nocapture noundef writeonly %output, i32 noundef %len) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_fir_f32_ansi( +; CHECK-SAME: ptr nocapture noundef [[FIR:%.*]], ptr nocapture noundef readonly [[INPUT:%.*]], ptr nocapture noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = icmp sgt i32 [[LEN]], 2 +; CHECK-NEXT: br i1 [[TMP0]], label [[FOR_COND_PREHEADER:%.*]], label [[FOR_BODY_LR_PH_CLONE:%.*]] +; CHECK: for.cond.preheader: +; CHECK-NEXT: [[CMP67:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP67]], label [[FOR_BODY_LR_PH:%.*]], label [[IF_END:%.*]] +; CHECK: for.body.lr.ph: +; CHECK-NEXT: [[DELAY:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S:%.*]], ptr [[FIR]], i32 0, i32 1 +; CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DELAY]], align 4 +; 
CHECK-NEXT: [[POS:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S]], ptr [[FIR]], i32 0, i32 3 +; CHECK-NEXT: [[N:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S]], ptr [[FIR]], i32 0, i32 2 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[N]], align 4 +; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, ptr [[POS]], align 4 +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: if.end: +; CHECK-NEXT: ret i32 0 +; CHECK: for.body: +; CHECK-NEXT: [[TMP3:%.*]] = phi i32 [ [[DOTPRE]], [[FOR_BODY_LR_PH]] ], [ [[SPEC_STORE_SELECT:%.*]], [[FOR_COND_CLEANUP21:%.*]] ] +; CHECK-NEXT: [[I_068:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC33:%.*]], [[FOR_COND_CLEANUP21]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_068]] +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 [[TMP3]] +; CHECK-NEXT: store float [[TMP4]], ptr [[ARRAYIDX1]], align 4 +; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP3]], 1 +; CHECK-NEXT: [[CMP4_NOT:%.*]] = icmp slt i32 [[INC]], [[TMP2]] +; CHECK-NEXT: [[SPEC_STORE_SELECT]] = select i1 [[CMP4_NOT]], i32 [[INC]], i32 0 +; CHECK-NEXT: store i32 [[SPEC_STORE_SELECT]], ptr [[POS]], align 4 +; CHECK-NEXT: [[CMP957:%.*]] = icmp slt i32 [[SPEC_STORE_SELECT]], [[TMP2]] +; CHECK-NEXT: br i1 [[CMP957]], label [[FOR_BODY11_LR_PH:%.*]], label [[FOR_COND18_PREHEADER:%.*]] +; CHECK: for.body11.lr.ph: +; CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[FIR]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = sub i32 [[TMP2]], [[SPEC_STORE_SELECT]] +; CHECK-NEXT: br label [[FOR_BODY11:%.*]] +; CHECK: for.cond18.preheader: +; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP10:%.*]], [[FOR_BODY11]] ] +; CHECK-NEXT: [[COEFF_POS_0_LCSSA:%.*]] = phi i32 [ 0, [[FOR_BODY]] ], [ [[TMP6]], [[FOR_BODY11]] ] +; CHECK-NEXT: [[CMP2062:%.*]] = icmp sgt i32 [[SPEC_STORE_SELECT]], 0 +; CHECK-NEXT: br i1 [[CMP2062]], label 
[[FOR_BODY22_LR_PH:%.*]], label [[FOR_COND_CLEANUP21]] +; CHECK: for.body22.lr.ph: +; CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[FIR]], align 4 +; CHECK-NEXT: br label [[FOR_BODY22:%.*]] +; CHECK: for.body11: +; CHECK-NEXT: [[N_060:%.*]] = phi i32 [ [[SPEC_STORE_SELECT]], [[FOR_BODY11_LR_PH]] ], [ [[INC16:%.*]], [[FOR_BODY11]] ] +; CHECK-NEXT: [[COEFF_POS_059:%.*]] = phi i32 [ 0, [[FOR_BODY11_LR_PH]] ], [ [[INC12:%.*]], [[FOR_BODY11]] ] +; CHECK-NEXT: [[ACC_058:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY11_LR_PH]] ], [ [[TMP10]], [[FOR_BODY11]] ] +; CHECK-NEXT: [[INC12]] = add nuw i32 [[COEFF_POS_059]], 1 +; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i32 [[COEFF_POS_059]] +; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX13]], align 4 +; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 [[N_060]] +; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX15]], align 4 +; CHECK-NEXT: [[TMP10]] = tail call float @llvm.fmuladd.f32(float [[TMP8]], float [[TMP9]], float [[ACC_058]]) +; CHECK-NEXT: [[INC16]] = add nsw i32 [[N_060]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC12]], [[TMP6]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND18_PREHEADER]], label [[FOR_BODY11]] +; CHECK: for.cond.cleanup21: +; CHECK-NEXT: [[ACC_1_LCSSA:%.*]] = phi float [ [[ACC_0_LCSSA]], [[FOR_COND18_PREHEADER]] ], [ [[TMP13:%.*]], [[FOR_BODY22]] ] +; CHECK-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_068]] +; CHECK-NEXT: store float [[ACC_1_LCSSA]], ptr [[ARRAYIDX31]], align 4 +; CHECK-NEXT: [[INC33]] = add nuw nsw i32 [[I_068]], 1 +; CHECK-NEXT: [[EXITCOND71_NOT:%.*]] = icmp eq i32 [[INC33]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND71_NOT]], label [[IF_END]], label [[FOR_BODY]] +; CHECK: for.body22: +; CHECK-NEXT: [[N17_065:%.*]] = phi i32 [ 0, [[FOR_BODY22_LR_PH]] ], [ [[INC29:%.*]], [[FOR_BODY22]] ] +; CHECK-NEXT: [[COEFF_POS_164:%.*]] = phi i32 [ 
[[COEFF_POS_0_LCSSA]], [[FOR_BODY22_LR_PH]] ], [ [[INC24:%.*]], [[FOR_BODY22]] ] +; CHECK-NEXT: [[ACC_163:%.*]] = phi float [ [[ACC_0_LCSSA]], [[FOR_BODY22_LR_PH]] ], [ [[TMP13]], [[FOR_BODY22]] ] +; CHECK-NEXT: [[INC24]] = add nuw nsw i32 [[COEFF_POS_164]], 1 +; CHECK-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 [[COEFF_POS_164]] +; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX25]], align 4 +; CHECK-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 [[N17_065]] +; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX27]], align 4 +; CHECK-NEXT: [[TMP13]] = tail call float @llvm.fmuladd.f32(float [[TMP11]], float [[TMP12]], float [[ACC_163]]) +; CHECK-NEXT: [[INC29]] = add nuw nsw i32 [[N17_065]], 1 +; CHECK-NEXT: [[EXITCOND70_NOT:%.*]] = icmp eq i32 [[INC29]], [[SPEC_STORE_SELECT]] +; CHECK-NEXT: br i1 [[EXITCOND70_NOT]], label [[FOR_COND_CLEANUP21]], label [[FOR_BODY22]] +; CHECK: for.body.lr.ph.clone: +; CHECK-NEXT: [[DELAY_CLONE:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S]], ptr [[FIR]], i32 0, i32 1 +; CHECK-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DELAY_CLONE]], align 4 +; CHECK-NEXT: [[POS_CLONE:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S]], ptr [[FIR]], i32 0, i32 3 +; CHECK-NEXT: [[N_CLONE:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S]], ptr [[FIR]], i32 0, i32 2 +; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[N_CLONE]], align 4 +; CHECK-NEXT: [[DOTPRE_CLONE:%.*]] = load i32, ptr [[POS_CLONE]], align 4 +; CHECK-NEXT: br label [[FOR_BODY_CLONE:%.*]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[TMP16:%.*]] = phi i32 [ [[DOTPRE_CLONE]], [[FOR_BODY_LR_PH_CLONE]] ], [ [[SPEC_STORE_SELECT_CLONE:%.*]], [[FOR_COND_CLEANUP21_CLONE:%.*]] ] +; CHECK-NEXT: [[I_068_CLONE:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH_CLONE]] ], [ [[INC33_CLONE:%.*]], [[FOR_COND_CLEANUP21_CLONE]] ] +; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_068_CLONE]] +; CHECK-NEXT: 
[[TMP17:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[ARRAYIDX1_CLONE:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[TMP16]] +; CHECK-NEXT: store float [[TMP17]], ptr [[ARRAYIDX1_CLONE]], align 4 +; CHECK-NEXT: [[INC_CLONE:%.*]] = add nsw i32 [[TMP16]], 1 +; CHECK-NEXT: [[CMP4_NOT_CLONE:%.*]] = icmp slt i32 [[INC_CLONE]], [[TMP15]] +; CHECK-NEXT: [[SPEC_STORE_SELECT_CLONE]] = select i1 [[CMP4_NOT_CLONE]], i32 [[INC_CLONE]], i32 0 +; CHECK-NEXT: store i32 [[SPEC_STORE_SELECT_CLONE]], ptr [[POS_CLONE]], align 4 +; CHECK-NEXT: [[CMP957_CLONE:%.*]] = icmp slt i32 [[SPEC_STORE_SELECT_CLONE]], [[TMP15]] +; CHECK-NEXT: br i1 [[CMP957_CLONE]], label [[FOR_BODY11_LR_PH_CLONE:%.*]], label [[FOR_COND18_PREHEADER_CLONE:%.*]] +; CHECK: for.body11.lr.ph.clone: +; CHECK-NEXT: [[TMP18:%.*]] = load ptr, ptr [[FIR]], align 4 +; CHECK-NEXT: [[TMP19:%.*]] = sub i32 [[TMP15]], [[SPEC_STORE_SELECT_CLONE]] +; CHECK-NEXT: br label [[FOR_BODY11_CLONE:%.*]] +; CHECK: for.body11.clone: +; CHECK-NEXT: [[N_060_CLONE:%.*]] = phi i32 [ [[SPEC_STORE_SELECT_CLONE]], [[FOR_BODY11_LR_PH_CLONE]] ], [ [[INC16_CLONE:%.*]], [[FOR_BODY11_CLONE]] ] +; CHECK-NEXT: [[COEFF_POS_059_CLONE:%.*]] = phi i32 [ 0, [[FOR_BODY11_LR_PH_CLONE]] ], [ [[INC12_CLONE:%.*]], [[FOR_BODY11_CLONE]] ] +; CHECK-NEXT: [[ACC_058_CLONE:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY11_LR_PH_CLONE]] ], [ [[TMP22:%.*]], [[FOR_BODY11_CLONE]] ] +; CHECK-NEXT: [[INC12_CLONE]] = add nuw i32 [[COEFF_POS_059_CLONE]], 1 +; CHECK-NEXT: [[ARRAYIDX13_CLONE:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i32 [[COEFF_POS_059_CLONE]] +; CHECK-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX13_CLONE]], align 4 +; CHECK-NEXT: [[ARRAYIDX15_CLONE:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[N_060_CLONE]] +; CHECK-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX15_CLONE]], align 4 +; CHECK-NEXT: [[TMP22]] = tail call float @llvm.fmuladd.f32(float [[TMP20]], float [[TMP21]], float [[ACC_058_CLONE]]) 
+; CHECK-NEXT: [[INC16_CLONE]] = add nsw i32 [[N_060_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC12_CLONE]], [[TMP19]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[FOR_COND18_PREHEADER_CLONE]], label [[FOR_BODY11_CLONE]] +; CHECK: for.cond18.preheader.clone: +; CHECK-NEXT: [[ACC_0_LCSSA_CLONE:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY_CLONE]] ], [ [[TMP22]], [[FOR_BODY11_CLONE]] ] +; CHECK-NEXT: [[COEFF_POS_0_LCSSA_CLONE:%.*]] = phi i32 [ 0, [[FOR_BODY_CLONE]] ], [ [[TMP19]], [[FOR_BODY11_CLONE]] ] +; CHECK-NEXT: [[CMP2062_CLONE:%.*]] = icmp sgt i32 [[SPEC_STORE_SELECT_CLONE]], 0 +; CHECK-NEXT: br i1 [[CMP2062_CLONE]], label [[FOR_BODY22_LR_PH_CLONE:%.*]], label [[FOR_COND_CLEANUP21_CLONE]] +; CHECK: for.body22.lr.ph.clone: +; CHECK-NEXT: [[TMP23:%.*]] = load ptr, ptr [[FIR]], align 4 +; CHECK-NEXT: br label [[FOR_BODY22_CLONE:%.*]] +; CHECK: for.body22.clone: +; CHECK-NEXT: [[N17_065_CLONE:%.*]] = phi i32 [ 0, [[FOR_BODY22_LR_PH_CLONE]] ], [ [[INC29_CLONE:%.*]], [[FOR_BODY22_CLONE]] ] +; CHECK-NEXT: [[COEFF_POS_164_CLONE:%.*]] = phi i32 [ [[COEFF_POS_0_LCSSA_CLONE]], [[FOR_BODY22_LR_PH_CLONE]] ], [ [[INC24_CLONE:%.*]], [[FOR_BODY22_CLONE]] ] +; CHECK-NEXT: [[ACC_163_CLONE:%.*]] = phi float [ [[ACC_0_LCSSA_CLONE]], [[FOR_BODY22_LR_PH_CLONE]] ], [ [[TMP26:%.*]], [[FOR_BODY22_CLONE]] ] +; CHECK-NEXT: [[INC24_CLONE]] = add nuw nsw i32 [[COEFF_POS_164_CLONE]], 1 +; CHECK-NEXT: [[ARRAYIDX25_CLONE:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i32 [[COEFF_POS_164_CLONE]] +; CHECK-NEXT: [[TMP24:%.*]] = load float, ptr [[ARRAYIDX25_CLONE]], align 4 +; CHECK-NEXT: [[ARRAYIDX27_CLONE:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[N17_065_CLONE]] +; CHECK-NEXT: [[TMP25:%.*]] = load float, ptr [[ARRAYIDX27_CLONE]], align 4 +; CHECK-NEXT: [[TMP26]] = tail call float @llvm.fmuladd.f32(float [[TMP24]], float [[TMP25]], float [[ACC_163_CLONE]]) +; CHECK-NEXT: [[INC29_CLONE]] = add nuw nsw i32 [[N17_065_CLONE]], 1 +; 
CHECK-NEXT: [[EXITCOND70_NOT_CLONE:%.*]] = icmp eq i32 [[INC29_CLONE]], [[SPEC_STORE_SELECT_CLONE]] +; CHECK-NEXT: br i1 [[EXITCOND70_NOT_CLONE]], label [[FOR_COND_CLEANUP21_CLONE]], label [[FOR_BODY22_CLONE]] +; CHECK: for.cond.cleanup21.clone: +; CHECK-NEXT: [[ACC_1_LCSSA_CLONE:%.*]] = phi float [ [[ACC_0_LCSSA_CLONE]], [[FOR_COND18_PREHEADER_CLONE]] ], [ [[TMP26]], [[FOR_BODY22_CLONE]] ] +; CHECK-NEXT: [[ARRAYIDX31_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_068_CLONE]] +; CHECK-NEXT: store float [[ACC_1_LCSSA_CLONE]], ptr [[ARRAYIDX31_CLONE]], align 4 +; CHECK-NEXT: [[INC33_CLONE]] = add nuw nsw i32 [[I_068_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND71_NOT_CLONE:%.*]] = icmp eq i32 [[INC33_CLONE]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND71_NOT_CLONE]], label [[IF_END]], label [[FOR_BODY_CLONE]] +; +entry: + %0 = icmp sgt i32 %len, 2 + br i1 %0, label %for.cond.preheader, label %for.body.lr.ph.clone + +for.cond.preheader: ; preds = %entry + %cmp67 = icmp sgt i32 %len, 0 + br i1 %cmp67, label %for.body.lr.ph, label %if.end + +for.body.lr.ph: ; preds = %for.cond.preheader + %delay = getelementptr inbounds %struct.fir_f32_s, ptr %fir, i32 0, i32 1 + %1 = load ptr, ptr %delay, align 4 + %pos = getelementptr inbounds %struct.fir_f32_s, ptr %fir, i32 0, i32 3 + %N = getelementptr inbounds %struct.fir_f32_s, ptr %fir, i32 0, i32 2 + %2 = load i32, ptr %N, align 4 + %.pre = load i32, ptr %pos, align 4 + br label %for.body + +if.end: ; preds = %for.cond.cleanup21.clone, %for.cond.cleanup21, %for.cond.preheader + ret i32 0 + +for.body: ; preds = %for.cond.cleanup21, %for.body.lr.ph + %3 = phi i32 [ %.pre, %for.body.lr.ph ], [ %spec.store.select, %for.cond.cleanup21 ] + %i.068 = phi i32 [ 0, %for.body.lr.ph ], [ %inc33, %for.cond.cleanup21 ] + %arrayidx = getelementptr inbounds float, ptr %input, i32 %i.068 + %4 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds float, ptr %1, i32 %3 + store float %4, ptr %arrayidx1, align 4 + %inc 
= add nsw i32 %3, 1 + %cmp4.not = icmp slt i32 %inc, %2 + %spec.store.select = select i1 %cmp4.not, i32 %inc, i32 0 + store i32 %spec.store.select, ptr %pos, align 4 + %cmp957 = icmp slt i32 %spec.store.select, %2 + br i1 %cmp957, label %for.body11.lr.ph, label %for.cond18.preheader + +for.body11.lr.ph: ; preds = %for.body + %5 = load ptr, ptr %fir, align 4 + %6 = sub i32 %2, %spec.store.select + br label %for.body11 + +for.cond18.preheader: ; preds = %for.body11, %for.body + %acc.0.lcssa = phi float [ 0.000000e+00, %for.body ], [ %10, %for.body11 ] + %coeff_pos.0.lcssa = phi i32 [ 0, %for.body ], [ %6, %for.body11 ] + %cmp2062 = icmp sgt i32 %spec.store.select, 0 + br i1 %cmp2062, label %for.body22.lr.ph, label %for.cond.cleanup21 + +for.body22.lr.ph: ; preds = %for.cond18.preheader + %7 = load ptr, ptr %fir, align 4 + br label %for.body22 + +for.body11: ; preds = %for.body11, %for.body11.lr.ph + %n.060 = phi i32 [ %spec.store.select, %for.body11.lr.ph ], [ %inc16, %for.body11 ] + %coeff_pos.059 = phi i32 [ 0, %for.body11.lr.ph ], [ %inc12, %for.body11 ] + %acc.058 = phi float [ 0.000000e+00, %for.body11.lr.ph ], [ %10, %for.body11 ] + %inc12 = add nuw i32 %coeff_pos.059, 1 + %arrayidx13 = getelementptr inbounds float, ptr %5, i32 %coeff_pos.059 + %8 = load float, ptr %arrayidx13, align 4 + %arrayidx15 = getelementptr inbounds float, ptr %1, i32 %n.060 + %9 = load float, ptr %arrayidx15, align 4 + %10 = tail call float @llvm.fmuladd.f32(float %8, float %9, float %acc.058) + %inc16 = add nsw i32 %n.060, 1 + %exitcond.not = icmp eq i32 %inc12, %6 + br i1 %exitcond.not, label %for.cond18.preheader, label %for.body11 + +for.cond.cleanup21: ; preds = %for.body22, %for.cond18.preheader + %acc.1.lcssa = phi float [ %acc.0.lcssa, %for.cond18.preheader ], [ %13, %for.body22 ] + %arrayidx31 = getelementptr inbounds float, ptr %output, i32 %i.068 + store float %acc.1.lcssa, ptr %arrayidx31, align 4 + %inc33 = add nuw nsw i32 %i.068, 1 + %exitcond71.not = icmp eq i32 %inc33, 
%len + br i1 %exitcond71.not, label %if.end, label %for.body + +for.body22: ; preds = %for.body22, %for.body22.lr.ph + %n17.065 = phi i32 [ 0, %for.body22.lr.ph ], [ %inc29, %for.body22 ] + %coeff_pos.164 = phi i32 [ %coeff_pos.0.lcssa, %for.body22.lr.ph ], [ %inc24, %for.body22 ] + %acc.163 = phi float [ %acc.0.lcssa, %for.body22.lr.ph ], [ %13, %for.body22 ] + %inc24 = add nuw nsw i32 %coeff_pos.164, 1 + %arrayidx25 = getelementptr inbounds float, ptr %7, i32 %coeff_pos.164 + %11 = load float, ptr %arrayidx25, align 4 + %arrayidx27 = getelementptr inbounds float, ptr %1, i32 %n17.065 + %12 = load float, ptr %arrayidx27, align 4 + %13 = tail call float @llvm.fmuladd.f32(float %11, float %12, float %acc.163) + %inc29 = add nuw nsw i32 %n17.065, 1 + %exitcond70.not = icmp eq i32 %inc29, %spec.store.select + br i1 %exitcond70.not, label %for.cond.cleanup21, label %for.body22 + +for.body.lr.ph.clone: ; preds = %entry + %delay.clone = getelementptr inbounds %struct.fir_f32_s, ptr %fir, i32 0, i32 1 + %14 = load ptr, ptr %delay.clone, align 4 + %pos.clone = getelementptr inbounds %struct.fir_f32_s, ptr %fir, i32 0, i32 3 + %N.clone = getelementptr inbounds %struct.fir_f32_s, ptr %fir, i32 0, i32 2 + %15 = load i32, ptr %N.clone, align 4 + %.pre.clone = load i32, ptr %pos.clone, align 4 + br label %for.body.clone + +for.body.clone: ; preds = %for.cond.cleanup21.clone, %for.body.lr.ph.clone + %16 = phi i32 [ %.pre.clone, %for.body.lr.ph.clone ], [ %spec.store.select.clone, %for.cond.cleanup21.clone ] + %i.068.clone = phi i32 [ 0, %for.body.lr.ph.clone ], [ %inc33.clone, %for.cond.cleanup21.clone ] + %arrayidx.clone = getelementptr inbounds float, ptr %input, i32 %i.068.clone + %17 = load float, ptr %arrayidx.clone, align 4 + %arrayidx1.clone = getelementptr inbounds float, ptr %14, i32 %16 + store float %17, ptr %arrayidx1.clone, align 4 + %inc.clone = add nsw i32 %16, 1 + %cmp4.not.clone = icmp slt i32 %inc.clone, %15 + %spec.store.select.clone = select i1 
%cmp4.not.clone, i32 %inc.clone, i32 0 + store i32 %spec.store.select.clone, ptr %pos.clone, align 4 + %cmp957.clone = icmp slt i32 %spec.store.select.clone, %15 + br i1 %cmp957.clone, label %for.body11.lr.ph.clone, label %for.cond18.preheader.clone + +for.body11.lr.ph.clone: ; preds = %for.body.clone + %18 = load ptr, ptr %fir, align 4 + %19 = sub i32 %15, %spec.store.select.clone + br label %for.body11.clone + +for.body11.clone: ; preds = %for.body11.clone, %for.body11.lr.ph.clone + %n.060.clone = phi i32 [ %spec.store.select.clone, %for.body11.lr.ph.clone ], [ %inc16.clone, %for.body11.clone ] + %coeff_pos.059.clone = phi i32 [ 0, %for.body11.lr.ph.clone ], [ %inc12.clone, %for.body11.clone ] + %acc.058.clone = phi float [ 0.000000e+00, %for.body11.lr.ph.clone ], [ %22, %for.body11.clone ] + %inc12.clone = add nuw i32 %coeff_pos.059.clone, 1 + %arrayidx13.clone = getelementptr inbounds float, ptr %18, i32 %coeff_pos.059.clone + %20 = load float, ptr %arrayidx13.clone, align 4 + %arrayidx15.clone = getelementptr inbounds float, ptr %14, i32 %n.060.clone + %21 = load float, ptr %arrayidx15.clone, align 4 + %22 = tail call float @llvm.fmuladd.f32(float %20, float %21, float %acc.058.clone) + %inc16.clone = add nsw i32 %n.060.clone, 1 + %exitcond.not.clone = icmp eq i32 %inc12.clone, %19 + br i1 %exitcond.not.clone, label %for.cond18.preheader.clone, label %for.body11.clone + +for.cond18.preheader.clone: ; preds = %for.body11.clone, %for.body.clone + %acc.0.lcssa.clone = phi float [ 0.000000e+00, %for.body.clone ], [ %22, %for.body11.clone ] + %coeff_pos.0.lcssa.clone = phi i32 [ 0, %for.body.clone ], [ %19, %for.body11.clone ] + %cmp2062.clone = icmp sgt i32 %spec.store.select.clone, 0 + br i1 %cmp2062.clone, label %for.body22.lr.ph.clone, label %for.cond.cleanup21.clone + +for.body22.lr.ph.clone: ; preds = %for.cond18.preheader.clone + %23 = load ptr, ptr %fir, align 4 + br label %for.body22.clone + +for.body22.clone: ; preds = %for.body22.clone, 
%for.body22.lr.ph.clone + %n17.065.clone = phi i32 [ 0, %for.body22.lr.ph.clone ], [ %inc29.clone, %for.body22.clone ] + %coeff_pos.164.clone = phi i32 [ %coeff_pos.0.lcssa.clone, %for.body22.lr.ph.clone ], [ %inc24.clone, %for.body22.clone ] + %acc.163.clone = phi float [ %acc.0.lcssa.clone, %for.body22.lr.ph.clone ], [ %26, %for.body22.clone ] + %inc24.clone = add nuw nsw i32 %coeff_pos.164.clone, 1 + %arrayidx25.clone = getelementptr inbounds float, ptr %23, i32 %coeff_pos.164.clone + %24 = load float, ptr %arrayidx25.clone, align 4 + %arrayidx27.clone = getelementptr inbounds float, ptr %14, i32 %n17.065.clone + %25 = load float, ptr %arrayidx27.clone, align 4 + %26 = tail call float @llvm.fmuladd.f32(float %24, float %25, float %acc.163.clone) + %inc29.clone = add nuw nsw i32 %n17.065.clone, 1 + %exitcond70.not.clone = icmp eq i32 %inc29.clone, %spec.store.select.clone + br i1 %exitcond70.not.clone, label %for.cond.cleanup21.clone, label %for.body22.clone + +for.cond.cleanup21.clone: ; preds = %for.body22.clone, %for.cond18.preheader.clone + %acc.1.lcssa.clone = phi float [ %acc.0.lcssa.clone, %for.cond18.preheader.clone ], [ %26, %for.body22.clone ] + %arrayidx31.clone = getelementptr inbounds float, ptr %output, i32 %i.068.clone + store float %acc.1.lcssa.clone, ptr %arrayidx31.clone, align 4 + %inc33.clone = add nuw nsw i32 %i.068.clone, 1 + %exitcond71.not.clone = icmp eq i32 %inc33.clone, %len + br i1 %exitcond71.not.clone, label %if.end, label %for.body.clone +} diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fird.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fird.ll new file mode 100644 index 0000000000000..875710cf61b86 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fird.ll @@ -0,0 +1,207 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder 
-riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +%struct.fir_f32_s = type { ptr, ptr, i32, i32, i32, i16 } +define dso_local noundef i32 @dsps_fird_f32_ansi(ptr nocapture noundef %fir, ptr nocapture noundef readonly %input, ptr nocapture noundef writeonly %output, i32 noundef %len) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_fird_f32_ansi( +; CHECK-SAME: ptr nocapture noundef [[FIR:%.*]], ptr nocapture noundef readonly [[INPUT:%.*]], ptr nocapture noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP77:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP77]], label [[FOR_COND1_PREHEADER_LR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]] +; CHECK: for.cond1.preheader.lr.ph: +; CHECK-NEXT: [[DECIM:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S:%.*]], ptr [[FIR]], i32 0, i32 4 +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[DECIM]], align 4 +; CHECK-NEXT: [[CMP263:%.*]] = icmp sgt i32 [[TMP0]], 0 +; CHECK-NEXT: [[DELAY:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S]], ptr [[FIR]], i32 0, i32 1 +; CHECK-NEXT: [[POS:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S]], ptr [[FIR]], i32 0, i32 3 +; CHECK-NEXT: [[N:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S]], ptr [[FIR]], i32 0, i32 2 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[N]], align 4 +; CHECK-NEXT: [[POS9_PROMOTED:%.*]] = load i32, ptr [[POS]], align 4 +; CHECK-NEXT: br label [[FOR_COND1_PREHEADER:%.*]] +; CHECK: for.cond1.preheader: +; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[POS9_PROMOTED]], [[FOR_COND1_PREHEADER_LR_PH]] ], [ [[TMP4:%.*]], [[FOR_COND_CLEANUP26:%.*]] ] +; CHECK-NEXT: [[I_080:%.*]] = phi i32 [ 0, [[FOR_COND1_PREHEADER_LR_PH]] ], [ [[INC39:%.*]], [[FOR_COND_CLEANUP26]] ] +; CHECK-NEXT: [[INPUT_ADDR_078:%.*]] = phi ptr [ [[INPUT]], [[FOR_COND1_PREHEADER_LR_PH]] ], [ [[INPUT_ADDR_1_LCSSA:%.*]], [[FOR_COND_CLEANUP26]] ] +; CHECK-NEXT: br i1 [[CMP263]], label [[FOR_BODY4_LR_PH:%.*]], 
label [[FOR_COND_CLEANUP3:%.*]] +; CHECK: for.body4.lr.ph: +; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DELAY]], align 4 +; CHECK-NEXT: br label [[FOR_BODY4:%.*]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[LEN]], [[FOR_COND_CLEANUP26]] ] +; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] +; CHECK: for.cond1.for.cond.cleanup3_crit_edge: +; CHECK-NEXT: store i32 [[SPEC_SELECT:%.*]], ptr [[POS]], align 4 +; CHECK-NEXT: br label [[FOR_COND_CLEANUP3]] +; CHECK: for.cond.cleanup3: +; CHECK-NEXT: [[TMP4]] = phi i32 [ [[SPEC_SELECT]], [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE:%.*]] ], [ [[TMP2]], [[FOR_COND1_PREHEADER]] ] +; CHECK-NEXT: [[INPUT_ADDR_1_LCSSA]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE]] ], [ [[INPUT_ADDR_078]], [[FOR_COND1_PREHEADER]] ] +; CHECK-NEXT: [[CMP1266:%.*]] = icmp slt i32 [[TMP4]], [[TMP1]] +; CHECK-NEXT: br i1 [[CMP1266]], label [[FOR_BODY14_LR_PH:%.*]], label [[FOR_COND23_PREHEADER:%.*]] +; CHECK: for.body14.lr.ph: +; CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[FIR]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DELAY]], align 4 +; CHECK-NEXT: [[TMP7:%.*]] = sub i32 [[TMP1]], [[TMP4]] +; CHECK-NEXT: br label [[FOR_BODY14:%.*]] +; CHECK: for.body4: +; CHECK-NEXT: [[TMP8:%.*]] = phi i32 [ [[TMP2]], [[FOR_BODY4_LR_PH]] ], [ [[SPEC_SELECT]], [[FOR_BODY4]] ] +; CHECK-NEXT: [[K_065:%.*]] = phi i32 [ 0, [[FOR_BODY4_LR_PH]] ], [ [[INC8:%.*]], [[FOR_BODY4]] ] +; CHECK-NEXT: [[INPUT_ADDR_164:%.*]] = phi ptr [ [[INPUT_ADDR_078]], [[FOR_BODY4_LR_PH]] ], [ [[INCDEC_PTR]], [[FOR_BODY4]] ] +; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, ptr [[INPUT_ADDR_164]], i32 1 +; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[INPUT_ADDR_164]], align 4 +; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1 +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i32 [[TMP8]] +; CHECK-NEXT: store float [[TMP9]], ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: 
[[CMP6_NOT:%.*]] = icmp slt i32 [[INC]], [[TMP1]] +; CHECK-NEXT: [[SPEC_SELECT]] = select i1 [[CMP6_NOT]], i32 [[INC]], i32 0 +; CHECK-NEXT: [[INC8]] = add nuw nsw i32 [[K_065]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC8]], [[TMP0]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE]], label [[FOR_BODY4]] +; CHECK: for.cond23.preheader: +; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_CLEANUP3]] ], [ [[TMP14:%.*]], [[FOR_BODY14]] ] +; CHECK-NEXT: [[COEFF_POS_0_LCSSA:%.*]] = phi i32 [ 0, [[FOR_COND_CLEANUP3]] ], [ [[TMP7]], [[FOR_BODY14]] ] +; CHECK-NEXT: [[CMP2572:%.*]] = icmp sgt i32 [[TMP4]], 0 +; CHECK-NEXT: br i1 [[CMP2572]], label [[FOR_BODY27_LR_PH:%.*]], label [[FOR_COND_CLEANUP26]] +; CHECK: for.body27.lr.ph: +; CHECK-NEXT: [[TMP10:%.*]] = load ptr, ptr [[FIR]], align 4 +; CHECK-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DELAY]], align 4 +; CHECK-NEXT: br label [[FOR_BODY27:%.*]] +; CHECK: for.body14: +; CHECK-NEXT: [[N_069:%.*]] = phi i32 [ [[TMP4]], [[FOR_BODY14_LR_PH]] ], [ [[INC20:%.*]], [[FOR_BODY14]] ] +; CHECK-NEXT: [[COEFF_POS_068:%.*]] = phi i32 [ 0, [[FOR_BODY14_LR_PH]] ], [ [[INC15:%.*]], [[FOR_BODY14]] ] +; CHECK-NEXT: [[ACC_067:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY14_LR_PH]] ], [ [[TMP14]], [[FOR_BODY14]] ] +; CHECK-NEXT: [[INC15]] = add nuw i32 [[COEFF_POS_068]], 1 +; CHECK-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i32 [[COEFF_POS_068]] +; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX16]], align 4 +; CHECK-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i32 [[N_069]] +; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX18]], align 4 +; CHECK-NEXT: [[TMP14]] = tail call float @llvm.fmuladd.f32(float [[TMP12]], float [[TMP13]], float [[ACC_067]]) +; CHECK-NEXT: [[INC20]] = add nsw i32 [[N_069]], 1 +; CHECK-NEXT: [[EXITCOND83_NOT:%.*]] = icmp eq i32 [[INC15]], [[TMP7]] +; CHECK-NEXT: br i1 
[[EXITCOND83_NOT]], label [[FOR_COND23_PREHEADER]], label [[FOR_BODY14]] +; CHECK: for.cond.cleanup26: +; CHECK-NEXT: [[ACC_1_LCSSA:%.*]] = phi float [ [[ACC_0_LCSSA]], [[FOR_COND23_PREHEADER]] ], [ [[TMP17:%.*]], [[FOR_BODY27]] ] +; CHECK-NEXT: [[INC39]] = add nuw nsw i32 [[I_080]], 1 +; CHECK-NEXT: [[ARRAYIDX37:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_080]] +; CHECK-NEXT: store float [[ACC_1_LCSSA]], ptr [[ARRAYIDX37]], align 4 +; CHECK-NEXT: [[EXITCOND85_NOT:%.*]] = icmp eq i32 [[INC39]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND85_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_COND1_PREHEADER]] +; CHECK: for.body27: +; CHECK-NEXT: [[N22_075:%.*]] = phi i32 [ 0, [[FOR_BODY27_LR_PH]] ], [ [[INC34:%.*]], [[FOR_BODY27]] ] +; CHECK-NEXT: [[COEFF_POS_174:%.*]] = phi i32 [ [[COEFF_POS_0_LCSSA]], [[FOR_BODY27_LR_PH]] ], [ [[INC29:%.*]], [[FOR_BODY27]] ] +; CHECK-NEXT: [[ACC_173:%.*]] = phi float [ [[ACC_0_LCSSA]], [[FOR_BODY27_LR_PH]] ], [ [[TMP17]], [[FOR_BODY27]] ] +; CHECK-NEXT: [[INC29]] = add nuw nsw i32 [[COEFF_POS_174]], 1 +; CHECK-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i32 [[COEFF_POS_174]] +; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX30]], align 4 +; CHECK-NEXT: [[ARRAYIDX32:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i32 [[N22_075]] +; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX32]], align 4 +; CHECK-NEXT: [[TMP17]] = tail call float @llvm.fmuladd.f32(float [[TMP15]], float [[TMP16]], float [[ACC_173]]) +; CHECK-NEXT: [[INC34]] = add nuw nsw i32 [[N22_075]], 1 +; CHECK-NEXT: [[EXITCOND84_NOT:%.*]] = icmp eq i32 [[INC34]], [[TMP4]] +; CHECK-NEXT: br i1 [[EXITCOND84_NOT]], label [[FOR_COND_CLEANUP26]], label [[FOR_BODY27]] +; +entry: + %cmp77 = icmp sgt i32 %len, 0 + br i1 %cmp77, label %for.cond1.preheader.lr.ph, label %for.cond.cleanup + +for.cond1.preheader.lr.ph: ; preds = %entry + %decim = getelementptr inbounds %struct.fir_f32_s, ptr %fir, i32 0, i32 4 + %0 = load i32, ptr 
%decim, align 4 + %cmp263 = icmp sgt i32 %0, 0 + %delay = getelementptr inbounds %struct.fir_f32_s, ptr %fir, i32 0, i32 1 + %pos = getelementptr inbounds %struct.fir_f32_s, ptr %fir, i32 0, i32 3 + %N = getelementptr inbounds %struct.fir_f32_s, ptr %fir, i32 0, i32 2 + %1 = load i32, ptr %N, align 4 + %pos9.promoted = load i32, ptr %pos, align 4 + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.cond.cleanup26, %for.cond1.preheader.lr.ph + %2 = phi i32 [ %pos9.promoted, %for.cond1.preheader.lr.ph ], [ %4, %for.cond.cleanup26 ] + %i.080 = phi i32 [ 0, %for.cond1.preheader.lr.ph ], [ %inc39, %for.cond.cleanup26 ] + %input.addr.078 = phi ptr [ %input, %for.cond1.preheader.lr.ph ], [ %input.addr.1.lcssa, %for.cond.cleanup26 ] + br i1 %cmp263, label %for.body4.lr.ph, label %for.cond.cleanup3 + +for.body4.lr.ph: ; preds = %for.cond1.preheader + %3 = load ptr, ptr %delay, align 4 + br label %for.body4 + +for.cond.cleanup: ; preds = %for.cond.cleanup26, %entry + %result.0.lcssa = phi i32 [ 0, %entry ], [ %len, %for.cond.cleanup26 ] + ret i32 %result.0.lcssa + +for.cond1.for.cond.cleanup3_crit_edge: ; preds = %for.body4 + store i32 %spec.select, ptr %pos, align 4 + br label %for.cond.cleanup3 + +for.cond.cleanup3: ; preds = %for.cond1.for.cond.cleanup3_crit_edge, %for.cond1.preheader + %4 = phi i32 [ %spec.select, %for.cond1.for.cond.cleanup3_crit_edge ], [ %2, %for.cond1.preheader ] + %input.addr.1.lcssa = phi ptr [ %incdec.ptr, %for.cond1.for.cond.cleanup3_crit_edge ], [ %input.addr.078, %for.cond1.preheader ] + %cmp1266 = icmp slt i32 %4, %1 + br i1 %cmp1266, label %for.body14.lr.ph, label %for.cond23.preheader + +for.body14.lr.ph: ; preds = %for.cond.cleanup3 + %5 = load ptr, ptr %fir, align 4 + %6 = load ptr, ptr %delay, align 4 + %7 = sub i32 %1, %4 + br label %for.body14 + +for.body4: ; preds = %for.body4, %for.body4.lr.ph + %8 = phi i32 [ %2, %for.body4.lr.ph ], [ %spec.select, %for.body4 ] + %k.065 = phi i32 [ 0, %for.body4.lr.ph ], [ %inc8, 
%for.body4 ] + %input.addr.164 = phi ptr [ %input.addr.078, %for.body4.lr.ph ], [ %incdec.ptr, %for.body4 ] + %incdec.ptr = getelementptr inbounds float, ptr %input.addr.164, i32 1 + %9 = load float, ptr %input.addr.164, align 4 + %inc = add nsw i32 %8, 1 + %arrayidx = getelementptr inbounds float, ptr %3, i32 %8 + store float %9, ptr %arrayidx, align 4 + %cmp6.not = icmp slt i32 %inc, %1 + %spec.select = select i1 %cmp6.not, i32 %inc, i32 0 + %inc8 = add nuw nsw i32 %k.065, 1 + %exitcond.not = icmp eq i32 %inc8, %0 + br i1 %exitcond.not, label %for.cond1.for.cond.cleanup3_crit_edge, label %for.body4 + +for.cond23.preheader: ; preds = %for.body14, %for.cond.cleanup3 + %acc.0.lcssa = phi float [ 0.000000e+00, %for.cond.cleanup3 ], [ %14, %for.body14 ] + %coeff_pos.0.lcssa = phi i32 [ 0, %for.cond.cleanup3 ], [ %7, %for.body14 ] + %cmp2572 = icmp sgt i32 %4, 0 + br i1 %cmp2572, label %for.body27.lr.ph, label %for.cond.cleanup26 + +for.body27.lr.ph: ; preds = %for.cond23.preheader + %10 = load ptr, ptr %fir, align 4 + %11 = load ptr, ptr %delay, align 4 + br label %for.body27 + +for.body14: ; preds = %for.body14, %for.body14.lr.ph + %n.069 = phi i32 [ %4, %for.body14.lr.ph ], [ %inc20, %for.body14 ] + %coeff_pos.068 = phi i32 [ 0, %for.body14.lr.ph ], [ %inc15, %for.body14 ] + %acc.067 = phi float [ 0.000000e+00, %for.body14.lr.ph ], [ %14, %for.body14 ] + %inc15 = add nuw i32 %coeff_pos.068, 1 + %arrayidx16 = getelementptr inbounds float, ptr %5, i32 %coeff_pos.068 + %12 = load float, ptr %arrayidx16, align 4 + %arrayidx18 = getelementptr inbounds float, ptr %6, i32 %n.069 + %13 = load float, ptr %arrayidx18, align 4 + %14 = tail call float @llvm.fmuladd.f32(float %12, float %13, float %acc.067) + %inc20 = add nsw i32 %n.069, 1 + %exitcond83.not = icmp eq i32 %inc15, %7 + br i1 %exitcond83.not, label %for.cond23.preheader, label %for.body14 + +for.cond.cleanup26: ; preds = %for.body27, %for.cond23.preheader + %acc.1.lcssa = phi float [ %acc.0.lcssa, 
%for.cond23.preheader ], [ %17, %for.body27 ] + %inc39 = add nuw nsw i32 %i.080, 1 + %arrayidx37 = getelementptr inbounds float, ptr %output, i32 %i.080 + store float %acc.1.lcssa, ptr %arrayidx37, align 4 + %exitcond85.not = icmp eq i32 %inc39, %len + br i1 %exitcond85.not, label %for.cond.cleanup, label %for.cond1.preheader + +for.body27: ; preds = %for.body27, %for.body27.lr.ph + %n22.075 = phi i32 [ 0, %for.body27.lr.ph ], [ %inc34, %for.body27 ] + %coeff_pos.174 = phi i32 [ %coeff_pos.0.lcssa, %for.body27.lr.ph ], [ %inc29, %for.body27 ] + %acc.173 = phi float [ %acc.0.lcssa, %for.body27.lr.ph ], [ %17, %for.body27 ] + %inc29 = add nuw nsw i32 %coeff_pos.174, 1 + %arrayidx30 = getelementptr inbounds float, ptr %10, i32 %coeff_pos.174 + %15 = load float, ptr %arrayidx30, align 4 + %arrayidx32 = getelementptr inbounds float, ptr %11, i32 %n22.075 + %16 = load float, ptr %arrayidx32, align 4 + %17 = tail call float @llvm.fmuladd.f32(float %15, float %16, float %acc.173) + %inc34 = add nuw nsw i32 %n22.075, 1 + %exitcond84.not = icmp eq i32 %inc34, %4 + br i1 %exitcond84.not, label %for.cond.cleanup26, label %for.body27 +} diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/loopsecvconstant.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/loopsecvconstant.ll new file mode 100644 index 0000000000000..a4fb7808a4f8e --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/loopsecvconstant.ll @@ -0,0 +1,39 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +define dso_local float @test_loop(ptr nocapture noundef readonly %data1, ptr nocapture noundef readonly %data2) local_unnamed_addr { +; CHECK-LABEL: define dso_local float @test_loop( +; CHECK-SAME: ptr nocapture noundef readonly [[DATA1:%.*]], ptr nocapture noundef readonly 
[[DATA2:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: ret float [[TMP2:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[I_07:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[RESULT_06:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[TMP2]], [[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[DATA1]], i32 [[I_07]] +; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[DATA2]], i32 [[I_07]] +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 +; CHECK-NEXT: [[TMP2]] = tail call float @llvm.fmuladd.f32(float [[TMP0]], float [[TMP1]], float [[RESULT_06]]) +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_07]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], 1024 +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] +; +entry: + br label %for.body + +for.cond.cleanup: ; preds = %for.body + ret float %2 + +for.body: ; preds = %for.body, %entry + %i.07 = phi i32 [ 0, %entry ], [ %inc, %for.body ] + %result.06 = phi float [ 0.000000e+00, %entry ], [ %2, %for.body ] + %arrayidx = getelementptr inbounds float, ptr %data1, i32 %i.07 + %0 = load float, ptr %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds float, ptr %data2, i32 %i.07 + %1 = load float, ptr %arrayidx1, align 4 + %2 = tail call float @llvm.fmuladd.f32(float %0, float %1, float %result.06) + %inc = add nuw nsw i32 %i.07, 1 + %exitcond.not = icmp eq i32 %inc, 1024 + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body +} diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/mul.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/mul.ll new file mode 100644 index 0000000000000..bcf9852fd491e --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/mul.ll @@ -0,0 +1,104 @@ +; 
NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +define dso_local noundef i32 @dsps_mul_f32_ansi(ptr noundef readonly %input1, ptr noundef readonly %input2, ptr noundef writeonly %output, i32 noundef %len, i32 noundef %step1, i32 noundef %step2, i32 noundef %step_out) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_mul_f32_ansi( +; CHECK-SAME: ptr noundef readonly [[INPUT1:%.*]], ptr noundef readonly [[INPUT2:%.*]], ptr noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[STEP2:%.*]], i32 noundef [[STEP_OUT:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[INPUT1]], null +; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[INPUT2]], null +; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] +; CHECK-NEXT: [[CMP4:%.*]] = icmp eq ptr [[OUTPUT]], null +; CHECK-NEXT: [[OR_COND20:%.*]] = or i1 [[OR_COND]], [[CMP4]] +; CHECK-NEXT: br i1 [[OR_COND20]], label [[RETURN:%.*]], label [[IF_END:%.*]] +; CHECK: if.end: +; CHECK-NEXT: [[CMP41:%.*]] = icmp sgt i32 [[LEN]], 2 +; CHECK-NEXT: br i1 [[CMP41]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK: for.cond.preheader: +; CHECK-NEXT: [[CMP721:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP721]], label [[FOR_BODY_CLONE:%.*]], label [[RETURN]] +; CHECK: for.body: +; CHECK-NEXT: [[I_022:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[IF_END]] ] +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_022]], [[STEP1]] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL]] +; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[MUL8:%.*]] = mul nsw i32 [[I_022]], [[STEP2]] +; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr 
[[INPUT2]], i32 [[MUL8]] +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX9]], align 4 +; CHECK-NEXT: [[MUL10:%.*]] = fmul float [[TMP0]], [[TMP1]] +; CHECK-NEXT: [[MUL11:%.*]] = mul nsw i32 [[I_022]], [[STEP_OUT]] +; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL11]] +; CHECK-NEXT: store float [[MUL10]], ptr [[ARRAYIDX12]], align 4 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_022]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[RETURN]], label [[FOR_BODY]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[I_022_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[MUL_CLONE:%.*]] = mul nsw i32 [[I_022_CLONE]], [[STEP1]] +; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL_CLONE]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[MUL8_CLONE:%.*]] = mul nsw i32 [[I_022_CLONE]], [[STEP2]] +; CHECK-NEXT: [[ARRAYIDX9_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[MUL8_CLONE]] +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX9_CLONE]], align 4 +; CHECK-NEXT: [[MUL10_CLONE:%.*]] = fmul float [[TMP2]], [[TMP3]] +; CHECK-NEXT: [[MUL11_CLONE:%.*]] = mul nsw i32 [[I_022_CLONE]], [[STEP_OUT]] +; CHECK-NEXT: [[ARRAYIDX12_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL11_CLONE]] +; CHECK-NEXT: store float [[MUL10_CLONE]], ptr [[ARRAYIDX12_CLONE]], align 4 +; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[I_022_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[RETURN]], label [[FOR_BODY_CLONE]] +; CHECK: return: +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: ret i32 [[RETVAL_0]] 
+; +entry: + %cmp = icmp eq ptr %input1, null + %cmp1 = icmp eq ptr %input2, null + %or.cond = or i1 %cmp, %cmp1 + %cmp4 = icmp eq ptr %output, null + %or.cond20 = or i1 %or.cond, %cmp4 + br i1 %or.cond20, label %return, label %if.end + +if.end: ; preds = %entry + %cmp41 = icmp sgt i32 %len, 2 + br i1 %cmp41, label %for.body, label %for.cond.preheader + +for.cond.preheader: ; preds = %if.end + %cmp721 = icmp sgt i32 %len, 0 + br i1 %cmp721, label %for.body.clone, label %return + +for.body: ; preds = %for.body, %if.end + %i.022 = phi i32 [ %inc, %for.body ], [ 0, %if.end ] + %mul = mul nsw i32 %i.022, %step1 + %arrayidx = getelementptr inbounds float, ptr %input1, i32 %mul + %0 = load float, ptr %arrayidx, align 4 + %mul8 = mul nsw i32 %i.022, %step2 + %arrayidx9 = getelementptr inbounds float, ptr %input2, i32 %mul8 + %1 = load float, ptr %arrayidx9, align 4 + %mul10 = fmul float %0, %1 + %mul11 = mul nsw i32 %i.022, %step_out + %arrayidx12 = getelementptr inbounds float, ptr %output, i32 %mul11 + store float %mul10, ptr %arrayidx12, align 4 + %inc = add nuw nsw i32 %i.022, 1 + %exitcond.not = icmp eq i32 %inc, %len + br i1 %exitcond.not, label %return, label %for.body + +for.body.clone: ; preds = %for.body.clone, %for.cond.preheader + %i.022.clone = phi i32 [ %inc.clone, %for.body.clone ], [ 0, %for.cond.preheader ] + %mul.clone = mul nsw i32 %i.022.clone, %step1 + %arrayidx.clone = getelementptr inbounds float, ptr %input1, i32 %mul.clone + %2 = load float, ptr %arrayidx.clone, align 4 + %mul8.clone = mul nsw i32 %i.022.clone, %step2 + %arrayidx9.clone = getelementptr inbounds float, ptr %input2, i32 %mul8.clone + %3 = load float, ptr %arrayidx9.clone, align 4 + %mul10.clone = fmul float %2, %3 + %mul11.clone = mul nsw i32 %i.022.clone, %step_out + %arrayidx12.clone = getelementptr inbounds float, ptr %output, i32 %mul11.clone + store float %mul10.clone, ptr %arrayidx12.clone, align 4 + %inc.clone = add nuw nsw i32 %i.022.clone, 1 + %exitcond.not.clone = icmp eq 
i32 %inc.clone, %len + br i1 %exitcond.not.clone, label %return, label %for.body.clone + +return: ; preds = %for.body.clone, %for.body, %for.cond.preheader, %entry + %retval.0 = phi i32 [ 458755, %entry ], [ 0, %for.cond.preheader ], [ 0, %for.body ], [ 0, %for.body.clone ] + ret i32 %retval.0 +} diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/mulc.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/mulc.ll new file mode 100644 index 0000000000000..2c81f5bfd4b6f --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/mulc.ll @@ -0,0 +1,88 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +define dso_local noundef i32 @dsps_mulc_f32_ansi(ptr noalias noundef readonly %input, ptr noalias noundef writeonly %output, i32 noundef %len, float noundef %C, i32 noundef %step_in, i32 noundef %step_out) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_mulc_f32_ansi( +; CHECK-SAME: ptr noalias noundef readonly [[INPUT:%.*]], ptr noalias noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], float noundef [[C:%.*]], i32 noundef [[STEP_IN:%.*]], i32 noundef [[STEP_OUT:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[INPUT]], null +; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[OUTPUT]], null +; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] +; CHECK-NEXT: br i1 [[OR_COND]], label [[RETURN:%.*]], label [[IF_END:%.*]] +; CHECK: if.end: +; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[LEN]], 2 +; CHECK-NEXT: br i1 [[CMP4]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK: for.cond.preheader: +; CHECK-NEXT: [[CMP413:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP413]], label [[FOR_BODY_CLONE:%.*]], label [[RETURN]] +; CHECK: for.body: +; CHECK-NEXT: 
[[I_014:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[IF_END]] ] +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_014]], [[STEP_IN]] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[MUL]] +; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[MUL5:%.*]] = fmul float [[TMP0]], [[C]] +; CHECK-NEXT: [[MUL6:%.*]] = mul nsw i32 [[I_014]], [[STEP_OUT]] +; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL6]] +; CHECK-NEXT: store float [[MUL5]], ptr [[ARRAYIDX7]], align 4 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_014]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[RETURN]], label [[FOR_BODY]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[I_014_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[MUL_CLONE:%.*]] = mul nsw i32 [[I_014_CLONE]], [[STEP_IN]] +; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[MUL_CLONE]] +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[MUL5_CLONE:%.*]] = fmul float [[TMP1]], [[C]] +; CHECK-NEXT: [[MUL6_CLONE:%.*]] = mul nsw i32 [[I_014_CLONE]], [[STEP_OUT]] +; CHECK-NEXT: [[ARRAYIDX7_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL6_CLONE]] +; CHECK-NEXT: store float [[MUL5_CLONE]], ptr [[ARRAYIDX7_CLONE]], align 4 +; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[I_014_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[RETURN]], label [[FOR_BODY_CLONE]] +; CHECK: return: +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: ret i32 [[RETVAL_0]] +; +entry: + %cmp = icmp eq ptr %input, null + %cmp1 = icmp eq ptr 
%output, null + %or.cond = or i1 %cmp, %cmp1 + br i1 %or.cond, label %return, label %if.end + +if.end: ; preds = %entry + %cmp4 = icmp sgt i32 %len, 2 + br i1 %cmp4, label %for.body, label %for.cond.preheader + +for.cond.preheader: ; preds = %if.end + %cmp413 = icmp sgt i32 %len, 0 + br i1 %cmp413, label %for.body.clone, label %return + +for.body: ; preds = %for.body, %if.end + %i.014 = phi i32 [ %inc, %for.body ], [ 0, %if.end ] + %mul = mul nsw i32 %i.014, %step_in + %arrayidx = getelementptr inbounds float, ptr %input, i32 %mul + %0 = load float, ptr %arrayidx, align 4 + %mul5 = fmul float %0, %C + %mul6 = mul nsw i32 %i.014, %step_out + %arrayidx7 = getelementptr inbounds float, ptr %output, i32 %mul6 + store float %mul5, ptr %arrayidx7, align 4 + %inc = add nuw nsw i32 %i.014, 1 + %exitcond.not = icmp eq i32 %inc, %len + br i1 %exitcond.not, label %return, label %for.body + +for.body.clone: ; preds = %for.body.clone, %for.cond.preheader + %i.014.clone = phi i32 [ %inc.clone, %for.body.clone ], [ 0, %for.cond.preheader ] + %mul.clone = mul nsw i32 %i.014.clone, %step_in + %arrayidx.clone = getelementptr inbounds float, ptr %input, i32 %mul.clone + %1 = load float, ptr %arrayidx.clone, align 4 + %mul5.clone = fmul float %1, %C + %mul6.clone = mul nsw i32 %i.014.clone, %step_out + %arrayidx7.clone = getelementptr inbounds float, ptr %output, i32 %mul6.clone + store float %mul5.clone, ptr %arrayidx7.clone, align 4 + %inc.clone = add nuw nsw i32 %i.014.clone, 1 + %exitcond.not.clone = icmp eq i32 %inc.clone, %len + br i1 %exitcond.not.clone, label %return, label %for.body.clone + +return: ; preds = %for.body.clone, %for.body, %for.cond.preheader, %entry + %retval.0 = phi i32 [ 458755, %entry ], [ 0, %for.cond.preheader ], [ 0, %for.body ], [ 0, %for.body.clone ] + ret i32 %retval.0 +} diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sqrt.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sqrt.ll new file mode 100644 index 
0000000000000..99ac2877f76c6 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sqrt.ll @@ -0,0 +1,84 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +define dso_local noundef i32 @dsps_sqrt_f32_ansi(ptr noundef readonly %input, ptr noundef writeonly %output, i32 noundef %len) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_sqrt_f32_ansi( +; CHECK-SAME: ptr noundef readonly [[INPUT:%.*]], ptr noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]]) local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[INPUT]], null +; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[OUTPUT]], null +; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] +; CHECK-NEXT: br i1 [[OR_COND]], label [[RETURN:%.*]], label [[IF_END:%.*]] +; CHECK: if.end: +; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[LEN]], 2 +; CHECK-NEXT: br i1 [[CMP4]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK: for.cond.preheader: +; CHECK-NEXT: [[CMP411:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP411]], label [[FOR_BODY_CLONE:%.*]], label [[RETURN]] +; CHECK: for.body: +; CHECK-NEXT: [[I_012:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[IF_END]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_012]] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[SHR_I:%.*]] = ashr i32 [[TMP0]], 1 +; CHECK-NEXT: [[ADD_I:%.*]] = add nsw i32 [[SHR_I]], 532365312 +; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_012]] +; CHECK-NEXT: store i32 [[ADD_I]], ptr [[ARRAYIDX5]], align 4 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_012]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] +; CHECK-NEXT: br 
i1 [[EXITCOND_NOT]], label [[RETURN]], label [[FOR_BODY]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[I_012_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_012_CLONE]] +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[SHR_I_CLONE:%.*]] = ashr i32 [[TMP1]], 1 +; CHECK-NEXT: [[ADD_I_CLONE:%.*]] = add nsw i32 [[SHR_I_CLONE]], 532365312 +; CHECK-NEXT: [[ARRAYIDX5_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_012_CLONE]] +; CHECK-NEXT: store i32 [[ADD_I_CLONE]], ptr [[ARRAYIDX5_CLONE]], align 4 +; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[I_012_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[RETURN]], label [[FOR_BODY_CLONE]] +; CHECK: return: +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: ret i32 [[RETVAL_0]] +; +entry: + %cmp = icmp eq ptr %input, null + %cmp1 = icmp eq ptr %output, null + %or.cond = or i1 %cmp, %cmp1 + br i1 %or.cond, label %return, label %if.end + +if.end: ; preds = %entry + %cmp4 = icmp sgt i32 %len, 2 + br i1 %cmp4, label %for.body, label %for.cond.preheader + +for.cond.preheader: ; preds = %if.end + %cmp411 = icmp sgt i32 %len, 0 + br i1 %cmp411, label %for.body.clone, label %return + +for.body: ; preds = %for.body, %if.end + %i.012 = phi i32 [ %inc, %for.body ], [ 0, %if.end ] + %arrayidx = getelementptr inbounds float, ptr %input, i32 %i.012 + %0 = load i32, ptr %arrayidx, align 4 + %shr.i = ashr i32 %0, 1 + %add.i = add nsw i32 %shr.i, 532365312 + %arrayidx5 = getelementptr inbounds float, ptr %output, i32 %i.012 + store i32 %add.i, ptr %arrayidx5, align 4 + %inc = add nuw nsw i32 %i.012, 1 + %exitcond.not = icmp eq i32 %inc, %len + br 
i1 %exitcond.not, label %return, label %for.body + +for.body.clone: ; preds = %for.body.clone, %for.cond.preheader + %i.012.clone = phi i32 [ %inc.clone, %for.body.clone ], [ 0, %for.cond.preheader ] + %arrayidx.clone = getelementptr inbounds float, ptr %input, i32 %i.012.clone + %1 = load i32, ptr %arrayidx.clone, align 4 + %shr.i.clone = ashr i32 %1, 1 + %add.i.clone = add nsw i32 %shr.i.clone, 532365312 + %arrayidx5.clone = getelementptr inbounds float, ptr %output, i32 %i.012.clone + store i32 %add.i.clone, ptr %arrayidx5.clone, align 4 + %inc.clone = add nuw nsw i32 %i.012.clone, 1 + %exitcond.not.clone = icmp eq i32 %inc.clone, %len + br i1 %exitcond.not.clone, label %return, label %for.body.clone + +return: ; preds = %for.body.clone, %for.body, %for.cond.preheader, %entry + %retval.0 = phi i32 [ 458755, %entry ], [ 0, %for.cond.preheader ], [ 0, %for.body ], [ 0, %for.body.clone ] + ret i32 %retval.0 +} diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sub.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sub.ll new file mode 100644 index 0000000000000..9468a11ba6232 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sub.ll @@ -0,0 +1,104 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +define dso_local noundef i32 @dsps_sub_f32_ansi(ptr noundef readonly %input1, ptr noundef readonly %input2, ptr noundef writeonly %output, i32 noundef %len, i32 noundef %step1, i32 noundef %step2, i32 noundef %step_out) local_unnamed_addr { +; CHECK-LABEL: define dso_local noundef i32 @dsps_sub_f32_ansi( +; CHECK-SAME: ptr noundef readonly [[INPUT1:%.*]], ptr noundef readonly [[INPUT2:%.*]], ptr noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[STEP2:%.*]], i32 noundef [[STEP_OUT:%.*]]) 
local_unnamed_addr { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[INPUT1]], null +; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[INPUT2]], null +; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] +; CHECK-NEXT: [[CMP4:%.*]] = icmp eq ptr [[OUTPUT]], null +; CHECK-NEXT: [[OR_COND19:%.*]] = or i1 [[OR_COND]], [[CMP4]] +; CHECK-NEXT: br i1 [[OR_COND19]], label [[RETURN:%.*]], label [[IF_END:%.*]] +; CHECK: if.end: +; CHECK-NEXT: [[CMP41:%.*]] = icmp sgt i32 [[LEN]], 2 +; CHECK-NEXT: br i1 [[CMP41]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK: for.cond.preheader: +; CHECK-NEXT: [[CMP720:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP720]], label [[FOR_BODY_CLONE:%.*]], label [[RETURN]] +; CHECK: for.body: +; CHECK-NEXT: [[I_021:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[IF_END]] ] +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_021]], [[STEP1]] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL]] +; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[MUL8:%.*]] = mul nsw i32 [[I_021]], [[STEP2]] +; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[MUL8]] +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX9]], align 4 +; CHECK-NEXT: [[SUB:%.*]] = fsub float [[TMP0]], [[TMP1]] +; CHECK-NEXT: [[MUL10:%.*]] = mul nsw i32 [[I_021]], [[STEP_OUT]] +; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL10]] +; CHECK-NEXT: store float [[SUB]], ptr [[ARRAYIDX11]], align 4 +; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_021]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[RETURN]], label [[FOR_BODY]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[I_021_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[MUL_CLONE:%.*]] = mul nsw i32 [[I_021_CLONE]], 
[[STEP1]] +; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL_CLONE]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[MUL8_CLONE:%.*]] = mul nsw i32 [[I_021_CLONE]], [[STEP2]] +; CHECK-NEXT: [[ARRAYIDX9_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[MUL8_CLONE]] +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX9_CLONE]], align 4 +; CHECK-NEXT: [[SUB_CLONE:%.*]] = fsub float [[TMP2]], [[TMP3]] +; CHECK-NEXT: [[MUL10_CLONE:%.*]] = mul nsw i32 [[I_021_CLONE]], [[STEP_OUT]] +; CHECK-NEXT: [[ARRAYIDX11_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL10_CLONE]] +; CHECK-NEXT: store float [[SUB_CLONE]], ptr [[ARRAYIDX11_CLONE]], align 4 +; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[I_021_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[RETURN]], label [[FOR_BODY_CLONE]] +; CHECK: return: +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: ret i32 [[RETVAL_0]] +; +entry: + %cmp = icmp eq ptr %input1, null + %cmp1 = icmp eq ptr %input2, null + %or.cond = or i1 %cmp, %cmp1 + %cmp4 = icmp eq ptr %output, null + %or.cond19 = or i1 %or.cond, %cmp4 + br i1 %or.cond19, label %return, label %if.end + +if.end: ; preds = %entry + %cmp41 = icmp sgt i32 %len, 2 + br i1 %cmp41, label %for.body, label %for.cond.preheader + +for.cond.preheader: ; preds = %if.end + %cmp720 = icmp sgt i32 %len, 0 + br i1 %cmp720, label %for.body.clone, label %return + +for.body: ; preds = %for.body, %if.end + %i.021 = phi i32 [ %inc, %for.body ], [ 0, %if.end ] + %mul = mul nsw i32 %i.021, %step1 + %arrayidx = getelementptr inbounds float, ptr %input1, i32 %mul + %0 = load float, ptr %arrayidx, align 4 + %mul8 = mul nsw i32 %i.021, %step2 + %arrayidx9 = getelementptr 
inbounds float, ptr %input2, i32 %mul8 + %1 = load float, ptr %arrayidx9, align 4 + %sub = fsub float %0, %1 + %mul10 = mul nsw i32 %i.021, %step_out + %arrayidx11 = getelementptr inbounds float, ptr %output, i32 %mul10 + store float %sub, ptr %arrayidx11, align 4 + %inc = add nuw nsw i32 %i.021, 1 + %exitcond.not = icmp eq i32 %inc, %len + br i1 %exitcond.not, label %return, label %for.body + +for.body.clone: ; preds = %for.body.clone, %for.cond.preheader + %i.021.clone = phi i32 [ %inc.clone, %for.body.clone ], [ 0, %for.cond.preheader ] + %mul.clone = mul nsw i32 %i.021.clone, %step1 + %arrayidx.clone = getelementptr inbounds float, ptr %input1, i32 %mul.clone + %2 = load float, ptr %arrayidx.clone, align 4 + %mul8.clone = mul nsw i32 %i.021.clone, %step2 + %arrayidx9.clone = getelementptr inbounds float, ptr %input2, i32 %mul8.clone + %3 = load float, ptr %arrayidx9.clone, align 4 + %sub.clone = fsub float %2, %3 + %mul10.clone = mul nsw i32 %i.021.clone, %step_out + %arrayidx11.clone = getelementptr inbounds float, ptr %output, i32 %mul10.clone + store float %sub.clone, ptr %arrayidx11.clone, align 4 + %inc.clone = add nuw nsw i32 %i.021.clone, 1 + %exitcond.not.clone = icmp eq i32 %inc.clone, %len + br i1 %exitcond.not.clone, label %return, label %for.body.clone + +return: ; preds = %for.body.clone, %for.body, %for.cond.preheader, %entry + %retval.0 = phi i32 [ 458755, %entry ], [ 0, %for.cond.preheader ], [ 0, %for.body ], [ 0, %for.body.clone ] + ret i32 %retval.0 +} From e6210084e6c9d1d67dc7f6adeae4b84990d0ddbe Mon Sep 17 00:00:00 2001 From: "chen.qian" Date: Fri, 15 Nov 2024 16:15:20 +0800 Subject: [PATCH 269/289] [Pass] Add LoopUnrollAndRemainder pass --- llvm/lib/Target/RISCV/CMakeLists.txt | 1 + .../RISCV/RISCVLoopUnrollAndRemainder.cpp | 5042 +++++++++++++++++ .../RISCV/RISCVLoopUnrollAndRemainder.h | 42 + llvm/lib/Target/RISCV/RISCVTargetMachine.cpp | 7 + .../RISCV/RISCVLoopUnrollAndRemainder/add.ll | 162 +- 
.../RISCV/RISCVLoopUnrollAndRemainder/addc.ll | 126 +- .../RISCVLoopUnrollAndRemainder/ccorr.ll | 454 +- .../RISCV/RISCVLoopUnrollAndRemainder/conv.ll | 450 +- .../RISCV/RISCVLoopUnrollAndRemainder/corr.ll | 242 +- .../RISCVLoopUnrollAndRemainder/dotprod.ll | 129 +- .../dotprod_template_complex.ll | 115 +- .../RISCVLoopUnrollAndRemainder/dotprode.ll | 131 +- .../RISCV/RISCVLoopUnrollAndRemainder/fir.ll | 299 +- .../RISCV/RISCVLoopUnrollAndRemainder/fird.ll | 327 +- .../loopsecvconstant.ll | 78 +- .../RISCV/RISCVLoopUnrollAndRemainder/mul.ll | 162 +- .../RISCV/RISCVLoopUnrollAndRemainder/mulc.ll | 124 +- .../RISCV/RISCVLoopUnrollAndRemainder/sqrt.ll | 142 +- .../RISCV/RISCVLoopUnrollAndRemainder/sub.ll | 162 +- 19 files changed, 7820 insertions(+), 375 deletions(-) create mode 100644 llvm/lib/Target/RISCV/RISCVLoopUnrollAndRemainder.cpp create mode 100644 llvm/lib/Target/RISCV/RISCVLoopUnrollAndRemainder.h diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt index e3558d689a0cd..d22639bc6afa3 100644 --- a/llvm/lib/Target/RISCV/CMakeLists.txt +++ b/llvm/lib/Target/RISCV/CMakeLists.txt @@ -38,6 +38,7 @@ add_llvm_target(RISCVCodeGen RISCVGatherScatterLowering.cpp RISCVSplitLoopByLength.cpp RISCVCustomLICM.cpp + RISCVLoopUnrollAndRemainder.cpp RISCVInsertVSETVLI.cpp RISCVInsertReadWriteCSR.cpp RISCVInsertWriteVXRM.cpp diff --git a/llvm/lib/Target/RISCV/RISCVLoopUnrollAndRemainder.cpp b/llvm/lib/Target/RISCV/RISCVLoopUnrollAndRemainder.cpp new file mode 100644 index 0000000000000..a3e044a1a54bb --- /dev/null +++ b/llvm/lib/Target/RISCV/RISCVLoopUnrollAndRemainder.cpp @@ -0,0 +1,5042 @@ +//===-- RISCVLoopUnrollAndRemainder.cpp - Loop Unrolling Pass +//------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements a loop unrolling optimization pass specifically designed +// for Digital Signal Processing (DSP) algorithms. The pass targets common +// computational patterns found in various DSP operations including: +// - FIR and IIR filters +// - Convolution and correlation +// - Vector operations +// - Dot product calculations +// - Mathematical functions +// +// The pass performs the following main operations: +// 1. Identifies loops in DSP algorithm implementations +// 2. Unrolls the main computational loops, typically by a factor of 8 +// 3. Efficiently handles remainder iterations +// 4. Optimizes memory access patterns for improved cache utilization +// 5. Adjusts control flow and PHI nodes to support the unrolled structure +// 6. Performs cleanup and further optimization after unrolling +// +// This transformation can significantly improve performance for DSP algorithms +// by: +// - Increasing instruction-level parallelism +// - Improving cache utilization for data and coefficient access +// - Reducing loop overhead +// - Enabling better vectorization opportunities +// +// The pass is particularly effective for algorithms with intensive loop-based +// computations, where the main computational loop dominates the execution time. +// It aims to optimize both the main loop body and the handling of edge cases, +// providing a balance between performance and code size. 
+// +//===----------------------------------------------------------------------===// +#include "RISCVLoopUnrollAndRemainder.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/DenseMapInfo.h" +#include "llvm/ADT/DenseSet.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SetVector.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/Analysis/AssumptionCache.h" +#include "llvm/Analysis/BlockFrequencyInfo.h" +#include "llvm/Analysis/CGSCCPassManager.h" +#include "llvm/Analysis/CodeMetrics.h" +#include "llvm/Analysis/LoopAnalysisManager.h" +#include "llvm/Analysis/LoopInfo.h" +#include "llvm/Analysis/LoopPass.h" +#include "llvm/Analysis/LoopUnrollAnalyzer.h" +#include "llvm/Analysis/OptimizationRemarkEmitter.h" +#include "llvm/Analysis/ProfileSummaryInfo.h" +#include "llvm/Analysis/ScalarEvolution.h" +#include "llvm/Analysis/TargetTransformInfo.h" +#include "llvm/IR/BasicBlock.h" +#include "llvm/IR/CFG.h" +#include "llvm/IR/Constant.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DiagnosticInfo.h" +#include "llvm/IR/Dominators.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/InstrTypes.h" +#include "llvm/IR/Instruction.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/IntrinsicsRISCV.h" +#include "llvm/IR/LegacyPassManager.h" +#include "llvm/IR/Metadata.h" +#include "llvm/IR/PassManager.h" +#include "llvm/IR/Verifier.h" +#include "llvm/InitializePasses.h" +#include "llvm/Pass.h" +#include "llvm/Passes/PassBuilder.h" +#include "llvm/Support/Casting.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/FileSystem.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Transforms/IPO.h" +#include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Scalar/DCE.h" +#include "llvm/Transforms/Scalar/DeadStoreElimination.h" 
+#include "llvm/Transforms/Scalar/EarlyCSE.h" +#include "llvm/Transforms/Scalar/GVN.h" +#include "llvm/Transforms/Scalar/LoopStrengthReduce.h" +#include "llvm/Transforms/Scalar/Reassociate.h" +#include "llvm/Transforms/Utils.h" +#include "llvm/Transforms/Utils/BasicBlockUtils.h" +#include "llvm/Transforms/Utils/Cloning.h" +#include "llvm/Transforms/Utils/Local.h" +#include "llvm/Transforms/Utils/LoopPeel.h" +#include "llvm/Transforms/Utils/LoopSimplify.h" +#include "llvm/Transforms/Utils/LoopUtils.h" +#include "llvm/Transforms/Utils/SimplifyCFGOptions.h" +#include "llvm/Transforms/Utils/UnrollLoop.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace llvm; + +#define DEBUG_TYPE "riscv-loop-unroll-and-remainder" + +// Enumeration to represent different types of unrolling +enum class UnrollType { + DOTPROD, + ADD_ADDC_SUB_MUL_MULC_SQRT, + CONV_CCORR, + FIRD, + FIR, + CORR, + UNKNOWN +}; + +// Global variable to store the current unroll type +static UnrollType currentUnrollType = UnrollType::UNKNOWN; + +// Command line option to enable the RISCVLoopUnrollAndRemainder pass +cl::opt llvm::EnableRISCVLoopUnrollAndRemainder( + "riscv-loop-unroll-and-remainder", cl::init(false), + cl::desc("Enable loop unrolling and remainder specific loop")); + +// Helper function to get a basic block by name from a function +static BasicBlock *getBasicBlockByName(Function &F, StringRef Name) { + for (BasicBlock &BB : F) + if (BB.getName() == Name) + return &BB; + return nullptr; +} + +// Helper function to get the first ICmp instruction with a specific predicate +// in a basic block +static ICmpInst *getFirstICmpInstWithPredicate(BasicBlock *BB, + ICmpInst::Predicate Predicate) { + for (Instruction &I : *BB) { + if (auto *CI = dyn_cast(&I)) { + if (CI->getPredicate() == Predicate) { + return CI; + } + } + } + return nullptr; +} + +// Helper function to get the last ICmp instruction with a specific predicate in +// a basic block 
+static ICmpInst *getLastICmpInstWithPredicate(BasicBlock *BB, + ICmpInst::Predicate Predicate) { + ICmpInst *lastICmp = nullptr; + for (Instruction &I : *BB) { + if (auto *CI = dyn_cast(&I)) { + if (CI->getPredicate() == Predicate) { + lastICmp = CI; + } + } + } + return lastICmp; +} + +// Helper function to get the first ICmp instruction in a basic block +static ICmpInst *getFirstICmpInst(BasicBlock *BB) { + for (Instruction &I : *BB) { + if (auto *CI = dyn_cast(&I)) { + return CI; + } + } + return nullptr; +} + +// Helper function to get the last ICmp instruction in a basic block +static ICmpInst *getLastICmpInst(BasicBlock *BB) { + for (auto it = BB->rbegin(); it != BB->rend(); ++it) { + if (auto *icmp = dyn_cast(&*it)) { + return icmp; + } + } + return nullptr; +} + +// Helper function to get the first float PHI node in a basic block +static PHINode *getFirstFloatPhi(BasicBlock *BB) { + for (auto &Inst : *BB) { + if (auto *Phi = dyn_cast(&Inst)) { + if (Phi->getType()->isFloatTy()) { + return Phi; + } + } + } + return nullptr; +} + +// Helper function to get the last float PHI node in a basic block +static PHINode *getLastFloatPhi(BasicBlock *BB) { + for (auto it = BB->rbegin(); it != BB->rend(); ++it) { + if (auto *Phi = dyn_cast(&*it)) { + if (Phi->getType()->isFloatTy()) { + return Phi; + } + } + } + return nullptr; +} + +// Helper function to get the first 32-bit integer PHI node in a basic block +static PHINode *getFirstI32Phi(BasicBlock *BB) { + for (auto &Inst : *BB) { + if (auto *Phi = dyn_cast(&Inst)) { + if (Phi->getType()->isIntegerTy(32)) { + return Phi; + } + } + } + return nullptr; +} + +// Helper function to get the last 32-bit integer PHI node in a basic block +static PHINode *getLastI32Phi(BasicBlock *BB) { + for (auto it = BB->rbegin(); it != BB->rend(); ++it) { + if (auto *Phi = dyn_cast(&*it)) { + if (Phi->getType()->isIntegerTy(32)) { + return Phi; + } + } + } + return nullptr; +} + +// Helper function to get the last PHI node in a basic 
block +static PHINode *getLastPhi(BasicBlock *BB) { + for (auto it = BB->rbegin(); it != BB->rend(); ++it) { + if (auto *Phi = dyn_cast(&*it)) { + return Phi; + } + } + return nullptr; +} + +// Helper function to get the first CallInst with a specific name in a basic +// block +static CallInst *getFirstCallInstWithName(BasicBlock *BB, StringRef Name) { + for (Instruction &I : *BB) { + if (auto *Call = dyn_cast(&I)) { + if (Call->getCalledFunction() && + Call->getCalledFunction()->getName() == Name) { + return Call; + } + } + } + return nullptr; +} + +// Helper function to update operands of new instructions +static void updateOperands(SmallVector &NewInsts, + ValueToValueMapTy &ValueMap) { + for (Instruction *inst : NewInsts) { + for (unsigned i = 0; i < inst->getNumOperands(); i++) { + Value *op = inst->getOperand(i); + if (ValueMap.count(op)) { + inst->setOperand(i, ValueMap[op]); + } + } + } +} + +// Helper function to swap the successors of a terminator instruction +static void swapTerminatorSuccessors(BasicBlock *BB) { + if (auto *BI = dyn_cast(BB->getTerminator())) { + if (BI->isConditional() && BI->getNumSuccessors() == 2) { + BasicBlock *TrueSuccessor = BI->getSuccessor(0); + BasicBlock *FalseSuccessor = BI->getSuccessor(1); + BI->setSuccessor(0, FalseSuccessor); + BI->setSuccessor(1, TrueSuccessor); + } else { + llvm_unreachable("BB's terminator is not a conditional branch or doesn't " + "have two successors"); + } + } else { + llvm_unreachable("BB's terminator is not a branch instruction"); + } +} + +// Helper function to clone a basic block and update its relations +static BasicBlock *cloneBasicBlockWithRelations(BasicBlock *BB, + const std::string &NameSuffix, + Function *F) { + ValueToValueMapTy VMap; + BasicBlock *NewBB = CloneBasicBlock(BB, VMap, NameSuffix, F); + + // Update instruction references in the new block + for (Instruction &I : *NewBB) { + // Update operands + for (Use &U : I.operands()) { + Value *V = U.get(); + Value *NewV = VMap[V]; + 
if (NewV) { + U.set(NewV); + } + } + + // Update PHI node basic block references + if (PHINode *PN = dyn_cast(&I)) { + for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { + BasicBlock *IncomingBB = PN->getIncomingBlock(i); + if (IncomingBB == BB) { + PN->setIncomingBlock(i, NewBB); + } else if (VMap.count(IncomingBB)) { + PN->setIncomingBlock(i, cast(VMap[IncomingBB])); + } + } + } + } + + return NewBB; +} + +// Helper function to unroll and duplicate a loop iteration +static Instruction *unrollAndDuplicateLoopIteration(LLVMContext &Ctx, + BasicBlock *BB, + IRBuilder<> &Builder, + unsigned int i) { + PHINode *IPhi = dyn_cast(&BB->front()); + BasicBlock::iterator BeginIt, EndIt, ToIt; + SmallVector newInsts; + ValueToValueMapTy ValueMap; + Instruction *Add = nullptr; + Instruction *tailcallfmuladd = nullptr; + Instruction *duplicatedPhiNode = nullptr; + + // Find the range of instructions to duplicate + for (Instruction &I : *BB) { + if (auto *phi = dyn_cast(&I)) { + if (phi->getType()->isFloatTy()) { + BeginIt = I.getIterator(); + } + } else if (RecurrenceDescriptor::isFMulAddIntrinsic(&I)) { + EndIt = std::next(I.getIterator()); + tailcallfmuladd = &I; + ToIt = std::next(EndIt); + break; + } + } + + assert(&*BeginIt && &*EndIt && "Failed to find instruction range"); + + // Clone and modify instructions + int arrayidx = 0; + for (auto it = BeginIt; it != EndIt; ++it) { + Instruction *newInst = it->clone(); + if (newInst->getOpcode() == Instruction::PHI) + newInst->setName("acc" + Twine(i)); + + if (auto *GEP = dyn_cast(newInst)) { + if (!Add) + Add = BinaryOperator::CreateDisjoint( + Instruction::Or, IPhi, ConstantInt::get(Type::getInt32Ty(Ctx), i), + "add" + Twine(i), BB); + + newInst->setName("arrayidx" + Twine(i) + "_" + Twine(arrayidx)); + newInst->setOperand(1, Add); + arrayidx++; + } + newInsts.push_back(newInst); + ValueMap[&*it] = newInst; + } + + // Update operands and insert new instructions + updateOperands(newInsts, ValueMap); + for 
(Instruction *newInst : newInsts) { + if (newInst->getOpcode() == Instruction::PHI) + duplicatedPhiNode = newInst->clone(); + newInst->insertInto(BB, BB->end()); + } + + return duplicatedPhiNode; +} + +// Helper function to move PHI nodes to the top of a basic block +static void movePHINodesToTop(BasicBlock &BB, + BasicBlock *ForBodyPreheaderBB = nullptr) { + SmallVector PHIs; + for (Instruction &I : BB) { + if (PHINode *PHI = dyn_cast(&I)) { + if (ForBodyPreheaderBB) + PHI->setIncomingBlock(1, ForBodyPreheaderBB); + PHIs.push_back(PHI); + } + } + + // Move PHI nodes in reverse order + for (auto it = PHIs.rbegin(); it != PHIs.rend(); ++it) { + (*it)->moveBefore(&BB.front()); + } +} + +// Helper function to update predecessors to point to a new preheader +static void updatePredecessorsToPreheader(BasicBlock *ForBody, + BasicBlock *ForBodyPreheader) { + SmallVector predecessors_bb; + for (auto *Pred : predecessors(ForBody)) { + if (Pred != ForBody) + predecessors_bb.push_back(Pred); + } + + for (BasicBlock *Pred : predecessors_bb) { + Instruction *TI = Pred->getTerminator(); + for (unsigned i = 0; i < TI->getNumSuccessors(); ++i) { + if (TI->getSuccessor(i) == ForBody) { + TI->setSuccessor(i, ForBodyPreheader); + } + } + } + + if (!ForBodyPreheader->getTerminator()) { + BranchInst::Create(ForBody, ForBodyPreheader); + } +} + +// Helper function to get the 'len' value from the entry block +static Value *getLenFromEntryBlock(Function &F) { + ICmpInst *ICmp = nullptr; + for (BasicBlock &BB : F) { + ICmp = getFirstICmpInstWithPredicate(&BB, ICmpInst::ICMP_SGT); + if (ICmp) + break; + } + + assert(ICmp && "icmp sgt instruction not found"); + return ICmp->getOperand(0); +} + +// Helper function to find specific instructions in a basic block +static std::tuple +findKeyInstructions(BasicBlock *ForBody) { + PHINode *ThirdPHI = nullptr; + CallInst *callInst = nullptr; + BinaryOperator *addInst = nullptr; + int PHICount = 0; + + for (Instruction &I : *ForBody) { + if (auto *PHI 
= dyn_cast(&I)) { + PHICount++; + if (PHICount == 3) { + ThirdPHI = PHI; + } + } else if (auto *ci = dyn_cast(&I)) { + callInst = ci; + } else if (auto *BinOp = dyn_cast(&I)) { + if (BinOp->getOpcode() == Instruction::Add) { + addInst = BinOp; + } + } + } + + return std::make_tuple(ThirdPHI, callInst, addInst); +} + +// Helper function to rename instructions +static void renameInstruction(Instruction *inst) { + if (inst->getOpcode() == Instruction::PHI) { + inst->setName("acc"); + } else if (inst->getOpcode() == Instruction::GetElementPtr) { + inst->setName("arrayidx"); + } +} + +// Helper function to set add instruction in for body +static void setAddInForBody(Instruction *inst, Instruction *Add, + Instruction *InsertBefore) { + if (inst->getOpcode() == Instruction::PHI) { + Add->moveBefore(InsertBefore); + } else if (inst->getOpcode() == Instruction::GetElementPtr) { + inst->setOperand(1, Add); + } +} + +// Helper function to copy and remap instructions +static void copyAndRemapInstructions(Instruction *StartInst, + Instruction *EndInst, + Instruction *InsertBefore, + Instruction *Add) { + ValueToValueMapTy ValueMap; + SmallVector NewInsts; + + for (auto it = StartInst->getIterator(); &*it != EndInst; ++it) { + Instruction *newInst = it->clone(); + if (auto *BinOp = dyn_cast(newInst)) { + if (BinOp->getOpcode() == Instruction::Add) { + continue; + } + } + NewInsts.push_back(newInst); + ValueMap[&*it] = newInst; + } + + updateOperands(NewInsts, ValueMap); + + for (Instruction *newInst : NewInsts) { + renameInstruction(newInst); + newInst->insertBefore(InsertBefore); + setAddInForBody(newInst, Add, InsertBefore); + } +} + +// Helper function to preprocess the cloned for body +static void preProcessClonedForBody(BasicBlock *ClonedForBody, Value *sub) { + Instruction *addInst = nullptr; + for (Instruction &I : *ClonedForBody) { + if (auto *BinOp = dyn_cast(&I)) { + if (BinOp->getOpcode() == Instruction::Add) { + BinOp->setOperand(1, ConstantInt::get(BinOp->getType(), 
8)); + addInst = BinOp; + } + } + if (auto *icmp = dyn_cast(&I)) { + icmp->setPredicate(CmpInst::Predicate::ICMP_SLT); + icmp->setOperand(0, addInst); + icmp->setOperand(1, sub); + icmp->setName("cmp11"); + } + } + LLVM_DEBUG(ClonedForBody->dump()); +} + +// Helper function to modify getelementptr instructions +static void modifyGetElementPtr(BasicBlock *BB) { + SmallVector gepInsts; + Value *firstGEPOperand0 = nullptr; + Value *secondGEPOperand1 = nullptr; + + for (Instruction &I : *BB) { + if (auto *GEP = dyn_cast(&I)) { + gepInsts.push_back(GEP); + } + } + + if (gepInsts.size() < 8 || gepInsts.size() % 2 != 0) { + return; + } + + firstGEPOperand0 = gepInsts[0]; + secondGEPOperand1 = gepInsts[1]; + + for (size_t i = 2; i < gepInsts.size(); ++i) { + if (i % 2 == 0) { + if (i < gepInsts.size() - 2) { + gepInsts[i]->setOperand(0, firstGEPOperand0); + } + } else { + gepInsts[i]->setOperand(0, secondGEPOperand1); + } + + if (i == 14) + continue; + + Instruction *operand1 = dyn_cast(gepInsts[i]->getOperand(1)); + gepInsts[i]->setOperand( + 1, ConstantInt::get(Type::getInt32Ty(BB->getContext()), i / 2)); + if (operand1 && operand1->use_empty()) { + operand1->eraseFromParent(); + } + } +} + +// Helper function to check if a PHI node has an incoming value of zero +static bool isIncomingValueZeroOfPhi(PHINode *phi) { + return phi->getType()->isIntegerTy(32) && + isa(phi->getIncomingValue(0)) && + cast(phi->getIncomingValue(0))->isZero(); +} + +// Helper function to find and set add instructions +static std::pair +findAndSetAddInstructions(BasicBlock *ClonedForBody) { + Instruction *FirstAdd = nullptr; + Instruction *SecondAdd = nullptr; + + for (Instruction &I : *ClonedForBody) { + if (BinaryOperator *BinOp = dyn_cast(&I)) { + if (BinOp->getOpcode() == Instruction::Add) { + if (!FirstAdd) { + FirstAdd = &I; + FirstAdd->setHasNoSignedWrap(true); + } else if (!SecondAdd) { + SecondAdd = &I; + break; + } + } + } + } + assert(FirstAdd && SecondAdd && "Failed to find matching 
add instructions"); + return std::make_pair(FirstAdd, SecondAdd); +} + +// Helper functions for PHI node manipulation + +static PHINode *findZeroInitializedPHI(BasicBlock *block) { + for (Instruction &I : *block) { + if (PHINode *phi = dyn_cast(&I)) { + if (isIncomingValueZeroOfPhi(phi)) { + return phi; + } + } + } + return nullptr; +} + +static PHINode *findIntegerPHI(BasicBlock *block) { + for (Instruction &I : *block) { + if (PHINode *phi = dyn_cast(&I)) { + if (phi->getType()->isIntegerTy(32) && !isIncomingValueZeroOfPhi(phi)) { + return phi; + } + } + } + return nullptr; +} + +// Helper function to unroll loop body +static void unrollLoopBody(BasicBlock *block, PHINode *thirdPHI, + Instruction *callInst, Instruction *addInst, + PHINode *zeroInitializedPHI, LLVMContext &context) { + for (int i = 1; i < 8; i++) { + Instruction *add = BinaryOperator::CreateDisjoint( + Instruction::Or, zeroInitializedPHI, + ConstantInt::get(Type::getInt32Ty(context), i), "add" + Twine(i), + block); + copyAndRemapInstructions(thirdPHI, callInst->getNextNode(), addInst, add); + } +} + +// Helper function to update add instruction +static void updateAddInstruction(Instruction *addInst, PHINode *integerPHI, + LLVMContext &context) { + if (addInst) { + addInst->setOperand(1, ConstantInt::get(Type::getInt32Ty(context), 8)); + addInst->setOperand(0, integerPHI); + } +} + +// Helper function to update block terminator +static void updateBlockTerminator(BasicBlock *block, BasicBlock *successor) { + Instruction *terminator = block->getTerminator(); + terminator->setSuccessor(0, block); + terminator->setSuccessor(1, successor); +} + +// Helper function to modify getelementptr for unrolling +static void modifyGetElementPtrForUnrolling(BasicBlock *block) { + SmallVector gepInsts; + for (Instruction &I : *block) { + if (auto *GEP = dyn_cast(&I)) { + gepInsts.push_back(GEP); + } + } + + for (size_t i = 2; i < gepInsts.size(); i += 2) { + gepInsts[i]->setOperand(0, gepInsts[0]); + 
gepInsts[i]->setOperand( + 1, ConstantInt::get(Type::getInt32Ty(block->getContext()), i / 2)); + } +} + +// Helper function to handle add instructions +static void handleAddInstructions(BasicBlock *block, unsigned int unrollFactor, + PHINode *zeroInitializedPHI, + LLVMContext &context) { + auto [firstAdd, secondAdd] = findAndSetAddInstructions(block); + + if (firstAdd && secondAdd) { + firstAdd->moveBefore(secondAdd); + + if (unrollFactor == 1) { + firstAdd->setOperand(1, ConstantInt::get(Type::getInt32Ty(context), 8)); + secondAdd->setOperand(0, zeroInitializedPHI); + } + } +} + +// Function to unroll the cloned for loop body +static void unrollClonedForBody(BasicBlock *clonedForBody, + BasicBlock *forCondPreheader, + unsigned int unrollFactor = 0) { + Function *function = clonedForBody->getParent(); + LLVMContext &context = function->getContext(); + + // Find key instructions in the cloned for body + auto [thirdPHI, callInst, addInst] = findKeyInstructions(clonedForBody); + PHINode *zeroInitializedPHI = findZeroInitializedPHI(clonedForBody); + PHINode *integerPHI = findIntegerPHI(clonedForBody); + + assert(zeroInitializedPHI && "No matching zero-initialized PHI node found"); + + // Unroll the loop body if key instructions are found + if (thirdPHI && callInst) { + unrollLoopBody(clonedForBody, thirdPHI, callInst, addInst, + zeroInitializedPHI, context); + } + + // Update the add instruction + updateAddInstruction(addInst, integerPHI, context); + + // Update the basic block terminator + updateBlockTerminator(clonedForBody, forCondPreheader); + + // Move PHI nodes to the top of the basic block + movePHINodesToTop(*clonedForBody); + + // Modify getelementptr instructions based on the unroll factor + if (unrollFactor == 0) { + modifyGetElementPtr(clonedForBody); + } else { + modifyGetElementPtrForUnrolling(clonedForBody); + } + + // Handle add instructions + handleAddInstructions(clonedForBody, unrollFactor, zeroInitializedPHI, + context); +} + +// Function to check 
if a call instruction can be moved +static bool canMoveCallInstruction(CallInst *callInst, + Instruction *insertPoint) { + for (unsigned i = 0; i < callInst->getNumOperands(); ++i) { + if (auto *operandInst = dyn_cast(callInst->getOperand(i))) { + if (operandInst->getParent() == callInst->getParent() && + insertPoint->comesBefore(operandInst)) { + return false; + } + } + } + return true; +} + +// Function to group and reorder instructions in a basic block +static void groupAndReorderInstructions(BasicBlock *clonedForBody) { + // Collect different types of instructions + SmallVector phiNodes; + SmallVector orInsts, gepInsts, loadInsts, storeInsts, mulInsts, + addInsts, subInsts, callInsts, ashrInsts, faddInsts, fmulInsts, fsubInsts; + + // Categorize instructions by type + for (Instruction &I : *clonedForBody) { + if (auto *phi = dyn_cast(&I)) { + phiNodes.push_back(phi); + } else if (I.getOpcode() == Instruction::Or) { + orInsts.push_back(&I); + } else if (isa(&I)) { + gepInsts.push_back(&I); + } else if (isa(&I)) { + loadInsts.push_back(&I); + } else if (isa(&I)) { + storeInsts.push_back(&I); + } else if (I.getOpcode() == Instruction::Mul) { + mulInsts.push_back(&I); + } else if (isa(&I)) { + callInsts.push_back(&I); + } else if (I.getOpcode() == Instruction::Add) { + addInsts.push_back(&I); + } else if (I.getOpcode() == Instruction::Sub) { + subInsts.push_back(&I); + } else if (I.getOpcode() == Instruction::FAdd) { + faddInsts.push_back(&I); + } else if (I.getOpcode() == Instruction::FMul) { + fmulInsts.push_back(&I); + } else if (I.getOpcode() == Instruction::FSub) { + fsubInsts.push_back(&I); + } else if (I.getOpcode() == Instruction::AShr) { + return; + } + } + + // If no PHI nodes are found, return + if (phiNodes.empty()) { + return; + } + + // Reorder instructions + Instruction *insertPoint = phiNodes.back()->getNextNode(); + bool canMoveCallInst = + callInsts.empty() || + canMoveCallInstruction(dyn_cast(callInsts[0]), insertPoint); + + auto moveInstructions 
= [&insertPoint](SmallVector &insts) { + for (auto *inst : insts) { + inst->moveBefore(insertPoint); + insertPoint = inst->getNextNode(); + } + }; + + // Move instructions in the desired order + moveInstructions(mulInsts); + moveInstructions(addInsts); + moveInstructions(orInsts); + moveInstructions(subInsts); + moveInstructions(gepInsts); + moveInstructions(loadInsts); + moveInstructions(faddInsts); + moveInstructions(fmulInsts); + moveInstructions(fsubInsts); + if (canMoveCallInst) { + moveInstructions(callInsts); + } +} + +// Function to transform a single loop depth (currently suitable for +// dotprod/dotprode example) +static bool transformOneLoopDepth(Function &F) { + LLVMContext &ctx = F.getContext(); + bool changed = false; + + // Get necessary basic blocks and values + Value *len = getLenFromEntryBlock(F); + BasicBlock *entryBB = &F.getEntryBlock(); + BasicBlock *forBodyBB = getBasicBlockByName(F, "for.body"); + BasicBlock *forBodyNewBB = getBasicBlockByName(F, "for.body.clone"); + BasicBlock *ifEnd = getBasicBlockByName(F, "if.end"); + BasicBlock *forCond46PreheaderBB = + getBasicBlockByName(F, "for.cond.preheader"); + + assert(forBodyBB && "Expected to find for.body!"); + assert(forBodyNewBB && "Expected to find for.body.clone!"); + assert(ifEnd && "Expected to find if.end!"); + assert(forCond46PreheaderBB && "Expected to find for.cond.preheader!"); + + // Create new basic blocks + BasicBlock *forCondPreheaderBB = + BasicBlock::Create(F.getContext(), "for.cond.preheader", &F, forBodyBB); + BasicBlock *forBodyPreheaderBB = + BasicBlock::Create(F.getContext(), "for.body.preheader", &F, forBodyBB); + BasicBlock *forCond31PreheaderBB = + BasicBlock::Create(F.getContext(), "for.cond31.preheader", &F, forBodyBB); + BasicBlock *forBody33BB = cloneBasicBlockWithRelations(forBodyBB, "33", &F); + forBody33BB->setName("for.body33"); + forBody33BB->moveAfter(forBodyBB); + BasicBlock *forEnd37BB = + BasicBlock::Create(F.getContext(), "for.end37", &F, forBodyNewBB); + 
+ // Add instructions to forCondPreheaderBB + IRBuilder<> builder(forCondPreheaderBB); + Value *negativeSeven = ConstantInt::get(Type::getInt32Ty(F.getContext()), -7); + Value *sub = builder.CreateNSWAdd(len, negativeSeven, "sub"); + Value *seven = ConstantInt::get(Type::getInt32Ty(F.getContext()), 7); + Value *cmp1113 = builder.CreateICmpUGT(len, seven, "cmp1113"); + builder.CreateCondBr(cmp1113, forBodyPreheaderBB, forCond31PreheaderBB); + + // Add instructions to forBodyPreheaderBB + builder.SetInsertPoint(forBodyPreheaderBB); + Value *mask = ConstantInt::get(Type::getInt32Ty(F.getContext()), 2147483640); + Value *andValue = builder.CreateAnd(len, mask, ""); + builder.CreateBr(forBodyBB); + + // Modify for.body + PHINode *iPhi = dyn_cast(&forBodyBB->front()); + iPhi->setName("i.0122"); + + // copy first float phinode from forBodyBB to forCond31PreheaderBB + PHINode *firstFloatPhi = getFirstFloatPhi(forBodyBB); + PHINode *acc00Lcssa = PHINode::Create(firstFloatPhi->getType(), 2, + "acc0.0.lcssa", forCond31PreheaderBB); + acc00Lcssa->addIncoming(firstFloatPhi->getIncomingValue(0), + firstFloatPhi->getIncomingBlock(0)); + acc00Lcssa->addIncoming(firstFloatPhi->getIncomingValue(1), + forCondPreheaderBB); + // Unroll and duplicate loop iterations + SmallVector instructions; + for (int i = 0; i < 7; i++) { + Instruction *copyedPhiNode = + unrollAndDuplicateLoopIteration(ctx, forBodyBB, builder, i + 1); + if (PHINode *phi = dyn_cast(copyedPhiNode)) { + phi->setName("acc" + Twine(i + 1) + ".0.lcssa"); + phi->setIncomingBlock(1, forCondPreheaderBB); + phi->insertInto(forCond31PreheaderBB, forCond31PreheaderBB->end()); + instructions.push_back(phi); + } + } + + // Update for.body terminator + Instruction *incInst = nullptr; + MDNode *loopMD = nullptr; + for (auto &I : *forBodyBB) { + if (I.getOpcode() == Instruction::Add) { + incInst = &I; + Instruction *icmp = I.getNextNode(); + Instruction *br = icmp->getNextNode(); + assert(icmp->getOpcode() == Instruction::ICmp && + 
br->getOpcode() == Instruction::Br && + "Unexpected instruction sequence"); + I.moveAfter(&forBodyBB->back()); + loopMD = br->getMetadata(LLVMContext::MD_loop); + br->eraseFromParent(); + icmp->eraseFromParent(); + break; + } + } + + // Modify add instruction + incInst->setOperand(1, ConstantInt::get(Type::getInt32Ty(F.getContext()), 8)); + incInst->setName("add30"); + + builder.SetInsertPoint(forBodyBB); + Value *cmp1 = builder.CreateICmpSLT(incInst, sub, "cmp1"); + BranchInst *newBr = + builder.CreateCondBr(cmp1, forBodyBB, forCond31PreheaderBB); + newBr->setMetadata(LLVMContext::MD_loop, loopMD); + + movePHINodesToTop(*forBodyBB, forBodyPreheaderBB); + + // Add instructions to forCond31PreheaderBB + builder.SetInsertPoint(forCond31PreheaderBB); + PHINode *i0Lcssa = + builder.CreatePHI(Type::getInt32Ty(F.getContext()), 0, "i.0.lcssa"); + i0Lcssa->addIncoming(ConstantInt::get(Type::getInt32Ty(F.getContext()), 0), + forCondPreheaderBB); + i0Lcssa->addIncoming(andValue, forBodyBB); + Value *cmp32132 = builder.CreateICmpSLT(i0Lcssa, len, "cmp32132"); + builder.CreateCondBr(cmp32132, forBody33BB, forEnd37BB); + + // Modify forBody33BB + Instruction *tempInstr = nullptr; + for (auto &I : *forBody33BB) { + if (PHINode *phi = dyn_cast(&I)) { + if (phi->getType()->isIntegerTy(32)) { + phi->setIncomingValue(1, i0Lcssa); + phi->setIncomingBlock(1, forCond31PreheaderBB); + } else if (phi->getType()->isFloatTy()) { + phi->setIncomingValue(1, acc00Lcssa); + phi->setIncomingBlock(1, forCond31PreheaderBB); + tempInstr = phi; + } + } + } + + // Modify forEnd37BB + Instruction *acc01Lcssa = tempInstr->clone(); + acc01Lcssa->setName("acc0.1.lcssa"); + acc01Lcssa->insertInto(forEnd37BB, forEnd37BB->end()); + builder.SetInsertPoint(forEnd37BB); + + // Create pairs of floating-point additions + Value *sum01 = builder.CreateFAdd(acc01Lcssa, instructions[0], "sum01"); + Value *sum23 = builder.CreateFAdd(instructions[1], instructions[2], "sum23"); + Value *sum45 = 
builder.CreateFAdd(instructions[3], instructions[4], "sum45"); + Value *sum67 = builder.CreateFAdd(instructions[5], instructions[6], "sum67"); + + // Combine pairs + Value *sum0123 = builder.CreateFAdd(sum01, sum23, "sum0123"); + Value *sum4567 = builder.CreateFAdd(sum45, sum67, "sum4567"); + + // Final addition + Value *currentAdd = builder.CreateFAdd(sum0123, sum4567, "add44"); + builder.CreateBr(ifEnd); + + // Modify entry basic block + BranchInst *entryBi = dyn_cast(entryBB->getTerminator()); + entryBi->setSuccessor(0, forCondPreheaderBB); + entryBi->setSuccessor(1, forCond46PreheaderBB); + + // Modify forCond46PreheaderBB + forCond46PreheaderBB->getTerminator()->getPrevNode()->setName("cmp47110"); + + // Modify for.body33 + BranchInst *forBody33Bi = dyn_cast(forBody33BB->getTerminator()); + forBody33Bi->setSuccessor(0, forEnd37BB); + forBody33Bi->setSuccessor(1, forBody33BB); + + // Modify if.end + PHINode *ifEndPhi = dyn_cast(&ifEnd->front()); + ifEndPhi->setIncomingValue(1, currentAdd); + ifEndPhi->setIncomingBlock(1, forEnd37BB); + + changed = true; + return changed; +} + +// Function to unroll the cloned for.cond.preheader +static void unrollClonedForCondPreheader(BasicBlock *clonedForBody, + BasicBlock *clonedForCondPreheader, + BasicBlock *forCondPreheader) { + Function *F = clonedForBody->getParent(); + BasicBlock *forBody = getBasicBlockByName(*F, "for.body"); + assert(forBody && "Expected to find for.body!"); + + // Find PHI instructions in clonedForBody + SmallVector phiNodes; + for (Instruction &I : *clonedForBody) { + if (PHINode *phi = dyn_cast(&I)) { + phiNodes.push_back(phi); + } + } + + // Remove unused PHI nodes in clonedForCondPreheader + SmallVector unusedPhiNodes; + for (Instruction &I : *clonedForCondPreheader) { + if (PHINode *phi = dyn_cast(&I)) { + if (phi->use_empty()) { + unusedPhiNodes.push_back(phi); + } + } + } + for (PHINode *phi : unusedPhiNodes) { + phi->eraseFromParent(); + } + + // Clone PHI instructions to the beginning of 
clonedForCondPreheader + Instruction *insertPoint = &clonedForCondPreheader->front(); + SmallVector clonedPhiNodes; + for (PHINode *phi : phiNodes) { + PHINode *clonedPhi = cast(phi->clone()); + clonedPhi->setName(phi->getName() + ".clone"); + clonedPhi->setIncomingBlock(0, forBody); + clonedPhi->insertBefore(insertPoint); + insertPoint = clonedPhi->getNextNode(); + clonedPhiNodes.push_back(clonedPhi); + } + + // Find and clone the unique icmp instruction in forBody + Value *specStoreSelect = nullptr; + Instruction *cmpSlt = nullptr; + for (Instruction &I : *forBody) { + if (auto *icmp = dyn_cast(&I)) { + specStoreSelect = icmp->getOperand(0); + cmpSlt = icmp->clone(); + cmpSlt->setName("cmp_slt"); + cmpSlt->insertAfter(insertPoint); + break; + } + } + assert(specStoreSelect && "Failed to find icmp instruction in ForBody"); + + // Replace the existing icmp in clonedForCondPreheader + for (Instruction &I : *clonedForCondPreheader) { + if (auto *icmp = dyn_cast(&I)) { + icmp->replaceAllUsesWith(cmpSlt); + icmp->eraseFromParent(); + break; + } + } + + // Set the operand of cmp_slt to the first cloned PHI node + cmpSlt->setOperand(0, clonedPhiNodes[0]); + + // Update the successor of clonedForCondPreheader + clonedForCondPreheader->getTerminator()->setSuccessor(1, forCondPreheader); +} + +static std::tuple +modifyForBodyPreheader(BasicBlock *ForBodyPreheader, + BasicBlock *ClonedForCondPreheader) { + PHINode *TargetPHI = nullptr; + PHINode *TargetPHI2 = nullptr; + PHINode *TargetPHI3 = nullptr; + for (Instruction &I : *ClonedForCondPreheader) { + if (auto *phi = dyn_cast(&I)) { + if (phi->getType()->isIntegerTy(32)) { + if (isIncomingValueZeroOfPhi(phi)) { + // Found the target PHI node + TargetPHI = phi; + } else { + TargetPHI2 = phi; + } + } else if (phi->getType()->isFloatTy()) { + if (TargetPHI3 == nullptr) { + TargetPHI3 = phi; + break; + } + } + } + } + BinaryOperator *NewSub = nullptr; + for (Instruction &I : *ForBodyPreheader) { + if (auto *BinOp = 
dyn_cast(&I)) { + if (BinOp->getOpcode() == Instruction::Sub) { + // Change to add + NewSub = BinaryOperator::CreateAdd(BinOp->getOperand(0), TargetPHI, + BinOp->getName(), BinOp); + BinOp->replaceAllUsesWith(NewSub); + BinOp->eraseFromParent(); + break; + } + } + } + + ForBodyPreheader->moveAfter(ClonedForCondPreheader); + assert(NewSub && "NewSub should not be nullptr"); + return std::make_tuple(NewSub, TargetPHI2, TargetPHI3); +} + +static Value *expandForCondPreheader( + BasicBlock *ForBody, BasicBlock *ForCondPreheader, + BasicBlock *ClonedForCondPreheader, + std::tuple NewSubAndTargetPHI3) { + Instruction *TargetInst = + getFirstCallInstWithName(ForBody, "llvm.fmuladd.f32"); + assert(TargetInst && "TargetInst not found"); + Value *NewSub = std::get<0>(NewSubAndTargetPHI3); + Value *TargetPHI2 = std::get<1>(NewSubAndTargetPHI3); + Value *TargetPHI3 = std::get<2>(NewSubAndTargetPHI3); + // Create new .loopexit basic block + BasicBlock *LoopExit = BasicBlock::Create( + ForCondPreheader->getContext(), ForCondPreheader->getName() + ".loopexit", + ForCondPreheader->getParent(), ForCondPreheader); + + // Create new sub instruction in .loopexit block + IRBuilder<> Builder(LoopExit); + Value *NewSubInst = Builder.CreateSub(NewSub, TargetPHI2); + + // Add unconditional branch to ForCondPreheader + Builder.CreateBr(ForCondPreheader); + + // Find the target PHI node in ClonedForCondPreheader + PHINode *TargetPHI = nullptr; + for (PHINode &Phi : ClonedForCondPreheader->phis()) { + if (isIncomingValueZeroOfPhi(&Phi)) { + TargetPHI = Φ + break; + } + } + + // Ensure we found the target PHI node + assert(TargetPHI && + "Failed to find target PHI node in ClonedForCondPreheader"); + + // Update the incoming value of the PHI nodes in ForCondPreheader to the + // result of the new sub instruction + for (PHINode &Phi : ForCondPreheader->phis()) { + if (Phi.getType()->isIntegerTy(32)) { + Phi.setIncomingValue(0, TargetPHI); + Phi.setIncomingBlock(0, ClonedForCondPreheader); + 
Phi.setIncomingValue(1, NewSubInst); + Phi.setIncomingBlock(1, LoopExit); + } else if (Phi.getType()->isFloatTy()) { + Phi.setIncomingValue(0, TargetPHI3); + Phi.setIncomingBlock(0, ClonedForCondPreheader); + // Phi.setIncomingValue(1, TargetInst); + Phi.setIncomingBlock(1, LoopExit); + } + } + + // Get the icmp instruction in ForCondPreheader + ICmpInst *icmpInst = getFirstICmpInst(ForCondPreheader); + + // Ensure we found the icmp instruction + assert(icmpInst && "Failed to find icmp instruction in ForCondPreheader"); + + // Set the operand 1 of icmpInst to constant 7 + LLVMContext &Ctx = ForCondPreheader->getContext(); + Value *const7 = ConstantInt::get(Type::getInt32Ty(Ctx), 7); + icmpInst->setOperand(1, const7); + + // Create a new add nsw instruction before icmpInst, with operand 0 the same + // as icmpInst, and operand 1 as -7. This instruction will be used as the + // return value of the function + Value *constNeg7 = ConstantInt::get(Type::getInt32Ty(Ctx), -7); + IRBuilder<> BuilderBeforeICmp(icmpInst); + Value *AddInst = + BuilderBeforeICmp.CreateNSWAdd(icmpInst->getOperand(0), constNeg7); + + ForBody->getTerminator()->setSuccessor(0, LoopExit); + + return AddInst; +} + +static void updateRealForBody(Function &F, Value *sub) { + BasicBlock *ForBody = getBasicBlockByName(F, "for.body"); + assert(ForBody && "Expected to find for.body!"); + ICmpInst *lastICmp = + getLastICmpInstWithPredicate(ForBody, ICmpInst::ICMP_SLT); + if (lastICmp) { + lastICmp->setOperand(1, sub); + } +} + +static void modifyForBody(BasicBlock *ClonedForCondPreheader, + BasicBlock *ForBody) { + // Find the unique float type PHI node in ForBody + PHINode *FloatPhiInForBody = getFirstFloatPhi(ForBody); + assert(FloatPhiInForBody && "Failed to find float type PHI node in ForBody"); + // Find the first float type PHI node in ClonedForCondPreheader + PHINode *FirstFloatPhiInClonedForCondPreheader = + getFirstFloatPhi(ClonedForCondPreheader); + assert(FloatPhiInForBody && "Failed to find 
float type PHI node in ForBody"); + // Set the incoming value of the float type PHI node in ForBody to the float + // type PHI node in ClonedForCondPreheader + FloatPhiInForBody->setIncomingValue(0, FirstFloatPhiInClonedForCondPreheader); + + // Find the unique icmp eq instruction in ForBody + ICmpInst *IcmpEq = getFirstICmpInstWithPredicate(ForBody, ICmpInst::ICMP_EQ); + + // Ensure we found the icmp eq instruction + assert(IcmpEq && "Failed to find icmp eq instruction in ForBody"); + + // Get the original operand 1 + Value *OriginalOperand1 = IcmpEq->getOperand(1); + + // Ensure the original operand 1 is an instruction + if (Instruction *OriginalOperand1Inst = + dyn_cast(OriginalOperand1)) { + // Set operand 1 to the operand 0 of the original operand 1 instruction + IcmpEq->setOperand(1, OriginalOperand1Inst->getOperand(0)); + } else { + assert(false && "The original operand 1 is not an instruction, " + "cannot get its operand 0\n"); + } + + // Find the phi i32 incoming value that is a variable in + // ClonedForCondPreheader + PHINode *TargetPHI = nullptr; + PHINode *TargetPHI2 = nullptr; + for (Instruction &I : *ClonedForCondPreheader) { + if (PHINode *Phi = dyn_cast(&I)) { + if (isIncomingValueZeroOfPhi(Phi)) { + TargetPHI = Phi; + } else { + TargetPHI2 = Phi; + } + if (TargetPHI && TargetPHI2) + break; + } + } + + // Ensure we found the target PHI node + assert(TargetPHI && + "Failed to find the target PHI node in ClonedForCondPreheader"); + + // Find the phi i32 incoming value that is a variable in ForBody + PHINode *TargetPHIInForBody = nullptr; + PHINode *TargetPHIInForBody2 = nullptr; + for (Instruction &I : *ForBody) { + if (PHINode *Phi = dyn_cast(&I)) { + if (isIncomingValueZeroOfPhi(Phi)) { + TargetPHIInForBody = Phi; + } else { + TargetPHIInForBody2 = Phi; + } + if (TargetPHIInForBody && TargetPHIInForBody2) + break; + } + } + + // Ensure that the target PHI nodes are found + assert(TargetPHIInForBody && TargetPHIInForBody2 && + "Failed to find 
matching PHI nodes in ForBody"); + + // Set the incoming value of the PHI nodes found in ForBody + // to the PHI nodes found in ClonedForCondPreheader + TargetPHIInForBody->setIncomingValue(0, TargetPHI); + TargetPHIInForBody2->setIncomingValue(0, TargetPHI2); + + IcmpEq->setOperand(0, TargetPHIInForBody2->getIncomingValue(1)); +} + +static void insertUnusedInstructionsBeforeIcmp(PHINode *phiI32InClonedForBody, + ICmpInst *lastIcmpEq) { + for (Use &U : phiI32InClonedForBody->uses()) { + if (Instruction *Used = dyn_cast(U.getUser())) { + if (Used->getParent() == nullptr) { + if (Used->use_empty()) { + Used->insertBefore(lastIcmpEq); + } + } + } + } +} + +static void modifyClonedForBody(BasicBlock *ClonedForBody) { + + ICmpInst *lastIcmpEq = getLastICmpInst(ClonedForBody); + assert(lastIcmpEq && + "Failed to find last icmp eq instruction in ClonedForBody"); + + PHINode *phiI32InClonedForBody = nullptr; + for (auto &Inst : *ClonedForBody) { + if (PHINode *Phi = dyn_cast(&Inst)) { + if (isIncomingValueZeroOfPhi(Phi)) { + phiI32InClonedForBody = Phi; + insertUnusedInstructionsBeforeIcmp(phiI32InClonedForBody, lastIcmpEq); + } + } + } + + // Ensure that the phi i32 node is found + assert(phiI32InClonedForBody && "phi i32 node not found in ClonedForBody"); +} + +static BasicBlock *getFirstSuccessorOfForBody(BasicBlock *ForBody) { + BasicBlock *ForCondPreheader = nullptr; + assert(succ_size(ForBody) == 2 && "ForBody should have 2 successors"); + for (auto *succ : successors(ForBody)) { + ForCondPreheader = succ; + break; + } + return ForCondPreheader; +} + +static std::tuple +cloneThreeBB(BasicBlock *ForBodyPreheader, BasicBlock *ForBody, + BasicBlock *ForCondPreheader, Function &F) { + ValueToValueMapTy VMap; + SmallVector NewBlocks; + + BasicBlock *ClonedForBodyPreheader = + CloneBasicBlock(ForBodyPreheader, VMap, ".modify", &F); + BasicBlock *ClonedForBody = CloneBasicBlock(ForBody, VMap, ".modify", &F); + BasicBlock *ClonedForCondPreheader = + 
CloneBasicBlock(ForCondPreheader, VMap, ".modify", &F); + + VMap[ForBodyPreheader] = ClonedForBodyPreheader; + VMap[ForBody] = ClonedForBody; + VMap[ForCondPreheader] = ClonedForCondPreheader; + + // Remap instructions and PHI nodes in the new loop + remapInstructionsInBlocks( + {ClonedForBodyPreheader, ClonedForBody, ClonedForCondPreheader}, VMap); + return std::make_tuple(ClonedForBodyPreheader, ClonedForBody, + ClonedForCondPreheader); +} + +static std::tuple +modifyFirstForBody(Loop *L, Function &F, BasicBlock *ForBody, Value *sub) { + + BasicBlock *ForBodyPreheader = L->getLoopPreheader(); + + // Find the predecessor of ForBodyPreheader + BasicBlock *PreForBody = nullptr; + assert(pred_size(ForBodyPreheader) == 1 && + "ForBodyPreheader should have only one predecessor"); + for (auto *Pred : predecessors(ForBodyPreheader)) { + PreForBody = Pred; + } + + // Find the first successor of ForBody, it should have two + BasicBlock *ForCondPreheader = getFirstSuccessorOfForBody(ForBody); + + std::tuple ClonedBBs = + cloneThreeBB(ForBodyPreheader, ForBody, ForCondPreheader, F); + BasicBlock *ClonedForBodyPreheader = std::get<0>(ClonedBBs); + BasicBlock *ClonedForBody = std::get<1>(ClonedBBs); + BasicBlock *ClonedForCondPreheader = std::get<2>(ClonedBBs); + + /* insert 2 cloned blocks between PreForBody and ForBody */ + // for.body -> for.body12.lr.ph + PreForBody->getTerminator()->setSuccessor(0, ClonedForBodyPreheader); + ClonedForBodyPreheader->moveAfter(PreForBody); + // for.body12.lr.ph -> for.body12 + ClonedForBodyPreheader->getTerminator()->setSuccessor(0, ClonedForBody); + + // for.body12 -> for.cond59.preheader + ClonedForBody->moveAfter(ClonedForBodyPreheader); + + // for.cond59.preheader -> for.body62.lr.ph + ClonedForCondPreheader->getTerminator()->setSuccessor(0, ForBodyPreheader); + + // for.cond59.preheader -> for.cond71.preheader + ClonedForCondPreheader->getTerminator()->setSuccessor(1, + ClonedForCondPreheader); + 
ClonedForCondPreheader->moveAfter(ClonedForBodyPreheader); + // for.body -> for.cond71.preheader + PreForBody->getTerminator()->setSuccessor(1, ClonedForCondPreheader); + + preProcessClonedForBody(ClonedForBody, sub); + updateRealForBody(F, sub); + unrollClonedForBody(ClonedForBody, ClonedForCondPreheader, 0); + modifyClonedForBody(ClonedForBody); + unrollClonedForCondPreheader(ClonedForBody, ClonedForCondPreheader, + ForCondPreheader); + + modifyForBody(ClonedForCondPreheader, ForBody); + std::tuple NewSubAndTargetPHI3 = + modifyForBodyPreheader(ForBodyPreheader, ClonedForCondPreheader); + + Value *AddInst = expandForCondPreheader( + ForBody, ForCondPreheader, ClonedForCondPreheader, NewSubAndTargetPHI3); + + ClonedForBodyPreheader->moveBefore(ClonedForBody); + groupAndReorderInstructions(ClonedForBody); + return std::make_tuple(ClonedForCondPreheader, ForCondPreheader, AddInst); +} + +static bool moveIfEndToEnd(Function &F) { + + BasicBlock &lastBB = F.back(); + if (lastBB.getName() == "if.end") { + return false; + } + + BasicBlock *ifEndBB = getBasicBlockByName(F, "if.end"); + assert(ifEndBB && "Expected to find if.end!"); + if (ifEndBB) { + ifEndBB->removeFromParent(); + ifEndBB->insertInto(&F); + } + return true; +} + +static Value *modifyForCondPreheader(Function &F) { + LLVMContext &Ctx = F.getContext(); + + BasicBlock *forCondPreheader = getBasicBlockByName(F, "for.cond.preheader"); + BasicBlock *forBodyLrPh = getBasicBlockByName(F, "for.body.lr.ph"); + assert(forCondPreheader && "Expected to find for.cond.preheader!"); + assert(forBodyLrPh && "Expected to find for.body.lr.ph!"); + forCondPreheader->replaceAllUsesWith(forBodyLrPh); + forCondPreheader->eraseFromParent(); + forBodyLrPh->setName("for.cond.preheader"); + + unsigned int loadnum = 0; + for (auto I = forBodyLrPh->begin(); I != forBodyLrPh->end(); ++I) { + if (auto *loadinst = dyn_cast(&*I)) { + loadnum++; + if (loadnum == 2) { + IRBuilder<> Builder(loadinst->getNextNode()); + Value *NegSeven = 
ConstantInt::get(Type::getInt32Ty(Ctx), -7); + Value *Sub = Builder.CreateNSWAdd(loadinst, NegSeven, "sub"); + return Sub; // Return the newly inserted instruction + } + } + } + assert(false && "it must not be here"); +} + +static void modifyForCondPreheader2(BasicBlock *ClonedForBody, + BasicBlock *ClonedForCondPreheader, + BasicBlock *ForCondPreheader, + Value *andinst) { + + // Find phi instructions of float type in ClonedForBody + SmallVector PhiNodes; + for (Instruction &I : *ClonedForBody) { + if (PHINode *Phi = dyn_cast(&I)) { + PhiNodes.push_back(Phi); + } + } + + // Clone the found phi instructions to the beginning of ClonedForCondPreheader + // in order + Instruction *InsertPoint = &ForCondPreheader->front(); + PHINode *phi = cast(InsertPoint); + + BasicBlock *lastForCondPreheader = phi->getIncomingBlock(0); + SmallVector ClonedPhiNodes; + unsigned int floatphicount = 0; + for (PHINode *Phi : PhiNodes) { + PHINode *ClonedPhi = cast(Phi->clone()); + ClonedPhi->setName(Phi->getName() + ".clone"); + // Modify the operand 0 basicblock of each phi instruction to ForBody + if (Phi->getType()->isFloatTy()) { + if (floatphicount == 0) { + ClonedPhi->setIncomingValue(0, phi->getIncomingValue(0)); + floatphicount++; + } + } + ClonedPhi->setIncomingBlock(0, lastForCondPreheader); + ClonedPhi->insertAfter(InsertPoint); + // Update the insertion point to after the newly inserted PHI node + InsertPoint = ClonedPhi; + + ClonedPhiNodes.push_back(ClonedPhi); + } + + // Find operand 1 of the icmp instruction from ClonedForBody + ICmpInst *firstIcmp = getFirstICmpInst(ClonedForBody); + assert(firstIcmp && "Unable to find icmp instruction in ClonedForBody"); + Value *IcmpOperand1 = firstIcmp->getOperand(1); + + // Set operand 0 of icmp in ForCondPreheader to ClonedPhiNodes[0], and operand + // 1 to IcmpOperand1 + for (Instruction &I : *ForCondPreheader) { + if (ICmpInst *Icmp = dyn_cast(&I)) { + Icmp->setOperand(0, ClonedPhiNodes[0]); + Icmp->setOperand(1, IcmpOperand1); + 
Icmp->setName("cmp"); + break; + } + } + + ForCondPreheader->getTerminator()->setSuccessor(1, ClonedForCondPreheader); + + // // Delete redundant getelementptr, store and add instructions + SmallVector InstructionsToRemove; + for (Instruction &I : *ForCondPreheader) { + if (isa(&I) || isa(&I) || + isa(&I)) { + InstructionsToRemove.push_back(&I); + } + } + for (auto Inst = InstructionsToRemove.rbegin(); + Inst != InstructionsToRemove.rend(); ++Inst) { + if ((*Inst)->use_empty()) { + (*Inst)->eraseFromParent(); + } + } + // Find the icmp instruction in ClonedForCondPreheader + ICmpInst *IcmpInForCondPreheader = + getFirstICmpInstWithPredicate(ForCondPreheader, ICmpInst::ICMP_EQ); + + // Ensure that the icmp instruction is found + assert(IcmpInForCondPreheader && + "icmp instruction not found in ClonedForCondPreheader"); + + // Get the original operand 1 + Value *OriginalOperand1 = IcmpInForCondPreheader->getOperand(1); + + // If the original operand 1 is an instruction, get its operand 0 + if (Instruction *OriginalOperand1Inst = + dyn_cast(OriginalOperand1)) { + Value *NewOperand1 = OriginalOperand1Inst->getOperand(0); + + // Set the new operand 1 + IcmpInForCondPreheader->setOperand(1, NewOperand1); + // Change the original eq to slt + + IcmpInForCondPreheader->setPredicate(CmpInst::ICMP_SLT); + + } else { + assert(false && "The original operand 1 is not an instruction, cannot get " + "its operand 0\n"); + } + + // Find phi i32 node in ForCondPreheader with incoming 0 value == 0 + PHINode *TargetPhi = nullptr; + for (Instruction &I : *ForCondPreheader) { + if (PHINode *Phi = dyn_cast(&I)) { + if (isIncomingValueZeroOfPhi(Phi)) { + TargetPhi = Phi; + break; + } + } + } + + // Ensure the target phi node is found + assert(TargetPhi && "No matching phi i32 node found in ForCondPreheader"); + + TargetPhi->setIncomingValue(1, andinst); +} + +static Value *modifyClonedForBodyPreheader(BasicBlock *ClonedForBodyPreheader, + BasicBlock *ForBody) { + ICmpInst *firstIcmp = 
getFirstICmpInst(ForBody); + assert(firstIcmp && "Unable to find icmp instruction in ForBody"); + + Value *IcmpOperand1 = firstIcmp->getOperand(1); + + IRBuilder<> Builder(ClonedForBodyPreheader->getTerminator()); + Value *AndInst = + Builder.CreateAnd(IcmpOperand1, Builder.getInt32(2147483640)); + return AndInst; +} + +static void modifyClonedForCondPreheader(BasicBlock *ClonedForCondPreheader, + BasicBlock *ForBody, + BasicBlock *ForCondPreheader) { + + // Find float type phi node in ForBody + PHINode *FloatPhiInForBody = nullptr; + for (Instruction &I : *ForBody) { + if (PHINode *Phi = dyn_cast(&I)) { + if (Phi->getType()->isFloatTy()) { + FloatPhiInForBody = cast(I.clone()); + break; + } + } + } + + // Find and replace float type phi node in ClonedForCondPreheader + if (FloatPhiInForBody) { + PHINode *phi = getFirstFloatPhi(ClonedForCondPreheader); + assert(phi && "phi node not found"); + FloatPhiInForBody->insertBefore(phi); + phi->replaceAllUsesWith(FloatPhiInForBody); + phi->eraseFromParent(); + } + + // Set incomingblock 0 of FloatPhiInForBody to ForCondPreheader + if (FloatPhiInForBody) { + FloatPhiInForBody->setIncomingBlock(0, ForCondPreheader); + } + + // Find float type phi nodes in ForCondPreheader + SmallVector FloatPhisInForCondPreheader; + for (Instruction &I : *ForCondPreheader) { + if (PHINode *Phi = dyn_cast(&I)) { + if (Phi->getType()->isFloatTy()) { + FloatPhisInForCondPreheader.push_back(Phi); + } + } + } + + // Create 7 fadd instructions + Value *LastFAdd = nullptr; + if (FloatPhisInForCondPreheader.size() >= 8) { + IRBuilder<> Builder(FloatPhiInForBody->getNextNode()); + + Value *PrevAdd = getFirstFloatPhi(ClonedForCondPreheader); + + assert(PrevAdd && + "Unable to find float type PHI node in ClonedForCondPreheader"); + Value *Add139 = + Builder.CreateFAdd(PrevAdd, FloatPhisInForCondPreheader[2], "add139"); + Value *Add140 = + Builder.CreateFAdd(FloatPhisInForCondPreheader[3], + FloatPhisInForCondPreheader[4], "add140"); + Value *Add141 = + 
Builder.CreateFAdd(FloatPhisInForCondPreheader[5], + FloatPhisInForCondPreheader[6], "add141"); + Value *Add142 = + Builder.CreateFAdd(FloatPhisInForCondPreheader[7], + FloatPhisInForCondPreheader[8], "add142"); + Value *Add143 = Builder.CreateFAdd(Add139, Add140, "add143"); + Value *Add144 = Builder.CreateFAdd(Add141, Add142, "add144"); + Value *Add145 = Builder.CreateFAdd(Add143, Add144, "add145"); + LastFAdd = Add145; + } else { + llvm_unreachable("Unable to find float type PHI node in ForCondPreheader"); + } + + // Find store instruction in ForCondPreheader and update its operand + if (LastFAdd) { + for (auto &Inst : *ClonedForCondPreheader) { + if (auto *si = dyn_cast(&Inst)) { + si->setOperand(0, LastFAdd); + break; + } + } + } + + Value *addinst = nullptr; + // Iterate through instructions in ClonedForCondPreheader, looking for addnuw + // instruction + for (auto &Inst : *ClonedForCondPreheader) { + if (auto *AddInst = dyn_cast(&Inst)) { + if (AddInst->getOpcode() == Instruction::Add && + AddInst->hasNoUnsignedWrap()) { + addinst = AddInst; + break; + } + } + } + // Get the second successor of ClonedForCondPreheader + BasicBlock *SecondSuccessor = nullptr; + int SuccCount = 0; + for (auto *Succ : successors(ClonedForCondPreheader)) { + if (SuccCount == 1) { + SecondSuccessor = Succ; + break; + } + SuccCount++; + } + + if (SecondSuccessor && addinst) { + // Iterate through all PHI nodes in SecondSuccessor + int phiCount = 0; + for (PHINode &Phi : SecondSuccessor->phis()) { + if (phiCount == 1) { // Second phi node + // Set the second predecessor to ClonedForCondPreheader and its value to + // addinst + Phi.setIncomingBlock(1, ClonedForCondPreheader); + Phi.setIncomingValue(1, addinst); + } else { + // For other phi nodes, only update the predecessor basic block + Phi.setIncomingBlock(1, ClonedForCondPreheader); + } + phiCount++; + } + } +} + +static void modifyClonedForBody2(BasicBlock *ClonedForBody, + BasicBlock *ClonedForCondPreheader, + Value *AddInst, 
BasicBlock *ForCondPreheader) { + SmallVector floatPhiNodes; + + // Iterate through all instructions in ClonedForCondPreheader + for (Instruction &I : *ClonedForCondPreheader) { + if (PHINode *Phi = dyn_cast(&I)) { + if (Phi->getType()->isFloatTy()) { + floatPhiNodes.push_back(Phi); + if (floatPhiNodes.size() == 8) { + break; // Stop after finding 8 float type PHI nodes + } + } + } + } + + // Ensure we found 8 float type PHI nodes + assert(floatPhiNodes.size() == 8 && + "Unable to find 8 float type PHI nodes in ClonedForCondPreheader"); + + // Now floatPhiNodes contains 8 float type PHI nodes in order + + // Iterate through all PHI nodes in ClonedForBody + int phiIndex = 0; + for (PHINode &Phi : ClonedForBody->phis()) { + if (Phi.getType()->isFloatTy()) { + // Ensure we don't access floatPhiNodes out of bounds + if (phiIndex < floatPhiNodes.size()) { + // Set the 0th incoming value of the PHI node to the corresponding node + // in floatPhiNodes + if (phiIndex > + 0) { // Don't set the first phi node, as it's floatPhiInForBody + Phi.setIncomingValue(0, floatPhiNodes[phiIndex]); + } + phiIndex++; + } else { + // If the number of float type PHI nodes in ClonedForBody exceeds the + // size of floatPhiNodes, output a warning + assert(false && "Warning: Number of float type PHI nodes in " + "ClonedForBody exceeds expectations\n"); + break; + } + } + } + + // Ensure we processed all expected PHI nodes + if (phiIndex < floatPhiNodes.size()) { + assert(false && "Warning: Number of float type PHI nodes in ClonedForBody " + "is less than expected\n"); + } + + // Find the last icmp eq instruction in ClonedForBody + ICmpInst *lastIcmpEq = + getLastICmpInstWithPredicate(ClonedForBody, ICmpInst::ICMP_EQ); + + // Ensure we found the icmp eq instruction + assert(lastIcmpEq && "Unable to find icmp eq instruction in ClonedForBody"); + + // Set operand 1 to addInst + lastIcmpEq->setOperand(1, AddInst); + // Change the predicate of the icmp eq instruction to slt (signed less than) + 
lastIcmpEq->setPredicate(ICmpInst::ICMP_SLT); + // Change the name to cmp + lastIcmpEq->setName("cmp"); + + ClonedForBody->getTerminator()->setSuccessor(1, ForCondPreheader); + + // Find phi i32 node in ClonedForBody + PHINode *phiI32InClonedForBody = nullptr; + for (auto &Inst : *ClonedForBody) { + if (PHINode *Phi = dyn_cast(&Inst)) { + if (Phi->getType()->isIntegerTy(32)) { + phiI32InClonedForBody = Phi; + insertUnusedInstructionsBeforeIcmp(phiI32InClonedForBody, lastIcmpEq); + } + } + } + + // Ensure we found the phi i32 node + assert(phiI32InClonedForBody && + "Unable to find phi i32 node in ClonedForBody"); +} + +static std::pair findTwoI32PhiInBB(BasicBlock *ForBody) { + // Find the first i32 type PHI instruction in ForBody + PHINode *firstI32PhiInBB = nullptr; + PHINode *secondI32PhiInBB = nullptr; + int i32PhiCount2 = 0; + for (auto &Inst : *ForBody) { + if (PHINode *Phi = dyn_cast(&Inst)) { + if (Phi->getType()->isIntegerTy(32)) { + if (i32PhiCount2 == 0) { + firstI32PhiInBB = Phi; + i32PhiCount2++; + } else if (i32PhiCount2 == 1) { + secondI32PhiInBB = Phi; + break; + } + } + } + } + + // Ensure we found two i32 type PHI instructions in ForBody + assert(firstI32PhiInBB && secondI32PhiInBB && + "Unable to find two i32 type PHI instructions in BB"); + + return std::make_pair(firstI32PhiInBB, secondI32PhiInBB); +} +static void modifyForBody2(BasicBlock *ClonedForCondPreheader, + BasicBlock *ForBody, BasicBlock *ForCondPreheader) { + // Find the first i32 type PHI instruction in ForCondPreheader + auto [firstI32PhiInForCondPreheader, secondI32PhiInForCondPreheader] = + findTwoI32PhiInBB(ForCondPreheader); + + // Find the first i32 type PHI instruction in ForBody + auto [firstI32PhiInForBody, secondI32PhiInForBody] = + findTwoI32PhiInBB(ForBody); + + // Set the incoming 0 value of the two i32 type PHI instructions found in + // ForBody to the firstI32Phi found in ForCondPreheader + firstI32PhiInForBody->setIncomingValue(0, firstI32PhiInForCondPreheader); + 
secondI32PhiInForBody->setIncomingValue(0, secondI32PhiInForCondPreheader); + + ForBody->getTerminator()->setSuccessor(0, ClonedForCondPreheader); + + // Find the first float type PHI instruction in ForCondPreheader + PHINode *SecondFloatPhiInForCondPreheader = nullptr; + int floatPhiCount = 0; + for (auto &Inst : *ForCondPreheader) { + if (PHINode *Phi = dyn_cast(&Inst)) { + if (Phi->getType()->isFloatTy()) { + floatPhiCount++; + if (floatPhiCount == 2) { + SecondFloatPhiInForCondPreheader = Phi; + break; + } + } + } + } + + // Ensure we found a float type PHI instruction in ForCondPreheader + assert(SecondFloatPhiInForCondPreheader && + "Unable to find float type PHI instruction in ForCondPreheader"); + + // Find the only float type PHI instruction in ForBody + PHINode *FloatPhiInForBody = getFirstFloatPhi(ForBody); + assert(FloatPhiInForBody && "Unable to find float type PHI instruction in " + "ForBody"); + + // Set incoming value 0 of the float type PHI instruction in ForBody + FloatPhiInForBody->setIncomingValue(0, SecondFloatPhiInForCondPreheader); + + // Find the unique float type PHI instruction in ClonedForCondPreheader + PHINode *FloatPhiInClonedForCondPreheader = + getFirstFloatPhi(ClonedForCondPreheader); + assert(FloatPhiInClonedForCondPreheader && + "Float type PHI instruction not found in ClonedForCondPreheader"); + + // Set incoming value 0 of the float type PHI instruction in + // ClonedForCondPreheader + FloatPhiInClonedForCondPreheader->setIncomingValue( + 0, SecondFloatPhiInForCondPreheader); +} + +// Helper function to run dead code elimination +static void runDeadCodeElimination(Function &F) { + legacy::FunctionPassManager FPM(F.getParent()); + FPM.add(createDeadCodeEliminationPass()); + FPM.run(F); + LLVM_DEBUG(F.dump()); +} + +static bool modifySecondForBody(Loop *L, Function &F, BasicBlock *ForBody, + BasicBlock *FirstClonedForCondPreheader, + BasicBlock *FirstForCondPreheader, + Value *AddInst) { + BasicBlock *ForBodyPreheader = 
L->getLoopPreheader(); + + // Find the 0th successor of ForBody, it should have two + BasicBlock *ForCondPreheader = getFirstSuccessorOfForBody(ForBody); + + std::tuple ClonedBBs = + cloneThreeBB(ForBodyPreheader, ForBody, ForCondPreheader, F); + BasicBlock *ClonedForBodyPreheader = std::get<0>(ClonedBBs); + BasicBlock *ClonedForBody = std::get<1>(ClonedBBs); + BasicBlock *ClonedForCondPreheader = std::get<2>(ClonedBBs); + + ClonedForCondPreheader->setName("for.end"); + ClonedForBody->moveBefore(ForBody); + ClonedForBodyPreheader->moveBefore(ClonedForBody); + ForCondPreheader->moveBefore(ClonedForBodyPreheader); + ClonedForCondPreheader->moveAfter(ForBody); + ForCondPreheader->getTerminator()->setSuccessor(0, ForBodyPreheader); + + unrollClonedForBody(ClonedForBody, ClonedForCondPreheader, 1); + modifyClonedForBody2(ClonedForBody, FirstClonedForCondPreheader, AddInst, + ForCondPreheader); + + Value *andinst = + modifyClonedForBodyPreheader(ClonedForBodyPreheader, ForBody); + modifyForCondPreheader2(ClonedForBody, ClonedForCondPreheader, + ForCondPreheader, andinst); + modifyClonedForCondPreheader(ClonedForCondPreheader, ForBody, + ForCondPreheader); + modifyForBody2(ClonedForCondPreheader, ForBody, ForCondPreheader); + + FirstForCondPreheader->getTerminator()->setSuccessor(0, + ClonedForBodyPreheader); + + // Run Dead Code Elimination optimization + runDeadCodeElimination(F); + + groupAndReorderInstructions(ClonedForBody); + + return true; +} +static void insertDoublePreheader(Function &F) { + BasicBlock *entry = &F.getEntryBlock(); + BasicBlock *ifend = &F.back(); + BasicBlock *entry_successor1 = entry->getTerminator()->getSuccessor(1); + + // Create a new basic block + BasicBlock *newBB = BasicBlock::Create( + F.getContext(), entry_successor1->getName() + ".preheader", &F, + entry_successor1); + + Value *len = getLenFromEntryBlock(F); + + // Insert instructions in the new basic block + IRBuilder<> builder(newBB); + Value *cmp151349 = builder.CreateICmpSGT( + len, 
ConstantInt::get(len->getType(), 0), "cmp151349"); + + // Create a conditional branch + builder.CreateCondBr(cmp151349, entry_successor1, ifend); + + // Modify the terminator of entry to jump to the new basic block + entry->getTerminator()->setSuccessor(1, newBB); +} +static bool unrollFir(Function &F, Loop *L) { + + bool Changed = false; + static BasicBlock *FirstClonedForCondPreheader = nullptr; + static BasicBlock *FirstForCondPreheader = nullptr; + static Value *AddInst = nullptr; + + for (auto *BB : L->blocks()) { + + assert(BB->getName().contains("for.body") && "BB must is for.body"); + Changed = moveIfEndToEnd(F); + // Temporarily skip processing the second loop + + if (Changed) { + insertDoublePreheader(F); + Value *sub = modifyForCondPreheader(F); + std::tuple result = + modifyFirstForBody(L, F, BB, sub); + FirstClonedForCondPreheader = std::get<0>(result); + FirstForCondPreheader = std::get<1>(result); + AddInst = std::get<2>(result); + } else { + modifySecondForBody(L, F, BB, FirstClonedForCondPreheader, + FirstForCondPreheader, AddInst); + } + } + LLVM_DEBUG(F.dump()); + + return Changed; +} + +// Preprocessing function +static PHINode *preprocessClonedForBody(BasicBlock *ClonedForBody) { + // Find the unique PHI node + PHINode *phiNode = nullptr; + for (auto &I : *ClonedForBody) { + if (auto *phi = dyn_cast(&I)) { + phiNode = phi; + break; + } + } + + // Ensure that the PHI node is found + assert(phiNode && "PHI node not found"); + + // Find two mul nsw instructions + SmallVector mulInsts; + for (auto &I : *ClonedForBody) { + if (auto *binOp = dyn_cast(&I)) { + if (binOp->getOpcode() == Instruction::Mul && binOp->hasNoSignedWrap()) { + mulInsts.push_back(binOp); + } + } + } + + // Replace mul nsw instructions with the PHI node + for (auto *mulInst : mulInsts) { + mulInst->replaceAllUsesWith(phiNode); + mulInst->eraseFromParent(); + } + return phiNode; +} + +static Instruction *modifyAddToOrInClonedForBody(BasicBlock *ClonedForBody) { + // Find the 
unique add nuw nsw instruction + Instruction *addInst = nullptr; + for (auto &I : *ClonedForBody) { + if (auto *binOp = dyn_cast(&I)) { + if (binOp->getOpcode() == Instruction::Add && + binOp->hasNoUnsignedWrap()) { + addInst = binOp; + break; + } + } + } + + // Ensure that the add nuw nsw instruction is found + assert(addInst && "add nuw nsw instruction not found"); + + // Create a new or disjoint instruction + Instruction *orInst = BinaryOperator::CreateDisjoint( + Instruction::Or, addInst->getOperand(0), + ConstantInt::get(addInst->getType(), 1), "add", addInst); + + // Replace all uses of the add instruction + addInst->replaceAllUsesWith(orInst); + + // Delete the original add instruction + addInst->eraseFromParent(); + orInst->setName("add"); + return orInst; +} + +static void modifyAddToOr(BasicBlock *ClonedForBody) { + SmallVector addInsts; + + // Collect all add instructions that meet the criteria + for (auto &I : *ClonedForBody) { + if (auto *binOp = dyn_cast(&I)) { + if (binOp->getOpcode() == Instruction::Add) { + addInsts.push_back(binOp); + } + } + } + if (addInsts.empty()) { + return; + } + // Replace each add instruction with an or disjoint instruction + for (auto it = addInsts.begin(); it != std::prev(addInsts.end()); ++it) { + auto *addInst = *it; + // Create a new or disjoint instruction + Instruction *orInst = + BinaryOperator::CreateDisjoint(Instruction::Or, addInst->getOperand(0), + addInst->getOperand(1), "add", addInst); + + // Replace all uses of the add instruction + addInst->replaceAllUsesWith(orInst); + + // Delete the original add instruction + addInst->eraseFromParent(); + orInst->setName("add"); + } +} + +static Value *unrolladdcClonedForBody(BasicBlock *ClonedForBody, + int unroll_factor) { + + // Call the preprocessing function + PHINode *phiNode = preprocessClonedForBody(ClonedForBody); + + // Replace add instructions with or instructions + Instruction *orInst = modifyAddToOrInClonedForBody(ClonedForBody); + + // Find the first 
non-PHI instruction and or instruction + Instruction *firstNonPHI = ClonedForBody->getFirstNonPHI(); + + // Ensure that the start and end instructions are found + assert(firstNonPHI && orInst && "Start or end instruction not found"); + + // Find the icmp instruction + Instruction *icmpInst = getFirstICmpInst(ClonedForBody); + + // Ensure that the icmp instruction is found + assert(icmpInst && "icmp instruction not found"); + + // Print information about the icmp instruction + + Instruction *newOrInst = orInst; + // Copy instructions 15 times + for (int i = 1; i <= (unroll_factor - 1); i++) { + ValueToValueMapTy VMap; + for (auto it = firstNonPHI->getIterator(); &*it != orInst; ++it) { + Instruction *newInst = it->clone(); + // For getelementptr instructions, set the second operand to orInst + if (GetElementPtrInst *GEP = dyn_cast(newInst)) { + newInst->setOperand(1, newOrInst); + newInst->setName("arrayidx"); + } + // If it's a fadd instruction, change its name to add + if (newInst->getOpcode() == Instruction::FAdd) { + newInst->setName("add"); + } + VMap[&*it] = newInst; + newInst->insertBefore(icmpInst); + } + + // Update operands of new instructions + for (auto it = firstNonPHI->getIterator(); &*it != orInst; ++it) { + Instruction *newInst = cast(VMap[&*it]); + for (unsigned j = 0; j < newInst->getNumOperands(); j++) { + Value *op = newInst->getOperand(j); + if (VMap.count(op)) { + newInst->setOperand(j, VMap[op]); + } + } + } + // Clone orInst and insert before icmpInst + newOrInst = orInst->clone(); + // Set the second operand of newOrInst to i+1 + newOrInst->setOperand(1, ConstantInt::get(newOrInst->getType(), i + 1)); + newOrInst->setName("add"); + newOrInst->insertBefore(icmpInst); + VMap[orInst] = newOrInst; + } + + // Replace or instruction with add nuw nsw instruction + IRBuilder<> Builder(newOrInst); + Value *newAddInst = + Builder.CreateNUWAdd(newOrInst->getOperand(0), newOrInst->getOperand(1)); + newOrInst->replaceAllUsesWith(newAddInst); + 
newOrInst->eraseFromParent(); + + // Create a new add instruction, subtracting 16 from len + Builder.SetInsertPoint(icmpInst); + Value *len = icmpInst->getOperand(1); + Value *sub = Builder.CreateNSWAdd( + len, ConstantInt::get(len->getType(), -unroll_factor), "sub"); + // Set the icmp instruction's predicate to sgt, and operands to newAddInst + if (ICmpInst *icmp = dyn_cast(icmpInst)) { + icmp->setPredicate(ICmpInst::ICMP_SGT); + icmp->setOperand(0, newAddInst); + icmp->setOperand(1, sub); + } + + phiNode->setIncomingValue(0, newAddInst); + return sub; +} + +static void expandForCondPreheaderaddc(Function &F, + BasicBlock *ForCondPreheader, + BasicBlock *ClonedForBody, + BasicBlock *ForBody, Value *sub, + int unroll_factor) { + // Create a new ForCondPreheader after the original ForCondPreheader + BasicBlock *NewForCondPreheader = BasicBlock::Create( + ForCondPreheader->getContext(), "for.cond.preheader.new", + ForCondPreheader->getParent(), ForCondPreheader->getNextNode()); + // Create a new empty BasicBlock after NewForCondPreheader + BasicBlock *NewForCondPreheader2 = BasicBlock::Create( + NewForCondPreheader->getContext(), "for.cond.preheader.new2", + NewForCondPreheader->getParent(), NewForCondPreheader->getNextNode()); + + // Move sub to the new ForCondPreheader + if (Instruction *SubInst = dyn_cast(sub)) { + SubInst->removeFromParent(); + SubInst->insertInto(NewForCondPreheader, NewForCondPreheader->begin()); + } + + // Create new comparison instruction in NewForCondPreheader + IRBuilder<> Builder(NewForCondPreheader); + Value *len = getLenFromEntryBlock(F); + + assert(len && "Parameter named 'len' not found"); + + Value *cmp6not207 = Builder.CreateICmpULT( + len, ConstantInt::get(len->getType(), unroll_factor), "cmp6.not207"); + + // Create conditional branch instruction + Builder.CreateCondBr(cmp6not207, NewForCondPreheader2, ClonedForBody); + + // Find if.end basic block + BasicBlock *ifEndBB = getBasicBlockByName(F, "if.end"); + BasicBlock *returnBB = 
getBasicBlockByName(F, "return"); + assert(ifEndBB && "Expected to find if.end!"); + assert(returnBB && "Expected to find return!"); + // Get the terminator instruction of if.end + Instruction *terminator = ifEndBB->getTerminator(); + if (!terminator) { + assert(false && "if.end basic block has no terminator instruction\n"); + return; + } + + // Replace the first operand of the terminator instruction with + // NewForCondPreheader + terminator->setOperand(2, NewForCondPreheader); + + // Find the unique PHINode in clonedForBody + PHINode *uniquePHI = nullptr; + for (Instruction &I : *ClonedForBody) { + if (auto *phi = dyn_cast(&I)) { + if (uniquePHI) { + // If we've already found a PHINode but find another, it's not unique + + uniquePHI = nullptr; + break; + } + uniquePHI = phi; + } + } + + assert(uniquePHI && "No unique PHINode found in ForBody\n"); + + uniquePHI->setIncomingBlock(1, NewForCondPreheader); + auto *clonedphi = uniquePHI->clone(); + clonedphi->insertInto(NewForCondPreheader2, NewForCondPreheader2->begin()); + + // Create comparison instruction + ICmpInst *cmp85209 = + new ICmpInst(ICmpInst::ICMP_SLT, clonedphi, len, "cmp85209"); + cmp85209->insertAfter(clonedphi); + + // Create conditional branch instruction + BranchInst *br = BranchInst::Create(ForBody, returnBB, cmp85209); + + br->insertAfter(cmp85209); + + // Get the terminator instruction of ClonedForBody + BranchInst *clonedTerminator = + dyn_cast(ClonedForBody->getTerminator()); + assert(clonedTerminator && + "ClonedForBody's terminator should be a BranchInst"); + if (!clonedTerminator) { + assert(false && "ClonedForBody has no terminator instruction\n"); + return; + } + + // Set the first operand of ClonedForBody's terminator to NewForCondPreheader2 + clonedTerminator->setOperand(2, NewForCondPreheader2); + + // Find the unique PHI node in ForBody + PHINode *uniquePHI2 = nullptr; + for (Instruction &I : *ForBody) { + if (auto *phi = dyn_cast(&I)) { + if (uniquePHI2) { + // If we've already found 
a PHINode but find another, it's not unique + + uniquePHI = nullptr; + break; + } + uniquePHI2 = phi; + } + } + + assert(uniquePHI2 && "No unique PHINode found in ForBody\n"); + + uniquePHI2->setIncomingValue(1, clonedphi); + uniquePHI2->setIncomingBlock(1, NewForCondPreheader2); + + // Find the unique PHI node in returnBB + PHINode *returnBBPHI = nullptr; + for (Instruction &I : *returnBB) { + if (auto *phi = dyn_cast(&I)) { + if (returnBBPHI) { + // If we've already found a PHINode but find another, it's not unique + returnBBPHI = nullptr; + break; + } + returnBBPHI = phi; + } + } + + if (returnBBPHI) { + // Add [0, NewForCondPreheader2] + returnBBPHI->addIncoming(ConstantInt::get(returnBBPHI->getType(), 0), + NewForCondPreheader2); + } else { + assert(false && "No unique PHI node found in returnBB\n"); + } +} + +static void addnoalias(Function &F) { + for (Argument &Arg : F.args()) { + if (Arg.getType()->isPointerTy()) { + Arg.addAttr(Attribute::NoAlias); + } + } +} +static BasicBlock *cloneForBody(Function &F, BasicBlock *ForBody, + const std::string &Suffix) { + ValueToValueMapTy VMap; + BasicBlock *ClonedForBody = CloneBasicBlock(ForBody, VMap, Suffix, &F); + VMap[ForBody] = ClonedForBody; + remapInstructionsInBlocks({ClonedForBody}, VMap); + return ClonedForBody; +} + +static void unrollAddc(Function &F, ScalarEvolution &SE, Loop *L, + int unroll_factor) { + + // Get the basic block containing the function body from L + BasicBlock *ForBody = L->getHeader(); + + // Ensure that the basic block containing the function body is found + if (!ForBody) { + assert(ForBody && "ForBody not found"); + return; + } + + // clone for body + + BasicBlock *ClonedForBody = cloneForBody(F, ForBody, ".modify"); + ClonedForBody->moveBefore(ForBody); + + Value *sub = unrolladdcClonedForBody(ClonedForBody, unroll_factor); + + // Find the ForCondPreheader basic block from F + BasicBlock *ForCondPreheader = getBasicBlockByName(F, "for.cond.preheader"); + assert(ForCondPreheader && 
"Expected to find for.cond.preheader!"); + expandForCondPreheaderaddc(F, ForCondPreheader, ClonedForBody, ForBody, sub, + unroll_factor); + modifyAddToOr(ClonedForBody); + groupAndReorderInstructions(ClonedForBody); + + // Verify the function + if (verifyFunction(F, &errs())) { + LLVM_DEBUG(errs() << "Function verification failed\n"); + return; + } +} + +static void unrollCorr(Function &F, Loop *L, int unroll_factor) { + + // Get the basic block containing the function body from L + BasicBlock *ForBody = L->getHeader(); + assert(ForBody && "ForBody not found"); + + // clone for body + BasicBlock *ClonedForBody = cloneForBody(F, ForBody, ".unroll"); + + BasicBlock *returnBB = getBasicBlockByName(F, "return"); + assert(returnBB && "Expected to find return!"); + BasicBlock *ForCondPreheader = getBasicBlockByName(F, "for.cond.preheader"); + assert(ForCondPreheader && "Expected to find for.cond.preheader!"); + BasicBlock *ForCond11PreheaderUs = L->getLoopPreheader(); + assert(ForCond11PreheaderUs && "Expected to find for.cond.preheader!"); + + ClonedForBody->moveBefore(returnBB); + + ForCondPreheader->setName("if.end"); + + // Find the first instruction in ForCondPreheader + Instruction *FirstInst = &*ForCondPreheader->begin(); + Instruction *SecondInst = FirstInst->getNextNode(); + // Ensure the first instruction is a sub nsw instruction + if (BinaryOperator *SubInst = dyn_cast(FirstInst)) { + if (SubInst->getOpcode() == Instruction::Sub && + SubInst->hasNoSignedWrap()) { + ; + } else { + assert(false && "The first instruction in ForCondPreheader is not a sub " + "nsw instruction\n"); + } + } else { + assert(false && "The first instruction in ForCondPreheader is not a binary " + "operation\n"); + } + // Insert new instruction after FirstInst + IRBuilder<> Builder(FirstInst->getNextNode()); + Value *Sub6 = Builder.CreateNSWAdd( + FirstInst, ConstantInt::get(FirstInst->getType(), 1 - unroll_factor), + "sub6"); + + if (ICmpInst *CmpInst = dyn_cast(SecondInst)) { + if 
(CmpInst->getPredicate() == ICmpInst::ICMP_EQ) { + CmpInst->setOperand(0, FirstInst); + CmpInst->setOperand( + 1, ConstantInt::get(FirstInst->getType(), unroll_factor - 1)); + CmpInst->setPredicate(ICmpInst::ICMP_SGT); + } + } + // Create new basic blocks + BasicBlock *ForCond11PreheaderPreheader = ForCondPreheader->getNextNode(); + BasicBlock *ForCond8PreheaderLrPh = + BasicBlock::Create(F.getContext(), "for.cond8.preheader.lr.ph", &F, + ForCond11PreheaderPreheader); + BasicBlock *ForCond8Preheader = BasicBlock::Create( + F.getContext(), "for.cond8.preheader", &F, ForCond11PreheaderPreheader); + BasicBlock *ForBody10LrPh = BasicBlock::Create( + F.getContext(), "for.body10.lr.ph", &F, ForCond11PreheaderPreheader); + BasicBlock *ForCond91Preheader = BasicBlock::Create( + F.getContext(), "for.cond91.preheader", &F, ForCond11PreheaderPreheader); + BasicBlock *ForCond95PreheaderLrPh = + BasicBlock::Create(F.getContext(), "for.cond95.preheader.lr.ph", &F, + ForCond11PreheaderPreheader); + + // Set predecessors for the basic blocks + ForCondPreheader->getTerminator()->setSuccessor(0, ForCond8PreheaderLrPh); + ForCondPreheader->getTerminator()->setSuccessor(1, ForCond91Preheader); + + // Find the parameter named patlen from the function arguments + Value *PatlenArg = F.getArg(3); + Value *SignalArg = F.getArg(0); + assert(PatlenArg && "Parameter named patlen not found\n"); + assert(SignalArg && "Parameter named signal not found\n"); + + // Add instructions to the for.cond8.preheader.lr.ph basic block + Builder.SetInsertPoint(ForCond8PreheaderLrPh); + Value *Cmp9242 = Builder.CreateICmpSGT( + PatlenArg, ConstantInt::get(PatlenArg->getType(), 0), "cmp9242"); + Builder.CreateBr(ForCond8Preheader); + + // Add instructions to the for.cond8.preheader basic block + Builder.SetInsertPoint(ForCond8Preheader); + PHINode *N0276 = + Builder.CreatePHI(Type::getInt32Ty(F.getContext()), 2, "n.0276"); + N0276->addIncoming(ConstantInt::get(Type::getInt32Ty(F.getContext()), 0), + 
ForCond8PreheaderLrPh); + + // Create conditional branch instruction + Builder.CreateCondBr(Cmp9242, ForBody10LrPh, nullptr); + + // Add instructions to the for.body10.lr.ph basic block + Builder.SetInsertPoint(ForBody10LrPh); + + // Create getelementptr instruction + Value *GEP = + Builder.CreateGEP(Type::getFloatTy(F.getContext()), SignalArg, N0276, ""); + + // Create unconditional branch instruction to ClonedForBody + Builder.CreateBr(ClonedForBody); + + // Add instructions to the for.cond91.preheader basic block + Builder.SetInsertPoint(ForCond91Preheader); + + // Create PHI node + PHINode *N0Lcssa = + Builder.CreatePHI(Type::getInt32Ty(F.getContext()), 2, "n.0.lcssa"); + N0Lcssa->addIncoming(ConstantInt::get(Type::getInt32Ty(F.getContext()), 0), + ForCondPreheader); + // Note: [ %add89, %for.cond.cleanup ] part not added yet + + // Create comparison instruction + Value *Cmp92Not282 = + Builder.CreateICmpSGT(N0Lcssa, FirstInst, "cmp92.not282"); + + // Create conditional branch instruction + Builder.CreateCondBr(Cmp92Not282, returnBB, ForCond95PreheaderLrPh); + + // Add instructions to the for.cond95.preheader.lr.ph basic block + Builder.SetInsertPoint(ForCond95PreheaderLrPh); + + Value *Cmp92678 = Builder.CreateICmpSGT( + PatlenArg, ConstantInt::get(Type::getInt32Ty(F.getContext()), 0), + "Cmp92678"); + // Insert Cmp92678 + Builder.CreateCondBr(Cmp92678, ForCond11PreheaderUs, + ForCond11PreheaderPreheader); + + Builder.SetInsertPoint(ForCond11PreheaderPreheader, + ForCond11PreheaderPreheader->begin()); + + Instruction *ForCond11PreheaderPreheaderterminater = + ForCond11PreheaderPreheader->getTerminator(); + Instruction *ForCond11PreheaderPreheaderFirstInst = + &*ForCond11PreheaderPreheader->begin(); + Value *SiglenArg = ForCond11PreheaderPreheaderFirstInst->getOperand(0); + // Calculate the result of n.0.lcssa left shifted by 2 bits + Value *ShiftedN = Builder.CreateShl( + N0Lcssa, ConstantInt::get(Type::getInt32Ty(F.getContext()), 2), ""); + + // Create 
getelementptr instruction + // Find memset function call + CallInst *MemsetCall = getFirstCallInstWithName(ForCond11PreheaderPreheader, + "llvm.memset.p0.i32"); + + // Ensure memset call is found + assert(MemsetCall && "memset call not found"); + + // Get DestArg + Value *DestArg = MemsetCall->getArgOperand(0); + + // Create new GEP instruction + Value *Scevgep = Builder.CreateGEP(Type::getInt8Ty(F.getContext()), DestArg, + ShiftedN, "scevgep"); + MemsetCall->setOperand(0, Scevgep); + // Calculate siglen + 1 + Value *SiglenPlus1 = Builder.CreateAdd( + SiglenArg, ConstantInt::get(Type::getInt32Ty(F.getContext()), 1), ""); + + // Calculate n.0.lcssa + patlen + Value *NplusPatlen = Builder.CreateAdd(N0Lcssa, PatlenArg, ""); + + // Calculate (siglen + 1) - (n.0.lcssa + patlen) + Value *SubResult = Builder.CreateSub(SiglenPlus1, NplusPatlen, ""); + + // Calculate the final memset length + Value *MemsetLen = Builder.CreateShl( + SubResult, ConstantInt::get(Type::getInt32Ty(F.getContext()), 2), ""); + Instruction *addinst = dyn_cast(MemsetCall->getOperand(2)); + MemsetCall->setOperand(2, MemsetLen); + if (addinst && addinst->use_empty()) + addinst->eraseFromParent(); + if (ForCond11PreheaderPreheaderFirstInst->use_empty()) + ForCond11PreheaderPreheaderFirstInst->eraseFromParent(); + + // Create a Preheader for ForCond11PreheaderUs + BasicBlock *ForCond11PreheaderUsPreheader = + BasicBlock::Create(F.getContext(), "for.cond11.preheader.us.preheader", + &F, ForCond11PreheaderUs); + + // Add an unconditional branch to ForCond11PreheaderUs in the new Preheader + BranchInst::Create(ForCond11PreheaderUs, ForCond11PreheaderUsPreheader); + + // Insert new instructions in ForCond11PreheaderUsPreheader + Builder.SetInsertPoint(ForCond11PreheaderUsPreheader->getTerminator()); + + // Add %6 = add i32 %siglen, 1 + Value *SiglenPlus2 = Builder.CreateAdd( + SiglenArg, ConstantInt::get(Type::getInt32Ty(F.getContext()), 1), ""); + + // Add %7 = sub i32 %6, %patlen + Value *SubResult2 = 
Builder.CreateSub(SiglenPlus2, PatlenArg, ""); + + // Find PHI node + PHINode *PhiNode = nullptr; + for (PHINode &Phi : ForCond11PreheaderUs->phis()) { + PhiNode = Φ + break; + } + + assert(PhiNode && "PHI node not found in for.cond11.preheader.us\n"); + + // Modify incoming values of the PHI node + PhiNode->setIncomingBlock(1, ForCond11PreheaderUsPreheader); + PhiNode->setIncomingValue(1, N0Lcssa); + + BasicBlock *ForCond11ForCondCleanup13CritEdgeUs = ForBody->getNextNode(); + // Find icmp ult instruction in ForCond11ForCondCleanup13CritEdgeUs + ICmpInst *IcmpUltInst = getLastICmpInstWithPredicate( + ForCond11ForCondCleanup13CritEdgeUs, ICmpInst::ICMP_ULT); + + assert(IcmpUltInst && "icmp ult instruction not found in " + "ForCond11ForCondCleanup13CritEdgeUs\n"); + + IcmpUltInst->setOperand(0, PhiNode->getIncomingValue(0)); + IcmpUltInst->setOperand(1, SubResult2); + IcmpUltInst->setPredicate(ICmpInst::ICMP_EQ); + + swapTerminatorSuccessors(ForCond11ForCondCleanup13CritEdgeUs); + + // Find PHI nodes in ClonedForBody + for (PHINode &Phi : ClonedForBody->phis()) { + Phi.setIncomingBlock(0, ForBody10LrPh); + } + + // Find phi float instruction in ClonedForBody + PHINode *FloatPhi = getFirstFloatPhi(ClonedForBody); + assert(FloatPhi && "phi float node not found"); + // Find getelementptr inbounds instructions in ClonedForBody + GetElementPtrInst *GEPInst = nullptr; + GetElementPtrInst *GEPInst2 = nullptr; + for (auto &I : *ClonedForBody) { + if (auto *GEP = dyn_cast(&I)) { + if (GEP->isInBounds()) { + GEPInst = GEP; + } else { + GEPInst2 = GEP; + } + } + } + assert(GEPInst && + "getelementptr inbounds instruction not found in ClonedForBody\n"); + assert(GEPInst2 && + "getelementptr inbounds instruction not found in ClonedForBody\n"); + + GEPInst2->setOperand(0, GEP); + + Instruction *loadinst = GEPInst->getNextNode(); + GEPInst->moveBefore(FloatPhi); + loadinst->moveBefore(FloatPhi); + + if (FloatPhi) { + // Find the llvm.fmuladd.f32 instruction + Instruction *FMulAdd 
= + getFirstCallInstWithName(ClonedForBody, "llvm.fmuladd.f32"); + assert(FMulAdd && "llvm.fmuladd.f32 instruction not found\n"); + Instruction *InsertPoint = FMulAdd->getNextNode(); + if (FMulAdd) { + // Copy instructions unroll_factor-1 times + for (int i = 0; i < (unroll_factor - 1); ++i) { + ValueToValueMapTy VMap; + for (auto It = FloatPhi->getIterator(); &*It != FMulAdd->getNextNode(); + ++It) { + Instruction *NewInst = It->clone(); + VMap[&*It] = NewInst; + NewInst->insertBefore(InsertPoint); + } + + // Update operands of new instructions + for (auto It = FloatPhi->getIterator(); &*It != FMulAdd->getNextNode(); + ++It) { + Instruction *NewInst = cast(VMap[&*It]); + for (unsigned j = 0; j < NewInst->getNumOperands(); j++) { + Value *Op = NewInst->getOperand(j); + if (VMap.count(Op)) { + NewInst->setOperand(j, VMap[Op]); + } + } + // If NewInst is a getelementptr instruction, set its operand 1 to i+1 + if (GetElementPtrInst *GEP = dyn_cast(NewInst)) { + GEP->setOperand(0, GEPInst); + GEP->setOperand( + 1, ConstantInt::get(GEP->getOperand(1)->getType(), i + 1)); + GEP->setName("arrayidx" + std::to_string(i + 1)); + } + } + } + + } else { + assert(false && "llvm.fmuladd.f32 instruction not found\n"); + } + } else { + assert(false && "phi float instruction not found\n"); + } + movePHINodesToTop(*ClonedForBody); + groupAndReorderInstructions(ClonedForBody); + + // Create new basic block for.cond.cleanup + BasicBlock *ForCondCleanup = + BasicBlock::Create(F.getContext(), "for.cond.cleanup", &F, ClonedForBody); + + ForCond8Preheader->getTerminator()->setSuccessor(1, ForCondCleanup); + // Create unconditional branch to ClonedForBody in for.cond.cleanup + BranchInst::Create(ClonedForBody, ForCondCleanup); + + // Get the terminator instruction of ClonedForBody + Instruction *Terminator = ClonedForBody->getTerminator(); + + // Set the first successor of ClonedForBody to for.cond.cleanup + if (Terminator->getNumSuccessors() > 0) { + Terminator->setSuccessor(0, 
ForCondCleanup); + } + + // Clone phi float nodes from ClonedForBody to ForCondCleanup + int i = 0; + for (PHINode &Phi : ClonedForBody->phis()) { + if (Phi.getType()->isFloatTy()) { + Instruction *newPhi = Phi.clone(); + cast(newPhi)->setIncomingBlock(0, ForCond8Preheader); + newPhi->insertBefore(ForCondCleanup->getTerminator()); + if (i == 0) { + GetElementPtrInst *arrayidx = GetElementPtrInst::Create( + Type::getFloatTy(F.getContext()), DestArg, N0276, "arrayidx", + ForCondCleanup->getTerminator()); + StoreInst *storeInst = + new StoreInst(newPhi, arrayidx, ForCondCleanup->getTerminator()); + } else { + Instruction *orInst = BinaryOperator::CreateDisjoint( + Instruction::Or, N0276, ConstantInt::get(N0276->getType(), i), + "add"); + orInst->insertBefore(ForCondCleanup->getTerminator()); + GetElementPtrInst *arrayidx = GetElementPtrInst::Create( + Type::getFloatTy(F.getContext()), DestArg, orInst, "arrayidx", + ForCondCleanup->getTerminator()); + + StoreInst *storeInst = + new StoreInst(newPhi, arrayidx, ForCondCleanup->getTerminator()); + } + i++; + } + } + + // Insert new instructions at the end of ClonedForBody + Builder.SetInsertPoint(ForCondCleanup->getTerminator()); + Value *add89 = Builder.CreateAdd( + N0276, ConstantInt::get(N0276->getType(), unroll_factor), "add89", true, + true); + Value *cmp7 = Builder.CreateICmpSLT(add89, Sub6, "cmp7"); + + // Get the original terminator instruction + Instruction *OldTerminator = ForCondCleanup->getTerminator(); + + // Create new conditional branch instruction + BranchInst *NewBr = + BranchInst::Create(ForCond8Preheader, ForCond91Preheader, cmp7); + + // Insert new branch instruction and delete the old terminator + ReplaceInstWithInst(OldTerminator, NewBr); + + movePHINodesToTop(*ForCondCleanup); + groupAndReorderInstructions(ForCondCleanup); + + // Update PHI nodes in for.cond8.preheader + for (PHINode &Phi : ForCond8Preheader->phis()) { + Phi.addIncoming(add89, ForCondCleanup); + } + + // Update PHI nodes in 
for.cond91.preheader + for (PHINode &Phi : ForCond91Preheader->phis()) { + Phi.addIncoming(add89, ForCondCleanup); + } + + // Iterate through all PHI nodes in returnBB + for (PHINode &Phi : returnBB->phis()) { + // Add new incoming value for each PHI node + Phi.addIncoming(ConstantInt::get(Type::getInt32Ty(F.getContext()), 0), + ForCond91Preheader); + } + // for.cond95.preheader.lr.ph -> for.cond11.preheader.us.preheader + ForCond95PreheaderLrPh->getTerminator()->setSuccessor( + 0, ForCond11PreheaderUsPreheader); +} + +static bool checkIfDotProdSimplest(Function &F) { + bool flag = false; + + if (F.size() == 3) { + BasicBlock *entryBB = getBasicBlockByName(F, "entry"); + BasicBlock *forCondCleanup = getBasicBlockByName(F, "for.cond.cleanup"); + BasicBlock *forBody = getBasicBlockByName(F, "for.body"); + if (entryBB && forCondCleanup && forBody) { + CallInst *fmuladd = getFirstCallInstWithName(forBody, "llvm.fmuladd.f32"); + if (fmuladd) { + if (forBody->getTerminator()->getSuccessor(0) == forCondCleanup && + forBody->getTerminator()->getSuccessor(1) == forBody) { + if (entryBB->getTerminator()->getSuccessor(0) == forBody) { + flag = true; + } + } + } + } + } + return flag; +} +// for dotprod, llvm.fmuladd.f32 is in for.body +static bool checkIfDotProdComplicated(Function &F) { + bool flag1 = false; + bool flag2 = false; + bool flag3 = false; + if (F.size() == 3) { + BasicBlock *entryBB = getBasicBlockByName(F, "entry"); + BasicBlock *forCondCleanup = getBasicBlockByName(F, "for.cond.cleanup"); + BasicBlock *forBody = getBasicBlockByName(F, "for.body"); + if (entryBB && forCondCleanup && forBody) { + CallInst *fmuladd = getFirstCallInstWithName(forBody, "llvm.fmuladd.f32"); + if (fmuladd) { + + if (forBody->getTerminator()->getSuccessor(0) == forCondCleanup && + forBody->getTerminator()->getSuccessor(1) == forBody) { + if (entryBB->getTerminator()->getSuccessor(0) == forBody) { + flag1 = true; + } + } + } + } + if (forBody) { + for (Instruction &I : *forBody) { + if 
(auto *BinOp = dyn_cast(&I)) { + if (BinOp->getOpcode() == Instruction::FAdd || + BinOp->getOpcode() == Instruction::FMul || + BinOp->getOpcode() == Instruction::FSub || + BinOp->getOpcode() == Instruction::FDiv) { + flag2 = true; + } + } + } + + // Check if forBody has exactly one float PHI node + int floatPhiCount = 0; + for (PHINode &Phi : forBody->phis()) { + if (Phi.getType()->isFloatTy()) { + floatPhiCount++; + } + } + if (floatPhiCount == 1) { + flag3 = true; + } + } + } + + return flag1 && flag2 && flag3; +} +static bool shouldUnrollLoopWithCount(Function &F, Loop *L, + ScalarEvolution &SE) { + if (!checkIfDotProdSimplest(F)) { + return false; + } + // Check if the loop is suitable for unrolling + if (!L->getLoopLatch()) + return false; + if (!L->getExitingBlock()) + return false; + + // Check if the loop count is fixed and appropriate, loop count is constant + const SCEV *TripCount = SE.getBackedgeTakenCount(L); + if (isa(TripCount)) { + // More condition checks can be added here + return true; + } + return false; +} + +static void +insertPhiNodesForFMulAdd(BasicBlock *LoopHeader, BasicBlock *LoopPreheader, + SmallVector &FMulAddCalls) { + // Collect all tail call float @llvm.fmuladd.f32 in LoopHeader + for (Instruction &I : *LoopHeader) { + if (CallInst *CI = dyn_cast(&I)) { + if (Function *F = CI->getCalledFunction()) { + if (F->getName() == "llvm.fmuladd.f32" && CI->isTailCall()) { + FMulAddCalls.push_back(CI); + } + } + } + } + + // Insert phi nodes for each FMulAdd call + for (CallInst *CI : FMulAddCalls) { + // Create new phi node + PHINode *PHI = + PHINode::Create(CI->getType(), 2, CI->getName() + ".phi", CI); + + // Set incoming values for phi node + PHI->addIncoming(ConstantFP::get(CI->getType(), 0), LoopPreheader); + PHI->addIncoming(CI, LoopHeader); + + CI->setOperand(2, PHI); + } +} + +static void postUnrollLoopWithCount(Function &F, Loop *L, int unroll_count) { + BasicBlock *LoopHeader = L->getHeader(); + BasicBlock *LoopPreheader = 
L->getLoopPreheader(); + // Collect all tail call float @llvm.fmuladd.f32 in LoopHeader + SmallVector FMulAddCalls; + insertPhiNodesForFMulAdd(LoopHeader, LoopPreheader, FMulAddCalls); + + movePHINodesToTop(*LoopHeader); + modifyAddToOr(LoopHeader); + groupAndReorderInstructions(LoopHeader); + + // Create for.end basic block after LoopHeader + ICmpInst *LastICmp = getLastICmpInst(LoopHeader); + LastICmp->setPredicate(ICmpInst::ICMP_ULT); + // Get the first operand of LastICmp + Value *Operand1 = LastICmp->getOperand(1); + + // Directly set the first operand of LastICmp to a new constant value + LastICmp->setOperand( + 1, ConstantInt::get(Operand1->getType(), + dyn_cast(Operand1)->getSExtValue() - + (2 * unroll_count - 1))); + LastICmp->setName("cmp"); + + swapTerminatorSuccessors(LoopHeader); + + // After swapping, succ 0 is LoopHeader, succ 1 is returnBB + BasicBlock *ExitingBlock = L->getExitBlock(); + ExitingBlock->setName("for.end"); + + // Get ret instruction in ExitingBlock + ReturnInst *RetInst = dyn_cast(ExitingBlock->getTerminator()); + if (!RetInst) { + assert(false && "ret instruction not found\n"); + return; + } + + // Get the original return value + Value *OriginalRetValue = RetInst->getOperand(0); + + // Create IRBuilder, set insertion point before ret instruction + IRBuilder<> Builder(RetInst); + + // Create a series of fadd instructions + Value *CurrentSum = OriginalRetValue; + Value *add37 = Builder.CreateFAdd(FMulAddCalls[1], CurrentSum, "add37"); + Value *add38 = Builder.CreateFAdd(FMulAddCalls[2], FMulAddCalls[3], "add38"); + Value *add39 = Builder.CreateFAdd(FMulAddCalls[4], FMulAddCalls[5], "add39"); + Value *add40 = Builder.CreateFAdd(FMulAddCalls[6], FMulAddCalls[7], "add40"); + Value *add41 = Builder.CreateFAdd(add37, add38, "add41"); + Value *add42 = Builder.CreateFAdd(add39, add40, "add42"); + CurrentSum = Builder.CreateFAdd(add41, add42, "add43"); + + // Replace the original ret instruction + RetInst->setOperand(0, CurrentSum); + + // 
Verify function + if (verifyFunction(F, &errs())) { + LLVM_DEBUG(errs() << "Function verification failed\n"); + return; + } +} + +static bool shouldUnrollComplexLoop(Function &F, Loop *L, ScalarEvolution &SE, + DominatorTree &DT, LoopInfo &LI) { + if (!checkIfDotProdComplicated(F)) { + return false; + } + // Check if the loop is suitable for unrolling + if (!L->getLoopLatch()) + return false; + if (!L->getExitingBlock()) + return false; + + if (L->getCanonicalInductionVariable()) + return false; + // Check if the loop count is fixed and appropriate, loop count is constant + BasicBlock *LoopPreheader = L->getLoopPreheader(); + // Get the start value of the loop + if (LoopPreheader) { + return false; + } + + BasicBlock *LoopHeader = L->getHeader(); + BasicBlock *NewPreheader = + BasicBlock::Create(LoopHeader->getContext(), "for.cond.preheader", + LoopHeader->getParent(), LoopHeader); + // Redirect all external predecessors to the new preheader basic block + for (BasicBlock *pred : predecessors(LoopHeader)) { + if (!L->contains(pred)) { + pred->getTerminator()->replaceUsesOfWith(LoopHeader, NewPreheader); + // Update PHI nodes in the loop header to point to the new preheader basic + // block + for (PHINode &PN : LoopHeader->phis()) { + int Index = PN.getBasicBlockIndex(pred); + if (Index != -1) { + PN.setIncomingBlock(Index, NewPreheader); + } + } + } + } + // Jump from the new preheader to the loop header + BranchInst::Create(LoopHeader, NewPreheader); + return true; +} + +static bool shouldUnrollAddcType(Function &F, LoopInfo *LI) { + // Check the number of basic blocks + if (F.size() != 6) + return false; + + // Check the loop nesting level + unsigned int maxLoopDepth = 0; + for (auto &BB : F) { + maxLoopDepth = std::max(maxLoopDepth, LI->getLoopDepth(&BB)); + } + if (maxLoopDepth != 1) { + return false; + } + + BasicBlock *Entry = getBasicBlockByName(F, "entry"); + BasicBlock *IfEnd = getBasicBlockByName(F, "if.end"); + BasicBlock *ForCondPreheader = 
getBasicBlockByName(F, "for.cond.preheader"); + BasicBlock *ForBody = getBasicBlockByName(F, "for.body"); + BasicBlock *ForBodyClone = getBasicBlockByName(F, "for.body.clone"); + BasicBlock *Return = getBasicBlockByName(F, "return"); + + if (!Entry || !IfEnd || !ForCondPreheader || !ForBody || !ForBodyClone || + !Return) + return false; + + if (Entry->getTerminator()->getSuccessor(0) != Return || + Entry->getTerminator()->getSuccessor(1) != IfEnd || + IfEnd->getTerminator()->getSuccessor(0) != ForBody || + IfEnd->getTerminator()->getSuccessor(1) != ForCondPreheader || + ForCondPreheader->getTerminator()->getSuccessor(0) != ForBodyClone || + ForCondPreheader->getTerminator()->getSuccessor(1) != Return || + ForBody->getTerminator()->getSuccessor(0) != Return || + ForBody->getTerminator()->getSuccessor(1) != ForBody || + ForBodyClone->getTerminator()->getSuccessor(0) != Return || + ForBodyClone->getTerminator()->getSuccessor(1) != ForBodyClone) + return false; + + // Check if there are three outer loops, each with one inner loop + int outerLoopCount = 0; + int innerLoopCount = 0; + for (Loop *L : LI->getLoopsInPreorder()) { + if (L->getLoopDepth() == 1) { + outerLoopCount++; + if (L->getSubLoops().size() == 1) { + innerLoopCount++; + } + } + } + + if (outerLoopCount != 2 || innerLoopCount != 0) { + return false; + } + + return true; +} + +static bool shouldUnrollDotprodType(Function &F, LoopInfo *LI) { + // Check the number of basic blocks + if (F.size() != 5) + return false; + + // Check the loop nesting level + unsigned int maxLoopDepth = 0; + for (auto &BB : F) { + maxLoopDepth = std::max(maxLoopDepth, LI->getLoopDepth(&BB)); + } + if (maxLoopDepth != 1) { + return false; + } + + BasicBlock *Entry = getBasicBlockByName(F, "entry"); + BasicBlock *ForCondPreheader = getBasicBlockByName(F, "for.cond.preheader"); + BasicBlock *IfEnd = getBasicBlockByName(F, "if.end"); + BasicBlock *ForBody = getBasicBlockByName(F, "for.body"); + BasicBlock *ForBodyClone = 
getBasicBlockByName(F, "for.body.clone"); + + if (!Entry || !IfEnd || !ForCondPreheader || !ForBody || !ForBodyClone) + return false; + + if (Entry->getTerminator()->getSuccessor(0) != ForBody || + Entry->getTerminator()->getSuccessor(1) != ForCondPreheader || + ForCondPreheader->getTerminator()->getSuccessor(0) != ForBodyClone || + ForCondPreheader->getTerminator()->getSuccessor(1) != IfEnd || + ForBody->getTerminator()->getSuccessor(0) != IfEnd || + ForBody->getTerminator()->getSuccessor(1) != ForBody || + ForBodyClone->getTerminator()->getSuccessor(0) != IfEnd || + ForBodyClone->getTerminator()->getSuccessor(1) != ForBodyClone) + return false; + + // Check if there are three outer loops, each with one inner loop + int outerLoopCount = 0; + int innerLoopCount = 0; + for (Loop *L : LI->getLoopsInPreorder()) { + if (L->getLoopDepth() == 1) { + outerLoopCount++; + if (L->getSubLoops().size() == 1) { + innerLoopCount++; + } + } + } + + if (outerLoopCount != 2 || innerLoopCount != 0) { + return false; + } + + return true; +} + +static std::pair modifyEntryBB(BasicBlock &entryBB) { + ICmpInst *icmp = getLastICmpInst(&entryBB); + assert(icmp && "icmp not found"); + Value *start_index = icmp->getOperand(0); + Value *end_index = icmp->getOperand(1); + // Insert new instructions before icmp + IRBuilder<> Builder(icmp); + Value *sub = Builder.CreateNSWAdd( + end_index, ConstantInt::get(end_index->getType(), -8), "sub"); + icmp->setOperand(0, sub); + icmp->setOperand(1, start_index); + return std::make_pair(sub, end_index); +} + +static void postUnrollLoopWithVariable(Function &F, Loop *L, int unroll_count) { + BasicBlock *LoopPreheader = L->getLoopPreheader(); + // Get the basic blocks to merge + SmallVector BBsToMerge; + BasicBlock *ForBody1 = getBasicBlockByName(F, "for.body.1"); + BasicBlock *ForBody2 = getBasicBlockByName(F, "for.body.2"); + BasicBlock *ForBody3 = getBasicBlockByName(F, "for.body.3"); + BasicBlock *ForBody4 = getBasicBlockByName(F, "for.body.4"); + 
BasicBlock *ForBody5 = getBasicBlockByName(F, "for.body.5"); + BasicBlock *ForBody6 = getBasicBlockByName(F, "for.body.6"); + BasicBlock *ForBody7 = getBasicBlockByName(F, "for.body.7"); + assert(ForBody1 && ForBody2 && ForBody3 && ForBody4 && ForBody5 && ForBody6 && + ForBody7 && "basic block not found"); + BBsToMerge.push_back(ForBody1); + BBsToMerge.push_back(ForBody2); + BBsToMerge.push_back(ForBody3); + BBsToMerge.push_back(ForBody4); + BBsToMerge.push_back(ForBody5); + BBsToMerge.push_back(ForBody6); + BBsToMerge.push_back(ForBody7); + + BasicBlock *LoopHeader = L->getHeader(); + BasicBlock *LoopHeaderClone = + cloneBasicBlockWithRelations(LoopHeader, ".clone", &F); + LoopHeaderClone->moveAfter(LoopHeader); + // Create a new basic block as for.end + BasicBlock *ForEnd = getBasicBlockByName(F, "for.cond.cleanup"); + assert(ForEnd && "basic block not found"); + ForEnd->setName("for.end"); + + LoopHeaderClone->getTerminator()->setSuccessor(1, LoopHeaderClone); + for (PHINode &Phi : LoopHeaderClone->phis()) { + Phi.setIncomingBlock(1, LoopHeaderClone); + } + + for (BasicBlock *BB : BBsToMerge) { + MergeBasicBlockIntoOnlyPred(BB); + } + + // Adjust positions + LoopHeaderClone->moveAfter(getBasicBlockByName(F, "for.body.7")); + assert(LoopHeaderClone && "basic block not found"); + ForEnd->moveAfter(LoopHeaderClone); + + BasicBlock &entryBB = F.getEntryBlock(); + auto [Sub, end_index] = modifyEntryBB(entryBB); + entryBB.getTerminator()->setSuccessor(1, ForBody7); + + SmallVector FAMSDInsts; + for (Instruction &I : *ForBody7) { + if (auto *BinOp = dyn_cast(&I)) { + if (BinOp->getOpcode() == Instruction::FAdd || + BinOp->getOpcode() == Instruction::FMul || + BinOp->getOpcode() == Instruction::FSub || + BinOp->getOpcode() == Instruction::FDiv) { + FAMSDInsts.push_back(BinOp); + } + } + } + assert(!FAMSDInsts.empty() && "fadd/fmul/fsub/fdiv instruction not found"); + PHINode *firstFloatPhi = getFirstFloatPhi(ForBody7); + assert(firstFloatPhi && "phi node not found"); + 
// Clone phi node 7 times + for (int i = 0; i < 7; i++) { + PHINode *clonedPhi = cast(firstFloatPhi->clone()); + clonedPhi->setName("result" + Twine(i)); + clonedPhi->insertAfter(firstFloatPhi); + auto *temp = FAMSDInsts[i]; + clonedPhi->setIncomingValue(1, temp); + temp->setOperand(0, clonedPhi); + } + + for (PHINode &Phi : ForBody7->phis()) { + Phi.setIncomingBlock(0, &entryBB); + auto *temp = Phi.clone(); + temp->setName("result0.0.lcssa"); + temp->insertBefore(LoopPreheader->getTerminator()); + } + + ICmpInst *lastICmp = getLastICmpInst(ForBody7); + assert(lastICmp && "icmp not found"); + lastICmp->setOperand(1, Sub); + lastICmp->setPredicate(ICmpInst::ICMP_SLT); + + ForBody7->getTerminator()->setSuccessor(0, LoopPreheader); + ForBody7->getTerminator()->setSuccessor(1, ForBody7); + + PHINode *firstI32Phi = getFirstI32Phi(LoopPreheader); + assert(firstI32Phi && "phi node not found"); + // Insert icmp slt instruction in LoopPreheader + IRBuilder<> Builder(LoopPreheader->getTerminator()); + ICmpInst *NewICmp = + cast(Builder.CreateICmpSLT(firstI32Phi, end_index, "cmp")); + + // Convert the original unconditional branch to a conditional branch + BranchInst *OldBr = cast(LoopPreheader->getTerminator()); + BranchInst *NewBr = BranchInst::Create(LoopHeaderClone, ForEnd, NewICmp); + ReplaceInstWithInst(OldBr, NewBr); + + Instruction *faddInst = nullptr; + Instruction *addNswInst = nullptr; + + for (auto &I : *LoopHeaderClone) { + if (auto *BinOp = dyn_cast(&I)) { + if ((BinOp->getOpcode() == Instruction::FAdd || + BinOp->getOpcode() == Instruction::FMul || + BinOp->getOpcode() == Instruction::FSub || + BinOp->getOpcode() == Instruction::FDiv) && + BinOp->getType()->isFloatTy()) { + faddInst = BinOp; + } else if (BinOp->getOpcode() == Instruction::Add && + BinOp->hasNoSignedWrap()) { + addNswInst = BinOp; + } + } + + if (faddInst && addNswInst) { + break; + } + } + assert(faddInst && addNswInst && + "fadd/fmul/fsub/fdiv float and add nsw instructions not found"); + 
PHINode *firstI32PhiLoopHeaderClone = getFirstI32Phi(LoopHeaderClone); + assert(firstI32PhiLoopHeaderClone && "phi node not found"); + firstI32PhiLoopHeaderClone->setIncomingValue(0, firstI32Phi); + firstI32PhiLoopHeaderClone->setIncomingValue(1, addNswInst); + + PHINode *firstFloatPhiLoopHeaderClone = getFirstFloatPhi(LoopHeaderClone); + assert(firstFloatPhiLoopHeaderClone && "phi node not found"); + PHINode *lastFloatPhiLoopPreheader = getLastFloatPhi(LoopPreheader); + assert(lastFloatPhiLoopPreheader && "phi node not found"); + firstFloatPhiLoopHeaderClone->setIncomingValue(0, lastFloatPhiLoopPreheader); + firstFloatPhiLoopHeaderClone->setIncomingValue(1, faddInst); + + // Collect all phi float instructions in LoopPreheader + SmallVector floatPhis; + for (auto &I : *LoopPreheader) { + if (auto *Phi = dyn_cast(&I)) { + if (Phi->getType()->isFloatTy()) { + floatPhis.push_back(Phi); + } + } + } + + // Get the ret instruction in ExitingBlock + ReturnInst *RetInst = dyn_cast(ForEnd->getTerminator()); + if (!RetInst) { + assert(false && "ret instruction not found in ExitingBlock"); + return; + } + + // Get the original return value + Value *OriginalRetValue = RetInst->getOperand(0); + + // Create IRBuilder, set insertion point before the ret instruction + + Builder.SetInsertPoint(RetInst); + // Create a series of fadd instructions + assert(floatPhis.size() == 8 && "expected floatPhis has 8 phi node"); + Value *CurrentSum = nullptr; + Value *add64 = Builder.CreateFAdd(floatPhis[0], OriginalRetValue, "add64"); + Value *add65 = Builder.CreateFAdd(floatPhis[1], floatPhis[2], "add65"); + Value *add66 = Builder.CreateFAdd(floatPhis[3], floatPhis[4], "add66"); + Value *add67 = Builder.CreateFAdd(floatPhis[5], floatPhis[6], "add67"); + Value *add68 = Builder.CreateFAdd(add64, add65, "add68"); + Value *add69 = Builder.CreateFAdd(add66, add67, "add69"); + CurrentSum = Builder.CreateFAdd(add68, add69, "add70"); + + // Replace the original ret instruction + RetInst->setOperand(0, 
CurrentSum); + PHINode *firstFloatPhiForEnd = getFirstFloatPhi(ForEnd); + assert(firstFloatPhiForEnd && "phi node not found"); + // Remove existing incoming values from firstFloatPhiForEnd + while (firstFloatPhiForEnd->getNumIncomingValues() > 0) { + firstFloatPhiForEnd->removeIncomingValue(0u, false); + } + // Add two incoming values to firstFloatPhiForEnd + firstFloatPhiForEnd->addIncoming(faddInst, LoopHeaderClone); + firstFloatPhiForEnd->addIncoming(lastFloatPhiLoopPreheader, LoopPreheader); + + runDeadCodeElimination(F); +} + +static bool shouldUnrollCorr(Function &F, LoopInfo *LI) { + if (F.size() != 7) + return false; + + BasicBlock *Entry = getBasicBlockByName(F, "entry"); + BasicBlock *ForCondPreheader = getBasicBlockByName(F, "for.cond.preheader"); + BasicBlock *Return = getBasicBlockByName(F, "return"); + + if (!Entry || !ForCondPreheader || !Return) + return false; + + if (Entry->getTerminator()->getSuccessor(0) != Return || + Entry->getTerminator()->getSuccessor(1) != ForCondPreheader) { + return false; + } + + // Feature 2: Has 5 parameters + if (F.arg_size() != 5) { + return false; + } + + unsigned int loopNestLevel = 0; + for (auto &BB : F) { + if (isa(BB.getTerminator())) { + loopNestLevel = std::max(loopNestLevel, LI->getLoopDepth(&BB)); + } + } + if (loopNestLevel != 2) { + return false; + } + + bool hasFMulAdd = false; + for (auto &BB : F) { + for (auto &I : BB) { + if (RecurrenceDescriptor::isFMulAddIntrinsic(&I)) { + hasFMulAdd = true; + break; + } + } + if (hasFMulAdd) + break; + } + if (!hasFMulAdd) { + return false; + } + + return true; +} + +static bool shouldUnrollConvccorr(Function &F, LoopInfo *LI) { + // Check the number of basic blocks + if (F.size() != 17) + return false; + + // Check the number of parameters + if (F.arg_size() != 5) { + return false; + } + + // Check the loop nesting level + unsigned int maxLoopDepth = 0; + for (auto &BB : F) { + maxLoopDepth = std::max(maxLoopDepth, LI->getLoopDepth(&BB)); + } + if (maxLoopDepth != 
2) { + return false; + } + + // Check if the fmuladd.f32 inline function is used + bool hasFMulAdd = false; + for (auto &BB : F) { + for (auto &I : BB) { + if (RecurrenceDescriptor::isFMulAddIntrinsic(&I)) { + hasFMulAdd = true; + break; + } + } + if (hasFMulAdd) + break; + } + if (!hasFMulAdd) { + return false; + } + + BasicBlock *Entry = getBasicBlockByName(F, "entry"); + BasicBlock *ForBody = getBasicBlockByName(F, "for.body"); + BasicBlock *ForEnd = getBasicBlockByName(F, "for.end"); + BasicBlock *Return = getBasicBlockByName(F, "return"); + + if (!Entry || !ForBody || !ForEnd || !Return) + return false; + + if (Entry->getTerminator()->getSuccessor(0) != Return || + ForEnd->getTerminator()->getSuccessor(1) != ForBody) + return false; + + // Check if there are three outer loops, each with one inner loop + int outerLoopCount = 0; + int innerLoopCount = 0; + for (Loop *L : LI->getLoopsInPreorder()) { + if (L->getLoopDepth() == 1) { + outerLoopCount++; + if (L->getSubLoops().size() == 1) { + innerLoopCount++; + } + } + } + + if (outerLoopCount != 3 || innerLoopCount != 3) { + return false; + } + + // Check if there are three icmp eq instructions in the entry basic block + int icmpEqCount = 0; + for (auto &I : *Entry) { + if (auto *ICmp = dyn_cast(&I)) { + if (ICmp->getPredicate() == ICmpInst::ICMP_EQ) { + icmpEqCount++; + } + } + } + + if (icmpEqCount != 3) { + return false; + } + + return true; +} + +static bool shouldUnrollFird(Function &F, LoopInfo *LI) { + + // Check the number of basic blocks + if (F.size() != 14) + return false; + + // Check the number of parameters + if (F.arg_size() != 4) { + return false; + } + + // Check the loop nesting level + unsigned int maxLoopDepth = 0; + for (auto &BB : F) { + maxLoopDepth = std::max(maxLoopDepth, LI->getLoopDepth(&BB)); + } + if (maxLoopDepth != 2) { + return false; + } + + // Check if the fmuladd.f32 inline function is used + bool hasFMulAdd = false; + for (auto &BB : F) { + for (auto &I : BB) { + if 
(RecurrenceDescriptor::isFMulAddIntrinsic(&I)) { + hasFMulAdd = true; + break; + } + } + if (hasFMulAdd) + break; + } + if (!hasFMulAdd) { + return false; + } + + BasicBlock *Entry = getBasicBlockByName(F, "entry"); + BasicBlock *ForCondCleanup = getBasicBlockByName(F, "for.cond.cleanup"); + + if (!Entry || !ForCondCleanup) + return false; + + if (Entry->getTerminator()->getSuccessor(1) != ForCondCleanup) + return false; + + // Check if there are three outer loops, each with one inner loop + int outerLoopCount = 0; + int innerLoopCount = 0; + for (Loop *L : LI->getLoopsInPreorder()) { + if (L->getLoopDepth() == 1) { + outerLoopCount++; + } else if (L->getLoopDepth() == 2) { + innerLoopCount++; + } else { + return false; + } + } + + if (outerLoopCount != 1 || innerLoopCount != 3) { + return false; + } + + return true; +} + +static bool shouldUnrollFirType(Function &F, LoopInfo *LI) { + // Check the number of basic blocks + if (F.size() != 19) + return false; + + // Check the number of parameters + if (F.arg_size() != 4) { + return false; + } + + // Check the loop nesting level + unsigned int maxLoopDepth = 0; + for (auto &BB : F) { + maxLoopDepth = std::max(maxLoopDepth, LI->getLoopDepth(&BB)); + } + if (maxLoopDepth != 2) { + return false; + } + + // Check if the fmuladd.f32 inline function is used + bool hasFMulAdd = false; + for (auto &BB : F) { + for (auto &I : BB) { + if (RecurrenceDescriptor::isFMulAddIntrinsic(&I)) { + hasFMulAdd = true; + break; + } + } + if (hasFMulAdd) + break; + } + if (!hasFMulAdd) { + return false; + } + + BasicBlock *Entry = getBasicBlockByName(F, "entry"); + BasicBlock *ForCondPreheader = getBasicBlockByName(F, "for.cond.preheader"); + BasicBlock *ForBodyLrPh = getBasicBlockByName(F, "for.body.lr.ph"); + BasicBlock *IfEnd = getBasicBlockByName(F, "if.end"); + BasicBlock *ForBody = getBasicBlockByName(F, "for.body"); + BasicBlock *ForBodyClone = getBasicBlockByName(F, "for.body.clone"); + BasicBlock *ForBodyLrPhClone = 
getBasicBlockByName(F, "for.body.lr.ph.clone"); + + if (!Entry || !ForCondPreheader || !ForBodyLrPh || !IfEnd || !ForBody || + !ForBodyClone || !ForBodyLrPhClone) + return false; + + if (Entry->getTerminator()->getSuccessor(0) != ForCondPreheader || + Entry->getTerminator()->getSuccessor(1) != ForBodyLrPhClone || + ForCondPreheader->getTerminator()->getSuccessor(0) != ForBodyLrPh || + ForCondPreheader->getTerminator()->getSuccessor(1) != IfEnd || + ForBodyLrPh->getSingleSuccessor() != ForBody || + ForBodyLrPhClone->getSingleSuccessor() != ForBodyClone) + return false; + + // Check if there are three outer loops, each with one inner loop + int outerLoopCount = 0; + int innerLoopCount = 0; + for (Loop *L : LI->getLoopsInPreorder()) { + if (L->getLoopDepth() == 1) { + outerLoopCount++; + } else if (L->getLoopDepth() == 2) { + innerLoopCount++; + } else { + return false; + } + } + // for opt is 4, for clang is 2. + if (outerLoopCount != 2 || (innerLoopCount != 2 && innerLoopCount != 4)) { + return false; + } + + return true; +} + +static void eraseAllStoreInstInBB(BasicBlock *BB) { + assert(BB && "BasicBlock is nullptr"); + // Erase all store instructions in BB + for (auto it = BB->begin(); it != BB->end();) { + if (isa(&*it)) { + it = it->eraseFromParent(); + } else { + ++it; + } + } +} + +static GetElementPtrInst *getUniqueGetElementPtrInst(BasicBlock *BB) { + assert(BB && "BasicBlock is nullptr"); + // Get the unique getelementptr instruction in BB + GetElementPtrInst *GEP = nullptr; + for (Instruction &I : *BB) { + if (auto *GEPI = dyn_cast(&I)) { + if (!GEP) { + GEP = GEPI; + } else { + // If multiple getelementptr instructions are found, set GEP to nullptr + // and exit the loop + GEP = nullptr; + break; + } + } + } + assert(GEP && "getelementptr instruction not found"); + return GEP; +} + +static void createCriticalEdgeAndMoveStoreInst(BasicBlock *CloneForBody, + BasicBlock *ForEnd37) { + CloneForBody->getTerminator()->setSuccessor(1, CloneForBody); + // Create 
a new BasicBlock: for.cond.for.end_crit_edge + BasicBlock *CriticalEdge = BasicBlock::Create( + CloneForBody->getContext(), "for.cond.for.end_crit_edge", + CloneForBody->getParent(), ForEnd37); + + // Update the terminator instruction of CloneForBody + CloneForBody->getTerminator()->setSuccessor(0, CriticalEdge); + + // Create an unconditional branch instruction to jump to OldForEnd + BranchInst::Create(ForEnd37, CriticalEdge); + + // Find and move the StoreInst in CloneForBody to CriticalEdge + StoreInst *StoreToMove = nullptr; + for (auto &Inst : *CloneForBody) { + if (auto *Store = dyn_cast(&Inst)) { + StoreToMove = Store; + break; + } + } + + if (StoreToMove) { + StoreToMove->removeFromParent(); + StoreToMove->insertBefore(CriticalEdge->getTerminator()); + } +} +static std::tuple +modifyOuterLoop4(Loop *L, BasicBlock *ForBodyMerged, + BasicBlock *CloneForBodyPreheader) { + BasicBlock *BB = L->getHeader(); + PHINode *phi = getLastPhi(BB); + // Add new instructions + IRBuilder<> Builder(BB); + Builder.SetInsertPoint(phi->getNextNode()); + + // and i32 %n.0551, -8 + Value *Add2 = Builder.CreateAnd(phi, ConstantInt::get(phi->getType(), -8)); + + // %sub = and i32 %n.0551, 2147483644 + Value *Sub = + Builder.CreateAnd(phi, ConstantInt::get(phi->getType(), 2147483640)); + + // %cmp12538.not = icmp eq i32 %sub, 0 + Value *Cmp = Builder.CreateICmpEQ(Sub, ConstantInt::get(phi->getType(), 0)); + + // br i1 %cmp12538.not, label %for.cond.cleanup, label %for.body.preheader + // Move the conditional branch instruction to the end of BB + auto *newcondBr = + Builder.CreateCondBr(Cmp, CloneForBodyPreheader, ForBodyMerged); + + // Erase the terminator instruction of BB + Instruction *oldTerminator = BB->getTerminator(); + newcondBr->moveAfter(oldTerminator); + oldTerminator->eraseFromParent(); + + // Erase all store instructions in BB + eraseAllStoreInstInBB(BB); + for (PHINode &Phi : ForBodyMerged->phis()) { + Phi.setIncomingBlock(1, CloneForBodyPreheader); + } + // Get the 
unique getelementptr instruction in BB + GetElementPtrInst *GEP = getUniqueGetElementPtrInst(BB); + return std::make_tuple(Sub, GEP, Add2); +} + +static void modifyInnerLoop4(Loop *L, BasicBlock *ForBodyMerged, Value *Sub, + BasicBlock *CloneForBody, GetElementPtrInst *GEP, + Value *Add2, BasicBlock *CloneForBodyPreheader) { + BasicBlock *OuterBB = L->getHeader(); + SmallVector FMulAddCalls; + insertPhiNodesForFMulAdd(ForBodyMerged, OuterBB, FMulAddCalls); + movePHINodesToTop(*ForBodyMerged); + + groupAndReorderInstructions(ForBodyMerged); + ICmpInst *LastICmp = getLastICmpInst(ForBodyMerged); + LastICmp->setPredicate(ICmpInst::ICMP_ULT); + LastICmp->setOperand(1, Sub); + swapTerminatorSuccessors(ForBodyMerged); + eraseAllStoreInstInBB(ForBodyMerged); + + Function *F = ForBodyMerged->getParent(); + + BasicBlock *NewForEnd = + BasicBlock::Create(F->getContext(), "for.end", F, ForBodyMerged); + NewForEnd->moveAfter(ForBodyMerged); + + // Create an instruction to add the results of four FMulAdd calls + assert(FMulAddCalls.size() == 8 && "Expected 8 FMulAdd calls"); + Value *Sum = nullptr; + Value *sum = BinaryOperator::CreateFAdd(FMulAddCalls[0], FMulAddCalls[1], + "sum", NewForEnd); + Value *sum23 = BinaryOperator::CreateFAdd(FMulAddCalls[2], FMulAddCalls[3], + "sum23", NewForEnd); + Value *sum24 = BinaryOperator::CreateFAdd(FMulAddCalls[4], FMulAddCalls[5], + "sum24", NewForEnd); + Value *sum25 = BinaryOperator::CreateFAdd(FMulAddCalls[6], FMulAddCalls[7], + "sum25", NewForEnd); + Value *sum26 = BinaryOperator::CreateFAdd(sum, sum23, "sum26", NewForEnd); + Value *sum27 = BinaryOperator::CreateFAdd(sum24, sum25, "sum27", NewForEnd); + Sum = BinaryOperator::CreateFAdd(sum26, sum27, "sum28", NewForEnd); + IRBuilder<> Builder(NewForEnd); + Builder.SetInsertPoint(NewForEnd); + // Create a new StoreInst instruction + Builder.CreateStore(Sum, GEP); + // Create a comparison instruction + Value *Cmp = Builder.CreateICmpUGT(Add2, GEP->getOperand(1), "cmp37.not548"); + + // 
Create a conditional branch instruction + Builder.CreateCondBr(Cmp, ForBodyMerged->getTerminator()->getSuccessor(1), + CloneForBodyPreheader); + ForBodyMerged->getTerminator()->setSuccessor(1, NewForEnd); + CloneForBodyPreheader->moveAfter(NewForEnd); + CloneForBody->moveAfter(CloneForBodyPreheader); + + // Create a PHI node in CloneForBodyPreheader + PHINode *SumPHI = PHINode::Create(Sum->getType(), 2, "sum.phi", + CloneForBodyPreheader->getFirstNonPHI()); + + // Set the incoming values of the PHI node + SumPHI->addIncoming(ConstantFP::get(Sum->getType(), 0.0), OuterBB); + SumPHI->addIncoming(Sum, NewForEnd); + + // Create a PHI node in CloneForBodyPreheader + PHINode *AddPHI = PHINode::Create(Add2->getType(), 2, "add.phi", + CloneForBodyPreheader->getFirstNonPHI()); + + // Set the incoming values of the PHI node + AddPHI->addIncoming(ConstantInt::get(Add2->getType(), 0), OuterBB); + AddPHI->addIncoming(Add2, NewForEnd); + Value *phifloatincomingvalue0 = + getFirstCallInstWithName(CloneForBody, "llvm.fmuladd.f32"); + Value *phii32incomingvalue0 = getLastICmpInst(CloneForBody)->getOperand(0); + for (PHINode &Phi : CloneForBody->phis()) { + if (Phi.getType()->isIntegerTy(32)) { + Phi.setIncomingValue(0, AddPHI); + Phi.setIncomingBlock(0, CloneForBodyPreheader); + Phi.setIncomingValue(1, phii32incomingvalue0); + Phi.setIncomingBlock(1, CloneForBody); + } else if (Phi.getType()->isFloatTy()) { + Phi.setIncomingValue(0, SumPHI); + Phi.setIncomingBlock(0, CloneForBodyPreheader); + Phi.setIncomingValue(1, phifloatincomingvalue0); + Phi.setIncomingBlock(1, CloneForBody); + } + } + BasicBlock *OldForEnd = CloneForBody->getTerminator()->getSuccessor(0); + createCriticalEdgeAndMoveStoreInst(CloneForBody, OldForEnd); + + getFirstI32Phi(ForBodyMerged)->setIncomingBlock(1, ForBodyMerged); +} + +static std::tuple +modifyOuterLoop8(Loop *L) { + BasicBlock *BB = L->getHeader(); + ICmpInst *LastICmp = getLastICmpInst(BB); + LastICmp->setPredicate(ICmpInst::ICMP_ULT); + 
swapTerminatorSuccessors(BB); + + eraseAllStoreInstInBB(BB); + Value *lsig_0 = getFirstI32Phi(BB)->getIncomingValue(0); + Value *add207 = LastICmp->getOperand(0); + Value *sub206 = cast(add207)->getOperand(0); + // Add new instructions before LastICmp + IRBuilder<> Builder(LastICmp); + + // %add207.neg = xor i32 %sub206, -1 + Value *Add207Neg = Builder.CreateXor( + sub206, ConstantInt::get(sub206->getType(), -1), "add207.neg"); + + // %add211 = add i32 %lsig.0, %add207.neg + Value *Add211 = Builder.CreateAdd(lsig_0, Add207Neg, "add211"); + + // %div212535 = and i32 %add211, -8 + Value *Div212535 = Builder.CreateAnd( + Add211, ConstantInt::get(Add211->getType(), -8), "div212535"); + + // %add214 = add i32 %div212535, %add207 + Value *Add214 = Builder.CreateAdd(Div212535, add207, "add214"); + + // Set the second operand of LastICmp to Add214 + LastICmp->setOperand(1, Add214); + + // Get the unique getelementptr instruction in BB + GetElementPtrInst *GEP = getUniqueGetElementPtrInst(BB); + + return std::make_tuple(Add214, add207, GEP); +} + +static std::tuple +modifyOuterLoop16(Loop *L) { + BasicBlock *BB = L->getHeader(); + BasicBlock *BBLoopPreHeader = L->getLoopPreheader(); + ICmpInst *LastICmp = getLastICmpInst(BB); + LastICmp->setPredicate(ICmpInst::ICMP_ULT); + swapTerminatorSuccessors(BB); + + eraseAllStoreInstInBB(BB); + Value *lkern_0 = getFirstI32Phi(BB)->getIncomingValue(1); + // Insert an and instruction in BBLoopPreHeader + IRBuilder<> Builder(BBLoopPreHeader->getTerminator()); + Value *Div536 = Builder.CreateAnd(lkern_0, -16, "div536"); + // Get the first operand of LastICmp + Value *Add56 = LastICmp->getOperand(0); + + // Create an add instruction before LastICmp + Builder.SetInsertPoint(LastICmp); + Value *Add60 = Builder.CreateAdd(Div536, Add56, "add60"); + + // Set the second operand of LastICmp to Add60 + LastICmp->setOperand(1, Add60); + + // Get the unique getelementptr instruction in BB + GetElementPtrInst *GEP = getUniqueGetElementPtrInst(BB); + 
+ return std::make_tuple(Add60, Add56, GEP); +} + +static void modifyInnerLoop(Loop *L, BasicBlock *ForBodyMerged, Value *Add60, + BasicBlock *CloneForBody, Value *Add56, + GetElementPtrInst *GEP, uint32_t unroll_count) { + assert((unroll_count == 8 || unroll_count == 16) && + "unroll_count must be 8 or 16"); + BasicBlock *OuterBB = L->getHeader(); + + // Find the predecessor BasicBlock of ForBodyMergedPreheader + BasicBlock *PredBB = ForBodyMerged->getSinglePredecessor(); + if (!PredBB) { + // If there is no single predecessor, traverse all predecessors + for (BasicBlock *Pred : predecessors(ForBodyMerged)) { + PredBB = Pred; + break; // Take the first predecessor + } + } + assert(PredBB && "can't find predecessor of ForBodyMerged"); + + SmallVector FMulAddCalls; + insertPhiNodesForFMulAdd(ForBodyMerged, PredBB, FMulAddCalls); + + movePHINodesToTop(*ForBodyMerged); + + groupAndReorderInstructions(ForBodyMerged); + ICmpInst *LastICmp = getLastICmpInst(ForBodyMerged); + LastICmp->setPredicate(ICmpInst::ICMP_ULT); + LastICmp->setOperand(1, Add60); + swapTerminatorSuccessors(ForBodyMerged); + eraseAllStoreInstInBB(ForBodyMerged); + + BasicBlock *ForEndLoopExit = ForBodyMerged->getTerminator()->getSuccessor(1); + // Create an instruction to add the results of four FMulAdd calls + Value *Sum = nullptr; + if (unroll_count == 16) { + Value *sum45 = + BinaryOperator::CreateFAdd(FMulAddCalls[0], FMulAddCalls[1], "sum45", + ForEndLoopExit->getTerminator()); + Value *sum46 = + BinaryOperator::CreateFAdd(FMulAddCalls[2], FMulAddCalls[3], "sum46", + ForEndLoopExit->getTerminator()); + Value *sum47 = + BinaryOperator::CreateFAdd(FMulAddCalls[4], FMulAddCalls[5], "sum47", + ForEndLoopExit->getTerminator()); + Value *sum48 = + BinaryOperator::CreateFAdd(FMulAddCalls[6], FMulAddCalls[7], "sum48", + ForEndLoopExit->getTerminator()); + Value *sum49 = + BinaryOperator::CreateFAdd(FMulAddCalls[8], FMulAddCalls[9], "sum49", + ForEndLoopExit->getTerminator()); + Value *sum50 = + 
BinaryOperator::CreateFAdd(FMulAddCalls[10], FMulAddCalls[11], "sum50", + ForEndLoopExit->getTerminator()); + Value *sum51 = + BinaryOperator::CreateFAdd(FMulAddCalls[12], FMulAddCalls[13], "sum51", + ForEndLoopExit->getTerminator()); + Value *sum52 = + BinaryOperator::CreateFAdd(FMulAddCalls[14], FMulAddCalls[15], "sum52", + ForEndLoopExit->getTerminator()); + + Value *sum53 = BinaryOperator::CreateFAdd(sum45, sum46, "sum53", + ForEndLoopExit->getTerminator()); + Value *sum54 = BinaryOperator::CreateFAdd(sum47, sum48, "sum54", + ForEndLoopExit->getTerminator()); + Value *sum55 = BinaryOperator::CreateFAdd(sum49, sum50, "sum55", + ForEndLoopExit->getTerminator()); + Value *sum56 = BinaryOperator::CreateFAdd(sum51, sum52, "sum56", + ForEndLoopExit->getTerminator()); + + Value *sum57 = BinaryOperator::CreateFAdd(sum53, sum54, "sum57", + ForEndLoopExit->getTerminator()); + Value *sum58 = BinaryOperator::CreateFAdd(sum55, sum56, "sum58", + ForEndLoopExit->getTerminator()); + + Sum = BinaryOperator::CreateFAdd(sum57, sum58, "sum59", + ForEndLoopExit->getTerminator()); + } else if (unroll_count == 8) { + Value *sum60 = + BinaryOperator::CreateFAdd(FMulAddCalls[0], FMulAddCalls[1], "sum60", + ForEndLoopExit->getTerminator()); + Value *sum61 = + BinaryOperator::CreateFAdd(FMulAddCalls[2], FMulAddCalls[3], "sum61", + ForEndLoopExit->getTerminator()); + Value *sum62 = + BinaryOperator::CreateFAdd(FMulAddCalls[4], FMulAddCalls[5], "sum62", + ForEndLoopExit->getTerminator()); + Value *sum63 = + BinaryOperator::CreateFAdd(FMulAddCalls[6], FMulAddCalls[7], "sum63", + ForEndLoopExit->getTerminator()); + + Value *sum64 = BinaryOperator::CreateFAdd(sum60, sum61, "sum64", + ForEndLoopExit->getTerminator()); + Value *sum65 = BinaryOperator::CreateFAdd(sum62, sum63, "sum65", + ForEndLoopExit->getTerminator()); + Sum = BinaryOperator::CreateFAdd(sum64, sum65, "sum66", + ForEndLoopExit->getTerminator()); + } + + // Create a new basic block for.end164 + BasicBlock *ForEnd164 = 
BasicBlock::Create( + ForEndLoopExit->getContext(), "for.end164", ForEndLoopExit->getParent(), + ForEndLoopExit->getNextNode()); + + // Set the target of the terminator instruction of ForEndLoopExit to + // for.end164 + Instruction *Terminator = ForEndLoopExit->getTerminator(); + BasicBlock *OldSuccessor = Terminator->getSuccessor(0); + Terminator->setSuccessor(0, ForEnd164); + + // Create an unconditional branch instruction in for.end164, jumping to the + // original successor basic block + BranchInst::Create(OldSuccessor, ForEnd164); + + // Create a new phi node in for.end164 + PHINode *PhiSum = PHINode::Create(Type::getInt32Ty(ForEnd164->getContext()), + 2, "phi.sum", ForEnd164->getFirstNonPHI()); + + // Set the incoming values of the phi node + PhiSum->addIncoming(Add56, OuterBB); + PhiSum->addIncoming(LastICmp->getOperand(0), ForEndLoopExit); + + // Create a new phi float node in for.end164 + PHINode *PhiFloat = + PHINode::Create(Type::getFloatTy(ForEnd164->getContext()), 2, "phi.float", + ForEnd164->getFirstNonPHI()); + + // Set the incoming values of the phi node + PhiFloat->addIncoming( + ConstantFP::get(Type::getFloatTy(ForEnd164->getContext()), 0.0), OuterBB); + PhiFloat->addIncoming(Sum, ForEndLoopExit); + // Create a new StoreInst instruction in for.end164 + new StoreInst(PhiFloat, GEP, ForEnd164->getTerminator()); + + Value *operand1 = unroll_count == 16 + ? 
getFirstI32Phi(OuterBB) + : getLastICmpInst(CloneForBody)->getOperand(1); + // Create a new comparison instruction + ICmpInst *NewCmp = + new ICmpInst(ICmpInst::ICMP_UGT, PhiSum, operand1, "cmp182.not587"); + NewCmp->insertBefore(ForEnd164->getTerminator()); + + // Replace the original unconditional branch with a conditional branch + BranchInst *OldBr = cast(ForEnd164->getTerminator()); + BasicBlock *ForEnd37 = OldBr->getSuccessor(0); + BranchInst *NewBr = BranchInst::Create(ForEnd37, CloneForBody, NewCmp); + ReplaceInstWithInst(OldBr, NewBr); + + CloneForBody->moveAfter(ForEnd164); + Instruction *TargetInst = + getFirstCallInstWithName(CloneForBody, "llvm.fmuladd.f32"); + for (PHINode &Phi : CloneForBody->phis()) { + if (Phi.getType()->isIntegerTy(32)) { + Phi.setIncomingValue(0, getLastICmpInst(CloneForBody)->getOperand(0)); + Phi.setIncomingBlock(0, CloneForBody); + Phi.setIncomingValue(1, PhiSum); + Phi.setIncomingBlock(1, ForEnd164); + } else if (Phi.getType()->isFloatTy()) { + Phi.setIncomingValue(0, TargetInst); + Phi.setIncomingBlock(0, CloneForBody); + Phi.setIncomingValue(1, PhiFloat); + Phi.setIncomingBlock(1, ForEnd164); + } + } + + createCriticalEdgeAndMoveStoreInst(CloneForBody, ForEnd37); + + OuterBB->getTerminator()->setSuccessor(1, ForEnd164); +} + +static void PostUnrollConv(Function &F, Loop *L, int unroll_count, + int unroll_index) { + BasicBlock *ForBody = L->getHeader(); + BasicBlock *CloneForBody = + cloneBasicBlockWithRelations(ForBody, ".clone", &F); + CloneForBody->moveAfter(ForBody); + // Set the second branch of the terminator instruction of CloneForBody to + // ForBody + CloneForBody->getTerminator()->setSuccessor(1, ForBody); + + StringRef ForBodyName = ForBody->getName(); + // Get the basic blocks to merge + std::vector BBsToMerge; + for (int i = 1; i < unroll_count; ++i) { + std::string BBName = (ForBodyName + "." 
+ std::to_string(i)).str(); + BasicBlock *ForBodyClone = getBasicBlockByName(F, BBName); + if (ForBodyClone) { + BBsToMerge.push_back(ForBodyClone); + } + } + + if (BBsToMerge.size() == static_cast(unroll_count - 1)) { + for (BasicBlock *BB : BBsToMerge) { + MergeBasicBlockIntoOnlyPred(BB); + } + } + // Get the outer loop of L + Loop *OuterLoop = L->getParentLoop(); + if (unroll_count == 8 && unroll_index == 0) { + BasicBlock *CloneForBodyPreheader = BasicBlock::Create( + CloneForBody->getContext(), CloneForBody->getName() + ".preheader", + CloneForBody->getParent(), CloneForBody); + + updatePredecessorsToPreheader(CloneForBody, CloneForBodyPreheader); + auto [Sub, GEP, Add2] = + modifyOuterLoop4(OuterLoop, BBsToMerge[6], CloneForBodyPreheader); + modifyInnerLoop4(OuterLoop, BBsToMerge[6], Sub, CloneForBody, GEP, Add2, + CloneForBodyPreheader); + } else if (unroll_count == 16) { + auto [Add60, Add56, GEP] = modifyOuterLoop16(OuterLoop); + modifyInnerLoop(OuterLoop, BBsToMerge[14], Add60, CloneForBody, Add56, GEP, + unroll_count); + } else if (unroll_count == 8) { + auto [Add214, Add207, GEP] = modifyOuterLoop8(OuterLoop); + modifyInnerLoop(OuterLoop, BBsToMerge[6], Add214, CloneForBody, Add207, GEP, + unroll_count); + } + LLVM_DEBUG(F.dump()); +} + +static void modifyFirstCloneForBody(BasicBlock *CloneForBody, + PHINode *N_0_lcssa, + BasicBlock *ForBody27LrPh, + PHINode *CoeffPosLcssa, Value *Operand1) { + CloneForBody->getTerminator()->setSuccessor(1, CloneForBody); + for (PHINode &Phi : CloneForBody->phis()) { + Phi.setIncomingBlock(0, ForBody27LrPh); + Phi.setIncomingBlock(1, CloneForBody); + } + PHINode *FirstI32Phi = getFirstI32Phi(CloneForBody); + PHINode *LastI32Phi = getLastI32Phi(CloneForBody); + FirstI32Phi->setIncomingValue(0, N_0_lcssa); + FirstI32Phi->setIncomingBlock(0, ForBody27LrPh); + + Instruction *firstAddInst = nullptr; + Instruction *lastAddInst = nullptr; + for (Instruction &I : *CloneForBody) { + if (I.getOpcode() == Instruction::Add) { + if 
(!firstAddInst) { + firstAddInst = &I; + } + lastAddInst = &I; + } + } + ICmpInst *LastCmpInst = getLastICmpInst(CloneForBody); + LastCmpInst->setOperand(0, lastAddInst); + LastCmpInst->setOperand(1, Operand1); + FirstI32Phi->setIncomingValue(1, lastAddInst); + + LastI32Phi->setIncomingValue(0, CoeffPosLcssa); + LastI32Phi->setIncomingBlock(0, ForBody27LrPh); + + LastI32Phi->setIncomingValue(1, firstAddInst); +} + +static bool setBBFromOtherBB(Function &F, StringRef BBName, + BasicBlock *ForBodyMerged) { + // Find the first and last load instructions in ForBody27LrPh + LoadInst *FirstLoad = nullptr; + LoadInst *LastLoad = nullptr; + BasicBlock *ForBody27LrPh = getBasicBlockByName(F, BBName); + for (Instruction &I : *ForBody27LrPh) { + if (auto *LI = dyn_cast(&I)) { + if (!FirstLoad) { + FirstLoad = LI; + } + LastLoad = LI; + } + } + + assert(FirstLoad && LastLoad && "Find load instructions in ForBody27LrPh"); + + // modify getelementptr + // Traverse the GEP instructions in ForBodyMerged + std::vector GEPInsts; + for (Instruction &I : *ForBodyMerged) { + if (auto *GEP = dyn_cast(&I)) { + GEPInsts.push_back(GEP); + } + } + // Ensure there is at least one GEP instruction + if (!GEPInsts.empty()) { + for (size_t i = 0; i < GEPInsts.size(); ++i) { + GetElementPtrInst *CurrentGEP = GEPInsts[i]; + + if (i % 2 == 1) { // Odd + CurrentGEP->setOperand(0, LastLoad); + } else { // Even + CurrentGEP->setOperand(0, FirstLoad); + } + } + } + return true; +} + +// Function to modify the first loop in FIRD (Finite Impulse Response Design) +// transformation +static void modifyFirdFirstLoop(Function &F, Loop *L, BasicBlock *ForBodyMerged, + BasicBlock *CloneForBody) { + BasicBlock *ForCond23Preheader = + ForBodyMerged->getTerminator()->getSuccessor(0)->getSingleSuccessor(); + assert(ForCond23Preheader && + "ForCondPreheader should have single predecessor"); + + BasicBlock *ForCondCleanup3 = + getFirstI32Phi(ForCond23Preheader)->getIncomingBlock(0); + Instruction *FirstI32Phi = 
getFirstI32Phi(ForCondCleanup3); + + ICmpInst *LastICmp = getLastICmpInst(ForCondCleanup3); + // Create new add instruction + IRBuilder<> Builder(LastICmp); + Value *Add269 = Builder.CreateNSWAdd( + FirstI32Phi, ConstantInt::get(FirstI32Phi->getType(), 8), "add269"); + LastICmp->setOperand(0, Add269); + LastICmp->setPredicate(ICmpInst::ICMP_SGT); + swapTerminatorSuccessors(ForCondCleanup3); + + PHINode *N_069 = getFirstI32Phi(ForBodyMerged); + Value *Inc20_7 = N_069->getIncomingValue(1); + BasicBlock *ForBodyMergedLoopPreheader = N_069->getIncomingBlock(0); + // Create new phi node at the beginning of ForBodyMerged + PHINode *Add281 = PHINode::Create(Type::getInt32Ty(F.getContext()), 2, + "add281", &ForBodyMerged->front()); + + // Set incoming values for phi node + Add281->addIncoming(Add269, ForBodyMergedLoopPreheader); + Add281->addIncoming(Inc20_7, ForBodyMerged); + + N_069->setIncomingValue(1, Add281); + + ICmpInst *LastICmpInPreheader = getLastICmpInst(ForCond23Preheader); + // Create new phi node + PHINode *N_0_lcssa = PHINode::Create(Type::getInt32Ty(F.getContext()), 2, + "n.0.lcssa", LastICmpInPreheader); + + // Set incoming values for phi node + N_0_lcssa->addIncoming(FirstI32Phi, ForCondCleanup3); + N_0_lcssa->addIncoming(Add281, ForBodyMerged); + + // Replace operand of LastICmpInPreheader with new phi node + LastICmpInPreheader->setOperand(0, N_0_lcssa); + LastICmpInPreheader->setPredicate(ICmpInst::ICMP_SLT); + + Value *Operand1 = LastICmp->getOperand(1); + LastICmpInPreheader->setOperand(1, Operand1); + + // Get %coeff_pos.0.lcssa + PHINode *CoeffPosLcssa = getFirstI32Phi(ForCond23Preheader); + + // Insert new add instruction at the end of ForBodyMergedLoopPreheader + BasicBlock *ForBody27LrPh = + ForCond23Preheader->getTerminator()->getSuccessor(0); + Builder.SetInsertPoint(ForBody27LrPh->getTerminator()); + Value *Add11 = Builder.CreateAdd(Operand1, CoeffPosLcssa); + + ForBody27LrPh->getTerminator()->setSuccessor(0, CloneForBody); + ICmpInst 
*LastICmpInForBodyMerged = getLastICmpInst(ForBodyMerged); + LastICmpInForBodyMerged->setOperand(1, Operand1); + LastICmpInForBodyMerged->setOperand(0, Inc20_7); + + modifyFirstCloneForBody(CloneForBody, N_0_lcssa, ForBody27LrPh, CoeffPosLcssa, + Operand1); + + PHINode *acc_0_lcssa = getFirstFloatPhi(ForCond23Preheader); + BasicBlock *ForCond23PreheaderLoopExit = acc_0_lcssa->getIncomingBlock(1); + PHINode *_lcssa = getFirstFloatPhi(ForCond23PreheaderLoopExit); + acc_0_lcssa->setIncomingValue(1, _lcssa->getIncomingValue(0)); + acc_0_lcssa->setIncomingBlock(1, _lcssa->getIncomingBlock(0)); + + Value *floatZero = acc_0_lcssa->getIncomingValue(0); + + // Get all incoming values and blocks for PHINode + for (unsigned i = 1; i < _lcssa->getNumIncomingValues(); ++i) { + Value *IncomingValue = _lcssa->getIncomingValue(i); + BasicBlock *IncomingBlock = _lcssa->getIncomingBlock(i); + + // Create new phi node in ForCond23Preheader + PHINode *NewPhi = + PHINode::Create(floatZero->getType(), 2, + "acc." 
+ std::to_string(i) + ".lcssa", CoeffPosLcssa); + // Add incoming values + NewPhi->addIncoming(floatZero, ForCondCleanup3); + NewPhi->addIncoming(IncomingValue, IncomingBlock); + } + Value *coeff_pos_068 = getLastI32Phi(ForBodyMerged)->getIncomingValue(1); + CoeffPosLcssa->setIncomingValue(1, coeff_pos_068); + + getLastFloatPhi(CloneForBody)->setIncomingValue(0, acc_0_lcssa); + + BasicBlock *PredBB = ForBodyMerged->getSinglePredecessor(); + if (!PredBB) { + // If no single predecessor, iterate through all predecessors + for (BasicBlock *Pred : predecessors(ForBodyMerged)) { + PredBB = Pred; + break; // Only take first predecessor + } + } + SmallVector FMulAddCalls; + // insertPhiNodesForFMulAdd(ForBodyMerged, ForCond23PreHeader, FMulAddCalls); + // Collect all tail call float @llvm.fmuladd.f32 in LoopHeader + for (Instruction &I : *ForBodyMerged) { + if (CallInst *CI = dyn_cast(&I)) { + if (Function *F = CI->getCalledFunction()) { + if (F->getName() == "llvm.fmuladd.f32" && CI->isTailCall()) { + FMulAddCalls.push_back(CI); + } + } + } + } + + // Insert phi nodes for each FMulAdd call + for (CallInst *CI : FMulAddCalls) { + // Create new phi node + PHINode *PHI = PHINode::Create(CI->getType(), 2, CI->getName() + "acc", CI); + + // Set incoming values for phi node + PHI->addIncoming(ConstantFP::get(CI->getType(), 0), PredBB); + PHI->addIncoming(CI, ForBodyMerged); + + CI->setOperand(2, PHI); + } + movePHINodesToTop(*ForBodyMerged); + modifyAddToOr(ForBodyMerged); + + ICmpInst *LastICmpForBodyMerged = getLastICmpInst(ForBodyMerged); + LastICmpForBodyMerged->setPredicate(ICmpInst::ICMP_SGT); + cast(LastICmpForBodyMerged->getOperand(0)) + ->setOperand(0, getFirstI32Phi(ForBodyMerged)); + + // Find first and last load instructions in ForBody14LrPh + LoadInst *FirstLoad = nullptr; + LoadInst *LastLoad = nullptr; + BasicBlock *ForBody14LrPh = getBasicBlockByName(F, "for.body14.lr.ph"); + for (Instruction &I : *ForBody14LrPh) { + if (auto *LI = dyn_cast(&I)) { + if 
(!FirstLoad) { + FirstLoad = LI; + } + LastLoad = LI; + } + } + + assert(FirstLoad && LastLoad && + "Failed to find load instructions in ForBody14LrPh"); + + // modify getelementptr + // Iterate through getelementptr instructions in ForBodyMerged + std::vector GEPInsts; + for (Instruction &I : *ForBodyMerged) { + if (auto *GEP = dyn_cast(&I)) { + GEPInsts.push_back(GEP); + } + } + // Ensure at least one getelementptr instruction exists + if (!GEPInsts.empty()) { + for (size_t i = 0; i < GEPInsts.size(); ++i) { + GetElementPtrInst *CurrentGEP = GEPInsts[i]; + + if (i % 2 == 1) { // Odd + CurrentGEP->setOperand(0, LastLoad); + } else { // Even + CurrentGEP->setOperand(0, FirstLoad); + } + } + } + + // Ensure at least one getelementptr instruction exists + if (!GEPInsts.empty()) { + // Get first getelementptr instruction + GetElementPtrInst *SecondGEP = GEPInsts[1]; + + // Starting from index 1, process every other getelementptr + for (size_t i = 3; i < GEPInsts.size(); i += 2) { + GetElementPtrInst *CurrentGEP = GEPInsts[i]; + + // Set current getelementptr's operand 0 to first getelementptr's value + CurrentGEP->setOperand(0, SecondGEP); + + // Set operand 1 to current index value + // ConstantInt *IndexValue = + // ConstantInt::get(CurrentGEP->getOperand(1)->getType(), i); + CurrentGEP->setOperand( + 1, ConstantInt::get(CurrentGEP->getOperand(1)->getType(), (i) / 2)); + } + } + + setBBFromOtherBB(F, "for.body27.lr.ph", CloneForBody); + + BasicBlock *ForCondCleanup26LoopExit = CloneForBody->getNextNode(); + BasicBlock *ForCondCleanup26 = ForCondCleanup26LoopExit->getSingleSuccessor(); + Instruction *tailcallInst = + getFirstCallInstWithName(CloneForBody, "llvm.fmuladd.f32"); + + // Find add instruction in ForBody27LrPh + Instruction *AddInst = nullptr; + for (Instruction &I : *ForBody27LrPh) { + if (I.getOpcode() == Instruction::Add) { + AddInst = &I; + break; + } + } + + // Insert new instructions in ForCondCleanup26LoopExit + 
Builder.SetInsertPoint(ForCondCleanup26LoopExit->getFirstNonPHI()); + Value *SubResult = Builder.CreateSub(AddInst, N_0_lcssa); + PHINode *firstFloatPhi = getFirstFloatPhi(ForCondCleanup26); + firstFloatPhi->setIncomingValue(1, tailcallInst); + + ForCond23Preheader->setName("for.cond63.preheader"); + // Create new PHI node in ForCondCleanup26 + PHINode *CoeffPosLcssaPhi = + PHINode::Create(CoeffPosLcssa->getType(), 2, "coeff_pos.1.lcssa", + &ForCondCleanup26->front()); + + // Set incoming values and blocks for PHI node + CoeffPosLcssaPhi->addIncoming(CoeffPosLcssa, ForCond23Preheader); + CoeffPosLcssaPhi->addIncoming(SubResult, ForCondCleanup26LoopExit); + // eraseAllStoreInstInBB(ForCondCleanup26); + + ICmpInst *LastICmpForCondCleanup26 = getLastICmpInst(ForCondCleanup26); + + LastICmpForCondCleanup26->setPredicate(ICmpInst::ICMP_SLT); + PHINode *FirstI32ForCondCleanup3 = getFirstI32Phi(ForCondCleanup3); + LastICmpForCondCleanup26->setOperand(0, FirstI32ForCondCleanup3); + LastICmpForCondCleanup26->setOperand( + 1, + ConstantInt::get(LastICmpForCondCleanup26->getOperand(1)->getType(), 8)); + + BasicBlock *ForBody79LrPh = + cloneBasicBlockWithRelations(ForBody27LrPh, ".clone", &F); + ForBody79LrPh->setName("for.body79.lr.ph"); + ForBody79LrPh->moveBefore(CloneForBody); + ForBody79LrPh->getTerminator()->setSuccessor(0, ForBodyMerged); + ForCondCleanup26->getTerminator()->setSuccessor(1, ForBody79LrPh); + // Create new and instruction in ForBody79LrPh + Builder.SetInsertPoint(ForBody79LrPh->getTerminator()); + Value *AndResult = Builder.CreateAnd( + FirstI32ForCondCleanup3, + ConstantInt::get(FirstI32ForCondCleanup3->getType(), 2147483640)); + + BasicBlock *ForCond130Preheader = + cloneBasicBlockWithRelations(ForCond23Preheader, ".clone", &F); + ForCond130Preheader->setName("for.cond130.preheader"); + ForCond130Preheader->moveAfter(CloneForBody); + ForCondCleanup26->getTerminator()->setSuccessor(0, ForCond130Preheader); + for (PHINode &Phi : 
ForCond130Preheader->phis()) { + Phi.setIncomingBlock(0, ForCondCleanup26); + } + // Iterate through phi nodes in ForCond130Preheader and ForCond23Preheader + // simultaneously + auto it130 = ForCond130Preheader->begin(); + auto it23 = ForCond23Preheader->begin(); + + while (it130 != ForCond130Preheader->end() && + it23 != ForCond23Preheader->end()) { + if (auto *phi130 = dyn_cast(&*it130)) { + if (auto *phi23 = dyn_cast(&*it23)) { + if (phi130->getType()->isFloatTy() && phi23->getType()->isFloatTy()) { + // Write phi float from ForCond23Preheader to incomingvalue 0 position + // in ForCond130Preheader + phi130->setIncomingValue(0, phi23); + } + } + ++it23; + } + ++it130; + } + getFirstFloatPhi(ForCond130Preheader)->setIncomingValue(0, firstFloatPhi); + + getFirstI32Phi(ForCond130Preheader) + ->setIncomingValue(0, getFirstI32Phi(ForCondCleanup26)); + + PHINode *LastI32Phi130 = getLastI32Phi(ForCond130Preheader); + LastI32Phi130->setIncomingValue( + 0, ConstantInt::get(getLastI32Phi(ForCond130Preheader)->getType(), 0)); + LastI32Phi130->setIncomingValue(1, AndResult); + + ICmpInst *LastICmp130 = getLastICmpInst(ForCond130Preheader); + LastICmp130->setOperand(1, FirstI32ForCondCleanup3); + + PHINode *LastI32PhiClone = getLastFloatPhi(CloneForBody); + LastI32PhiClone->setIncomingValue(1, tailcallInst); + + // modify for.cond23.preheader.loopexit + // modify for.cond63.preheader + for (PHINode &Phi : ForCond23Preheader->phis()) { + Phi.setIncomingBlock(1, ForBodyMerged); + } + ForBodyMerged->getTerminator()->setSuccessor(0, ForCond130Preheader); + + CloneForBody->getTerminator()->setSuccessor(0, ForCondCleanup26LoopExit); + + // Get for.cond.cleanup.loopexit basic block + BasicBlock *ForCondCleanupLoopExit = + getBasicBlockByName(F, "for.cond23.preheader.loopexit"); + + // Check if for.cond.cleanup.loopexit exists + if (ForCondCleanupLoopExit) { + // Check if for.cond.cleanup.loopexit has no predecessors + if (pred_empty(ForCondCleanupLoopExit)) { + // Delete 
for.cond.cleanup.loopexit basic block + ForCondCleanupLoopExit->eraseFromParent(); + } + } + + ForBodyMerged->getTerminator()->setSuccessor(0, ForCond23Preheader); +} + +static bool copyFloatPhiIncomingValue(int i, BasicBlock *srcBB, + BasicBlock *tarBB) { + assert(srcBB && tarBB && "srcBB or tarBB should not be nullptr"); + // Collect phi float nodes from ForCond130Preheader in reverse order into + // vector + SmallVector floatPhis; + + for (auto it = srcBB->rbegin(); it != srcBB->rend(); ++it) { + if (PHINode *phi = dyn_cast(&*it)) { + if (phi->getType()->isFloatTy()) { + floatPhis.push_back(phi->getIncomingValue(i)); + } + } + } + + // Traverse phi float nodes in ForBodyMerged in reverse order and store values + // from floatPhis into their incoming value 0 + auto floatPhiIt = floatPhis.begin(); + for (auto it = tarBB->rbegin(); + it != tarBB->rend() && floatPhiIt != floatPhis.end(); ++it) { + if (PHINode *phi = dyn_cast(&*it)) { + if (phi->getType()->isFloatTy()) { + phi->setIncomingValue(i, *floatPhiIt); + ++floatPhiIt; + } + } + } + return true; +} + +static void modifyFirdSecondLoop(Function &F, Loop *L, + BasicBlock *ForBodyMerged, + BasicBlock *CloneForBody) { + BasicBlock *ForBody = L->getHeader(); + + BasicBlock *ForBody133LrPh = + BasicBlock::Create(CloneForBody->getContext(), "for.body133.lr.ph", + CloneForBody->getParent(), CloneForBody); + + updatePredecessorsToPreheader(CloneForBody, ForBody133LrPh); + + BasicBlock *PredBB = ForBodyMerged->getSinglePredecessor(); + if (!PredBB) { + // If there is no single predecessor, iterate through all predecessors + for (BasicBlock *Pred : predecessors(ForBodyMerged)) { + PredBB = Pred; + break; // Only take the first predecessor + } + } + SmallVector FMulAddCalls; + // Collect all tail call float @llvm.fmuladd.f32 in LoopHeader + for (Instruction &I : *ForBodyMerged) { + if (CallInst *CI = dyn_cast(&I)) { + if (Function *F = CI->getCalledFunction()) { + if (F->getName() == "llvm.fmuladd.f32" && 
CI->isTailCall()) { + FMulAddCalls.push_back(CI); + } + } + } + } + + // Insert phi nodes for each FMulAdd call + for (CallInst *CI : FMulAddCalls) { + // Create new phi node + PHINode *PHI = PHINode::Create(CI->getType(), 2, CI->getName() + "acc", CI); + + // Set incoming values for phi node + PHI->addIncoming(ConstantFP::get(CI->getType(), 0), PredBB); + PHI->addIncoming(CI, ForBodyMerged); + + CI->setOperand(2, PHI); + } + PHINode *n22_075 = getFirstI32Phi(ForBodyMerged); + // Create new phi node in ForBodyMerged + PHINode *Add76310 = PHINode::Create(Type::getInt32Ty(F.getContext()), 2, + "add76310", &ForBodyMerged->front()); + Add76310->addIncoming(ConstantInt::get(Type::getInt32Ty(F.getContext()), 8), + ForBody133LrPh); + n22_075->setIncomingValue(1, Add76310); + // Create new add instruction in ForBodyMerged + IRBuilder<> Builder(ForBodyMerged->getTerminator()); + Value *Add76 = Builder.CreateAdd( + Add76310, ConstantInt::get(Type::getInt32Ty(F.getContext()), 8), "add76", + true, true); + + // Update phi node's loop edge + Add76310->addIncoming(Add76, ForBodyMerged); + + movePHINodesToTop(*ForBodyMerged); + modifyAddToOr(ForBodyMerged); + + ICmpInst *LastICmp = getLastICmpInst(ForBodyMerged); + LastICmp->setPredicate(ICmpInst::ICMP_SGT); + cast(Add76)->moveBefore(LastICmp); + LastICmp->setOperand(0, Add76); + for (PHINode &Phi : ForBodyMerged->phis()) { + Phi.setIncomingBlock(0, PredBB); + } + + BasicBlock *NewForEnd141 = + BasicBlock::Create(F.getContext(), "for.end141", &F, CloneForBody); + NewForEnd141->moveAfter(CloneForBody); + + BasicBlock *ForCond1Preheader = getBasicBlockByName(F, "for.cond1.preheader"); + for (PHINode &Phi : ForCond1Preheader->phis()) { + Phi.setIncomingBlock(1, NewForEnd141); + } + PHINode *ForCond1PreheaderLastI32Phi = getLastI32Phi(ForCond1Preheader); + // Insert new add instruction in NewForEnd141 + Builder.SetInsertPoint(NewForEnd141); + Value *Inc152 = + Builder.CreateAdd(ForCond1PreheaderLastI32Phi, + 
ConstantInt::get(Type::getInt32Ty(F.getContext()), 1), + "inc152", true, true); + Inc152->setName("inc152"); + + // Update PHI nodes in ForCond1Preheader + ForCond1PreheaderLastI32Phi->setIncomingValue(1, Inc152); + + BasicBlock *ForCondCleanup = getBasicBlockByName(F, "for.cond.cleanup"); + getFirstI32Phi(ForCondCleanup)->setIncomingBlock(1, NewForEnd141); + + // Find len parameter in function F + Value *LenArg = getLenFromEntryBlock(F); + assert(LenArg && "LenArg should be"); + + // Create comparison instruction + Value *ExitCond350 = Builder.CreateICmpEQ(Inc152, LenArg, "exitcond350.not"); + + // Create conditional branch instruction + Builder.CreateCondBr(ExitCond350, ForCondCleanup, ForCond1Preheader); + + BasicBlock *ForCond130Preheader = + getBasicBlockByName(F, "for.cond130.preheader"); + for (PHINode &phi : ForCond130Preheader->phis()) { + phi.setIncomingBlock(1, ForBodyMerged); + } + ForCond130Preheader->getTerminator()->setSuccessor(0, ForBody133LrPh); + ForCond130Preheader->getTerminator()->setSuccessor(1, NewForEnd141); + + // ForBody133LrPh + // Create new instructions in ForBody133LrPh + BasicBlock *ForBody79LrPh = getBasicBlockByName(F, "for.body79.lr.ph"); + ForBody79LrPh->getTerminator()->setSuccessor(0, ForBodyMerged); + // Copy loadinst from ForBody79LrPh to ForBody133LrPh + Builder.SetInsertPoint(ForBody133LrPh->getTerminator()); + for (Instruction &I : *ForBody79LrPh) { + if (isa(I)) { + Instruction *ClonedInst = I.clone(); + ClonedInst->setName(I.getName()); + Builder.Insert(ClonedInst); + } + } + + // modify ForBodyMerged + for (PHINode &Phi : ForBodyMerged->phis()) { + Phi.setIncomingBlock(0, ForBody79LrPh); + } + + PHINode *coeff_pos174 = getLastI32Phi(ForBodyMerged); + PHINode *coeff_pos_0_lcssa_clone = getFirstI32Phi(ForCond130Preheader); + coeff_pos_0_lcssa_clone->setIncomingValue(1, + coeff_pos174->getIncomingValue(1)); + coeff_pos174->setIncomingValue(0, + coeff_pos_0_lcssa_clone->getIncomingValue(0)); + + bool res = 
copyFloatPhiIncomingValue(0, ForCond130Preheader, ForBodyMerged); + assert(res && "copyFloatPhiIncomingZeroValue failed"); + + bool res1 = copyFloatPhiIncomingValue(1, ForBodyMerged, ForCond130Preheader); + assert(res1 && "copyFloatPhiIncomingValue failed"); + // Find first and last load instructions in ForBody79LrPh + LoadInst *FirstLoad = nullptr; + LoadInst *LastLoad = nullptr; + + for (Instruction &I : *ForBody79LrPh) { + if (auto *LI = dyn_cast(&I)) { + if (!FirstLoad) { + FirstLoad = LI; + } + LastLoad = LI; + } + } + + assert(FirstLoad && LastLoad && + "Could not find load instructions in ForBody79LrPh"); + // Iterate through GetElementPtrInst + std::vector GEPInsts; + for (Instruction &I : *ForBodyMerged) { + if (auto *GEP = dyn_cast(&I)) { + GEPInsts.push_back(GEP); + } + } + + // Ensure there is at least one getelementptr instruction + if (!GEPInsts.empty()) { + for (size_t i = 0; i < GEPInsts.size(); ++i) { + GetElementPtrInst *CurrentGEP = GEPInsts[i]; + + if (i % 2 == 1) { // odd + CurrentGEP->setOperand(0, LastLoad); + } else { // even + CurrentGEP->setOperand(0, FirstLoad); + } + } + } + + // Ensure there is at least one getelementptr instruction + if (!GEPInsts.empty()) { + // Get first getelementptr instruction + GetElementPtrInst *FirstGEP = GEPInsts[0]; + + // Starting from index 1, process every other getelementptr + for (size_t i = 2; i < GEPInsts.size(); i += 2) { + GetElementPtrInst *CurrentGEP = GEPInsts[i]; + + // Set current getelementptr's operand 0 to first getelementptr's value + CurrentGEP->setOperand(0, FirstGEP); + + // Set operand 1 to current index value + CurrentGEP->setOperand( + 1, ConstantInt::get(CurrentGEP->getOperand(1)->getType(), (i) / 2)); + } + } + + ForBodyMerged->getTerminator()->setSuccessor(0, ForCond130Preheader); + + // modify for.body27.clone + PHINode *n_0_lcssa_clone = getLastI32Phi(ForCond130Preheader); + PHINode *acc_0_lcssa_clone = getFirstFloatPhi(ForCond130Preheader); + Instruction *tailcallInst = + 
getFirstCallInstWithName(CloneForBody, "llvm.fmuladd.f32"); + Instruction *firstAddInst = nullptr; + Instruction *lastAddInst = nullptr; + for (Instruction &I : *CloneForBody) { + if (I.getOpcode() == Instruction::Add) { + if (!firstAddInst) { + firstAddInst = &I; + } + lastAddInst = &I; + } + } + int index = 0; + for (PHINode &Phi : CloneForBody->phis()) { + Phi.setIncomingBlock(0, ForBody133LrPh); + Phi.setIncomingBlock(1, CloneForBody); + if (index == 0) { + Phi.setIncomingValue(0, n_0_lcssa_clone); + Phi.setIncomingValue(1, lastAddInst); + } else if (index == 1) { + Phi.setIncomingValue(0, coeff_pos_0_lcssa_clone); + Phi.setIncomingValue(1, firstAddInst); + } else if (index == 2) { + Phi.setIncomingValue(0, acc_0_lcssa_clone); + Phi.setIncomingValue(1, tailcallInst); + } + index++; + } + + CloneForBody->getTerminator()->setSuccessor(0, NewForEnd141); + CloneForBody->getTerminator()->setSuccessor(1, CloneForBody); + + // modify for.end141 + // Create phi float node in NewForEnd141 + PHINode *AccPhi = PHINode::Create(Type::getFloatTy(F.getContext()), 2, + "acc0.3.lcssa", &NewForEnd141->front()); + AccPhi->addIncoming(acc_0_lcssa_clone, ForCond130Preheader); + AccPhi->addIncoming(tailcallInst, CloneForBody); + + int i = 0; + Value *Sum = nullptr; + Instruction *insertPoint = AccPhi->getNextNode(); + // Count the number of float type phi nodes in ForCond130Preheader + SmallVector floatPhis; + for (PHINode &phi : ForCond130Preheader->phis()) { + if (phi.getType()->isFloatTy()) { + floatPhis.push_back(&phi); + } + } + assert(floatPhis.size() == 8 && + "Expected 8 float phi nodes in ForCond130Preheader"); + // Create parallel add instructions for better performance + Value *Add60 = + BinaryOperator::CreateFAdd(floatPhis[1], AccPhi, "add60", insertPoint); + Value *Add61 = BinaryOperator::CreateFAdd(floatPhis[2], floatPhis[3], "add61", + insertPoint); + Value *Add62 = BinaryOperator::CreateFAdd(floatPhis[4], floatPhis[5], "add62", + insertPoint); + Value *Add63 = 
BinaryOperator::CreateFAdd(floatPhis[6], floatPhis[7], "add63", + insertPoint); + Value *Add64 = BinaryOperator::CreateFAdd(Add60, Add61, "add64", insertPoint); + Value *Add65 = BinaryOperator::CreateFAdd(Add62, Add63, "add65", insertPoint); + Value *Add66 = BinaryOperator::CreateFAdd(Add64, Add65, "add66", insertPoint); + Sum = Add66; + + // Move getelementptr and store instructions from for.cond.cleanup26 to + // NewForEnd141 + BasicBlock *ForCondCleanup26 = getBasicBlockByName(F, "for.cond.cleanup26"); + + SmallVector instructionsToMove; + + // Collect instructions to move + for (Instruction &I : *ForCondCleanup26) { + if (isa(I) || isa(I)) { + instructionsToMove.push_back(&I); + } + } + + // Move instructions + for (Instruction *I : instructionsToMove) { + I->moveBefore(insertPoint); + if (isa(I)) { + I->setOperand(0, Sum); + } + } + + // Update instructions that used moved instructions + for (Instruction &I : *NewForEnd141) { + I.replaceUsesOfWith(ForCondCleanup26, NewForEnd141); + } + + // Get for.cond.cleanup.loopexit basic block + BasicBlock *ForCondCleanupLoopExit = + getBasicBlockByName(F, "for.cond.cleanup.loopexit"); + + // Check if for.cond.cleanup.loopexit exists + if (ForCondCleanupLoopExit) { + // Check if for.cond.cleanup.loopexit has no predecessors + if (pred_empty(ForCondCleanupLoopExit)) { + // Delete for.cond.cleanup.loopexit basic block + ForCondCleanupLoopExit->eraseFromParent(); + } + } + + setBBFromOtherBB(F, "for.body133.lr.ph", CloneForBody); +} + +// Main function to perform FIRD unrolling +static void PostUnrollFird(Function &F, Loop *L, int loop_index) { + BasicBlock *ForBody = L->getHeader(); + BasicBlock *CloneForBody = + cloneBasicBlockWithRelations(ForBody, ".clone", &F); + CloneForBody->moveAfter(ForBody); + CloneForBody->getTerminator()->setSuccessor(1, ForBody); + + // Merge basic blocks + std::vector BBsToMerge; + for (int i = 1; i < 8; ++i) { + std::string BBName = (ForBody->getName() + "." 
+ std::to_string(i)).str(); + BasicBlock *ForBodyClone = getBasicBlockByName(F, BBName); + if (ForBodyClone) { + BBsToMerge.push_back(ForBodyClone); + } else { + llvm_unreachable("can't find ForBodyClone"); + } + } + if (BBsToMerge.size() == 7) { + for (BasicBlock *BB : BBsToMerge) { + MergeBasicBlockIntoOnlyPred(BB); + } + } + BasicBlock *ForBodyMerged = BBsToMerge[6]; + CloneForBody->moveAfter(ForBodyMerged); + + // Perform loop-specific modifications + if (loop_index == 1) { + modifyFirdFirstLoop(F, L, ForBodyMerged, CloneForBody); + } else if (loop_index == 2) { + modifyFirdSecondLoop(F, L, ForBodyMerged, CloneForBody); + } +} + +// Helper function to check if a loop is simple (single-level, innermost, and +// outermost) +static bool isSimpleLoop(const Loop *L) { + return L->getLoopDepth() == 1 && L->isInnermost() && L->isOutermost(); +} + +// Handle simple loops +static bool handleSimpleLoop(Function &F, Loop *L, ScalarEvolution &SE, + LoopInfo *LI, DominatorTree &DT, + AssumptionCache &AC, + const TargetTransformInfo &TTI, + OptimizationRemarkEmitter &ORE) { + if (shouldUnrollLoopWithCount(F, L, SE)) { + LLVM_DEBUG(errs() << "Unrolling loop with count\n"); + auto UnrollResult = + UnrollLoop(L, + {/*Count*/ 8, /*Force*/ true, /*Runtime*/ false, + /*AllowExpensiveTripCount*/ true, + /*UnrollRemainder*/ true, true}, + LI, &SE, &DT, &AC, &TTI, /*ORE*/ &ORE, true); + postUnrollLoopWithCount(F, L, 8); + return true; + } + + if (shouldUnrollComplexLoop(F, L, SE, DT, *LI)) { + LLVM_DEBUG(errs() << "Unrolling complex loop\n"); + auto UnrollResult = + UnrollLoop(L, + {/*Count*/ 8, /*Force*/ true, /*Runtime*/ false, + /*AllowExpensiveTripCount*/ true, + /*UnrollRemainder*/ true, true}, + LI, &SE, &DT, &AC, &TTI, /*ORE*/ &ORE, true); + postUnrollLoopWithVariable(F, L, 8); + return true; + } + + if (shouldUnrollAddcType(F, LI)) { + LLVM_DEBUG(errs() << "Unrolling ADDC type loop\n"); + unrollAddc(F, SE, L, 16); + currentUnrollType = UnrollType::ADD_ADDC_SUB_MUL_MULC_SQRT; 
+ return true; + } + + if (shouldUnrollDotprodType(F, LI)) { + LLVM_DEBUG(errs() << "Transforming dot product type loop\n"); + currentUnrollType = UnrollType::DOTPROD; + transformOneLoopDepth(F); + return true; + } + + LLVM_DEBUG(errs() << "No unrolling performed for this loop\n"); + return false; +} + +// Helper function to simplify loop and form LCSSA +static void simplifyAndFormLCSSA(Loop *L, DominatorTree &DT, LoopInfo *LI, + ScalarEvolution &SE, AssumptionCache &AC) { + simplifyLoop(L, &DT, LI, &SE, &AC, nullptr, false); + formLCSSARecursively(*L, DT, LI, &SE); +} + +// Helper function to get CONV unroll factor +static unsigned int getConvUnrollFactor(uint32_t unrollCount) { + static const unsigned int unrollFactors[] = {8, 16, 8}; + return unrollFactors[unrollCount % 3]; +} + +// Handle CONV type unrolling +static bool handleConvUnroll(Function &F, Loop *L, ScalarEvolution &SE, + LoopInfo *LI, DominatorTree &DT, + AssumptionCache &AC, + const TargetTransformInfo &TTI, + OptimizationRemarkEmitter &ORE, + uint32_t &unrollCount) { + LLVM_DEBUG(errs() << "Unrolling CONV type loop\n"); + currentUnrollType = UnrollType::CONV_CCORR; + + unsigned int unrollFactor = getConvUnrollFactor(unrollCount); + simplifyAndFormLCSSA(L, DT, LI, SE, AC); + + auto UnrollResult = + UnrollLoop(L, {unrollFactor, true, false, true, true, true}, LI, &SE, &DT, + &AC, &TTI, &ORE, true); + + unrollCount++; + return true; +} + +// Handle FIRD type unrolling +static bool handleFirdUnroll(Function &F, Loop *L, ScalarEvolution &SE, + LoopInfo *LI, DominatorTree &DT, + AssumptionCache &AC, + const TargetTransformInfo &TTI, + OptimizationRemarkEmitter &ORE, + uint32_t &unroll_times) { + LLVM_DEBUG(errs() << "Unrolling FIRD type loop\n"); + currentUnrollType = UnrollType::FIRD; + + if (unroll_times == 0) { + unroll_times++; + return false; + } + + simplifyAndFormLCSSA(L, DT, LI, SE, AC); + + auto UnrollResult = UnrollLoop(L, {8, true, false, true, true, true}, LI, &SE, + &DT, &AC, &TTI, &ORE, 
false); + + return true; +} + +// Handle innermost loops +static bool handleInnermostLoop(Function &F, Loop *L, ScalarEvolution &SE, + LoopInfo *LI, DominatorTree &DT, + AssumptionCache &AC, + const TargetTransformInfo &TTI, + OptimizationRemarkEmitter &ORE, + uint32_t &unrollCount) { + if (shouldUnrollCorr(F, LI)) { + LLVM_DEBUG(errs() << "Unrolling correlation type loop\n"); + unrollCorr(F, L, 16); + currentUnrollType = UnrollType::CORR; + return true; + } + + if (shouldUnrollFirType(F, LI) || currentUnrollType == UnrollType::FIR) { + LLVM_DEBUG(errs() << "Transforming FIR type loop\n"); + unrollFir(F, L); + currentUnrollType = UnrollType::FIR; + return true; + } + + if (shouldUnrollConvccorr(F, LI) || + currentUnrollType == UnrollType::CONV_CCORR) { + return handleConvUnroll(F, L, SE, LI, DT, AC, TTI, ORE, unrollCount); + } + + if (shouldUnrollFird(F, LI) || currentUnrollType == UnrollType::FIRD) { + return handleFirdUnroll(F, L, SE, LI, DT, AC, TTI, ORE, unrollCount); + } + + LLVM_DEBUG(errs() << "No unrolling performed for this innermost loop\n"); + return false; +} + +static LoopUnrollResult +tryToUnrollLoop(Function &F, Loop *L, DominatorTree &DT, LoopInfo *LI, + ScalarEvolution &SE, const TargetTransformInfo &TTI, + AssumptionCache &AC, OptimizationRemarkEmitter &ORE, + BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI) { + // Initialize variables + bool changed = false; + static uint32_t unrollCount = 0; + // Handle single-level loops + if (isSimpleLoop(L)) { + changed = handleSimpleLoop(F, L, SE, LI, DT, AC, TTI, ORE); + } + // Handle innermost loops + else if (L->isInnermost()) { + changed = handleInnermostLoop(F, L, SE, LI, DT, AC, TTI, ORE, unrollCount); + } + + return changed ? 
LoopUnrollResult::PartiallyUnrolled + : LoopUnrollResult::Unmodified; +} + +// Helper function to process CONV unroll type +void processConvUnroll(Function &F, const SmallVector &InnerLoops) { + static const int unroll_counts[] = {8, 16, 8}; + static int unroll_index = 0; + for (auto *L : InnerLoops) { + PostUnrollConv(F, L, unroll_counts[unroll_index], unroll_index); + unroll_index = (unroll_index + 1) % 3; + } +} + +// Helper function to process FIRD unroll type +void processFirdUnroll(Function &F, const SmallVector &InnerLoops) { + static int loop_index = 0; + for (auto &L : InnerLoops) { + if (loop_index == 0) { + loop_index++; + continue; + } + PostUnrollFird(F, L, loop_index); + loop_index++; + } +} + +static void addCommonOptimizationPasses(Function &F) { + // Create necessary analysis managers + LoopAnalysisManager LAM; + FunctionAnalysisManager FAM; + CGSCCAnalysisManager CGAM; + ModuleAnalysisManager MAM; + + // Create pass builder + PassBuilder PB; + + // Register analyses + PB.registerModuleAnalyses(MAM); + PB.registerCGSCCAnalyses(CGAM); + PB.registerFunctionAnalyses(FAM); + PB.registerLoopAnalyses(LAM); + PB.crossRegisterProxies(LAM, FAM, CGAM, MAM); + + // Create function-level optimization pipeline + FunctionPassManager FPM; + + if (currentUnrollType == UnrollType::CORR || + currentUnrollType == UnrollType::FIRD) + FPM.addPass(createFunctionToLoopPassAdaptor(LoopStrengthReducePass())); + FPM.addPass(EarlyCSEPass(true)); + FPM.addPass(ReassociatePass()); + + FPM.run(F, FAM); +} + +static void addLegacyCommonOptimizationPasses(Function &F) { + legacy::FunctionPassManager FPM(F.getParent()); + FPM.add(createLoopSimplifyPass()); + FPM.add(createLICMPass()); // Loop Invariant Code Motion + + // Add SimplifyCFG pass with common options + FPM.add(createCFGSimplificationPass( + SimplifyCFGOptions() + .bonusInstThreshold(1) // Set instruction bonus threshold + .forwardSwitchCondToPhi( + true) // Allow forwarding switch conditions to phi + 
.convertSwitchToLookupTable( + true) // Allow converting switch to lookup table + .needCanonicalLoops(false) // Don't require canonical loop form + .hoistCommonInsts(true) // Hoist common instructions + .sinkCommonInsts(true) // Sink common instructions + )); + + // Initialize and run passes + FPM.doInitialization(); + FPM.run(F); + FPM.doFinalization(); +} + +PreservedAnalyses +RISCVLoopUnrollAndRemainderPass::run(Function &F, FunctionAnalysisManager &AM) { + if (!EnableRISCVLoopUnrollAndRemainder || F.arg_empty()) + return PreservedAnalyses::all(); + + addnoalias(F); + auto &LI = AM.getResult(F); + if (LI.empty()) + return PreservedAnalyses::all(); + + // Retrieve necessary analysis results + auto &SE = AM.getResult(F); + auto &TTI = AM.getResult(F); + auto &DT = AM.getResult(F); + auto &AC = AM.getResult(F); + auto &ORE = AM.getResult(F); + + LoopAnalysisManager *LAM = nullptr; + if (auto *LAMProxy = AM.getCachedResult(F)) + LAM = &LAMProxy->getManager(); + + auto &MAMProxy = AM.getResult(F); + ProfileSummaryInfo *PSI = + MAMProxy.getCachedResult(*F.getParent()); + auto *BFI = (PSI && PSI->hasProfileSummary()) + ? 
&AM.getResult(F) + : nullptr; + + bool Changed = false; + + // Process loops in reverse order of LoopInfo + SmallPriorityWorklist Worklist; + appendLoopsToWorklist(LI, Worklist); + SmallVector InnerLoops; + + while (!Worklist.empty()) { + Loop &L = *Worklist.pop_back_val(); + if (L.getBlocks().empty()) { + LLVM_DEBUG(errs() << "Skipping empty loop\n"); + continue; + } + + std::string LoopName = std::string(L.getName()); + if (L.getName().contains(".clone")) + continue; + + if (L.isInnermost()) { + InnerLoops.push_back(&L); + } + + LoopUnrollResult Result = + tryToUnrollLoop(F, &L, DT, &LI, SE, TTI, AC, ORE, BFI, PSI); + Changed |= Result != LoopUnrollResult::Unmodified; + + // Clear cached analysis results if loop was fully unrolled + if (LAM && Result == LoopUnrollResult::FullyUnrolled) + LAM->clear(L, LoopName); + } + + // Post-processing for specific unroll types + if (currentUnrollType == UnrollType::CONV_CCORR) { + processConvUnroll(F, InnerLoops); + } else if (currentUnrollType == UnrollType::FIRD) { + processFirdUnroll(F, InnerLoops); + } + + // Run dead code elimination + runDeadCodeElimination(F); + if (currentUnrollType != UnrollType::FIR) + addCommonOptimizationPasses(F); + if (currentUnrollType == UnrollType::FIRD) { + addLegacyCommonOptimizationPasses(F); + } + // Verify function + if (verifyFunction(F, &errs())) { + LLVM_DEBUG(errs() << "Function verification failed\n"); + report_fatal_error("Function verification failed"); + } + + return Changed ? getLoopPassPreservedAnalyses() : PreservedAnalyses::all(); +} diff --git a/llvm/lib/Target/RISCV/RISCVLoopUnrollAndRemainder.h b/llvm/lib/Target/RISCV/RISCVLoopUnrollAndRemainder.h new file mode 100644 index 0000000000000..9e941cae210ad --- /dev/null +++ b/llvm/lib/Target/RISCV/RISCVLoopUnrollAndRemainder.h @@ -0,0 +1,42 @@ +//===- RISCVLoopUnrollAndRemainder.h - Loop Unrolling and Remainder Handling +//------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// RISCVLoopUnrollAndRemainder pass +// +// This pass performs loop unrolling and handles the remainder iterations. +// It aims to improve performance by: +// 1. Unrolling loops to reduce loop overhead and enable further optimizations +// 2. Generating efficient code for handling any remaining iterations +// +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_UTILS_RISCVLOOPUNROLLANDREMAINDER_H +#define LLVM_TRANSFORMS_UTILS_RISCVLOOPUNROLLANDREMAINDER_H + +#include "llvm/Analysis/IVDescriptors.h" +#include "llvm/IR/PassManager.h" + +namespace llvm { +class RecurrenceDescriptor; +extern cl::opt EnableRISCVLoopUnrollAndRemainder; +class Function; + +struct RISCVLoopUnrollAndRemainderPass + : public PassInfoMixin { + RISCVLoopUnrollAndRemainderPass() {} + + PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); + + static bool isRequired() { return true; } +}; + +} // namespace llvm + +#endif // LLVM_TRANSFORMS_UTILS_RISCVLOOPUNROLLANDREMAINDER_H diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp index f92fd1a06be38..86c99e1f30af7 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp @@ -14,6 +14,7 @@ #include "MCTargetDesc/RISCVBaseInfo.h" #include "RISCV.h" #include "RISCVCustomLICM.h" +#include "RISCVLoopUnrollAndRemainder.h" #include "RISCVMachineFunctionInfo.h" #include "RISCVSplitLoopByLength.h" #include "RISCVTargetObjectFile.h" @@ -599,6 +600,10 @@ void RISCVTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) { FPM.addPass(RISCVCustomLICMPass()); return true; } + if (Name == "riscv-loop-unroll-and-remainder") { + FPM.addPass(RISCVLoopUnrollAndRemainderPass()); + 
return true; + } return false; }); @@ -607,9 +612,11 @@ void RISCVTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) { if(EnableEsp32P4Optimize && (Level == OptimizationLevel::O3 || Level == OptimizationLevel::O2)){ EnableRISCVSplitLoopByLength = true; EnableRISCVCustomLICM = true; + EnableRISCVLoopUnrollAndRemainder = true; FunctionPassManager FPM; FPM.addPass(RISCVSplitLoopByLengthPass()); FPM.addPass(RISCVCustomLICMPass()); + FPM.addPass(RISCVLoopUnrollAndRemainderPass()); PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); } }); diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/add.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/add.ll index 3960501c6ff11..a608ae2933aec 100644 --- a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/add.ll +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/add.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=true < %s | FileCheck %s define dso_local noundef i32 @dsps_add_f32_ansi(ptr noundef readonly %input1, ptr noundef readonly %input2, ptr noundef writeonly %output, i32 noundef %len, i32 noundef %step1, i32 noundef %step2, i32 noundef %step_out) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_add_f32_ansi( -; CHECK-SAME: ptr noundef readonly [[INPUT1:%.*]], ptr noundef readonly [[INPUT2:%.*]], ptr noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[STEP2:%.*]], i32 noundef [[STEP_OUT:%.*]]) local_unnamed_addr { +; CHECK-SAME: ptr noalias noundef readonly [[INPUT1:%.*]], ptr noalias noundef readonly [[INPUT2:%.*]], ptr noalias noundef writeonly [[OUTPUT:%.*]], i32 noundef 
[[LEN:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[STEP2:%.*]], i32 noundef [[STEP_OUT:%.*]]) local_unnamed_addr { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[INPUT1]], null ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[INPUT2]], null @@ -12,19 +12,159 @@ define dso_local noundef i32 @dsps_add_f32_ansi(ptr noundef readonly %input1, pt ; CHECK-NEXT: br i1 [[OR_COND19]], label [[RETURN:%.*]], label [[IF_END:%.*]] ; CHECK: if.end: ; CHECK-NEXT: [[CMP41:%.*]] = icmp sgt i32 [[LEN]], 2 -; CHECK-NEXT: br i1 [[CMP41]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[CMP41]], label [[FOR_COND_PREHEADER_NEW:%.*]], label [[FOR_COND_PREHEADER:%.*]] ; CHECK: for.cond.preheader: ; CHECK-NEXT: [[CMP720:%.*]] = icmp sgt i32 [[LEN]], 0 ; CHECK-NEXT: br i1 [[CMP720]], label [[FOR_BODY_CLONE:%.*]], label [[RETURN]] +; CHECK: for.cond.preheader.new: +; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[LEN]], -16 +; CHECK-NEXT: [[CMP6_NOT207:%.*]] = icmp ult i32 [[LEN]], 16 +; CHECK-NEXT: br i1 [[CMP6_NOT207]], label [[FOR_COND_PREHEADER_NEW2:%.*]], label [[FOR_BODY_MODIFY:%.*]] +; CHECK: for.cond.preheader.new2: +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[TMP1:%.*]], [[FOR_BODY_MODIFY]] ], [ 0, [[FOR_COND_PREHEADER_NEW]] ] +; CHECK-NEXT: [[CMP85209:%.*]] = icmp slt i32 [[TMP0]], [[LEN]] +; CHECK-NEXT: br i1 [[CMP85209]], label [[FOR_BODY:%.*]], label [[RETURN]] +; CHECK: for.body.modify: +; CHECK-NEXT: [[I_021_MODIFY:%.*]] = phi i32 [ [[TMP1]], [[FOR_BODY_MODIFY]] ], [ 0, [[FOR_COND_PREHEADER_NEW]] ] +; CHECK-NEXT: [[TMP1]] = add nuw i32 [[I_021_MODIFY]], 16 +; CHECK-NEXT: [[ADD2:%.*]] = or disjoint i32 [[I_021_MODIFY]], 1 +; CHECK-NEXT: [[ADD7:%.*]] = or disjoint i32 [[I_021_MODIFY]], 2 +; CHECK-NEXT: [[ADD13:%.*]] = or disjoint i32 [[I_021_MODIFY]], 3 +; CHECK-NEXT: [[ADD18:%.*]] = or disjoint i32 [[I_021_MODIFY]], 4 +; CHECK-NEXT: [[ADD23:%.*]] = or disjoint i32 [[I_021_MODIFY]], 5 +; CHECK-NEXT: [[ADD28:%.*]] = or disjoint i32 
[[I_021_MODIFY]], 6 +; CHECK-NEXT: [[ADD33:%.*]] = or disjoint i32 [[I_021_MODIFY]], 7 +; CHECK-NEXT: [[ADD38:%.*]] = or disjoint i32 [[I_021_MODIFY]], 8 +; CHECK-NEXT: [[ADD43:%.*]] = or disjoint i32 [[I_021_MODIFY]], 9 +; CHECK-NEXT: [[ADD48:%.*]] = or disjoint i32 [[I_021_MODIFY]], 10 +; CHECK-NEXT: [[ADD53:%.*]] = or disjoint i32 [[I_021_MODIFY]], 11 +; CHECK-NEXT: [[ADD58:%.*]] = or disjoint i32 [[I_021_MODIFY]], 12 +; CHECK-NEXT: [[ADD63:%.*]] = or disjoint i32 [[I_021_MODIFY]], 13 +; CHECK-NEXT: [[ADD68:%.*]] = or disjoint i32 [[I_021_MODIFY]], 14 +; CHECK-NEXT: [[ADD73:%.*]] = or disjoint i32 [[I_021_MODIFY]], 15 +; CHECK-NEXT: [[ARRAYIDX_MODIFY:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[I_021_MODIFY]] +; CHECK-NEXT: [[ARRAYIDX9_MODIFY:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[I_021_MODIFY]] +; CHECK-NEXT: [[ARRAYIDX11_MODIFY:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_021_MODIFY]] +; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD2]] +; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD2]] +; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD2]] +; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD7]] +; CHECK-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD7]] +; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD7]] +; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD13]] +; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD13]] +; CHECK-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD13]] +; CHECK-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD18]] +; CHECK-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 
[[ADD18]] +; CHECK-NEXT: [[ARRAYIDX22:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD18]] +; CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD23]] +; CHECK-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD23]] +; CHECK-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD23]] +; CHECK-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD28]] +; CHECK-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD28]] +; CHECK-NEXT: [[ARRAYIDX32:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD28]] +; CHECK-NEXT: [[ARRAYIDX34:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD33]] +; CHECK-NEXT: [[ARRAYIDX35:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD33]] +; CHECK-NEXT: [[ARRAYIDX37:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD33]] +; CHECK-NEXT: [[ARRAYIDX39:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD38]] +; CHECK-NEXT: [[ARRAYIDX40:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD38]] +; CHECK-NEXT: [[ARRAYIDX42:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD38]] +; CHECK-NEXT: [[ARRAYIDX44:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD43]] +; CHECK-NEXT: [[ARRAYIDX45:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD43]] +; CHECK-NEXT: [[ARRAYIDX47:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD43]] +; CHECK-NEXT: [[ARRAYIDX49:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD48]] +; CHECK-NEXT: [[ARRAYIDX50:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD48]] +; CHECK-NEXT: [[ARRAYIDX52:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD48]] +; CHECK-NEXT: [[ARRAYIDX54:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD53]] +; CHECK-NEXT: [[ARRAYIDX55:%.*]] = getelementptr inbounds float, ptr 
[[INPUT2]], i32 [[ADD53]] +; CHECK-NEXT: [[ARRAYIDX57:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD53]] +; CHECK-NEXT: [[ARRAYIDX59:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD58]] +; CHECK-NEXT: [[ARRAYIDX60:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD58]] +; CHECK-NEXT: [[ARRAYIDX62:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD58]] +; CHECK-NEXT: [[ARRAYIDX64:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD63]] +; CHECK-NEXT: [[ARRAYIDX65:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD63]] +; CHECK-NEXT: [[ARRAYIDX67:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD63]] +; CHECK-NEXT: [[ARRAYIDX69:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD68]] +; CHECK-NEXT: [[ARRAYIDX70:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD68]] +; CHECK-NEXT: [[ARRAYIDX72:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD68]] +; CHECK-NEXT: [[ARRAYIDX74:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD73]] +; CHECK-NEXT: [[ARRAYIDX75:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD73]] +; CHECK-NEXT: [[ARRAYIDX77:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD73]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX_MODIFY]], align 4 +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX9_MODIFY]], align 4 +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX3]], align 4 +; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX8]], align 4 +; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX10]], align 4 +; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX14]], align 4 +; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX15]], align 4 +; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX19]], align 4 +; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX20]], align 4 +; CHECK-NEXT: 
[[TMP12:%.*]] = load float, ptr [[ARRAYIDX24]], align 4 +; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX25]], align 4 +; CHECK-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX29]], align 4 +; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX30]], align 4 +; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX34]], align 4 +; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX35]], align 4 +; CHECK-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX39]], align 4 +; CHECK-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX40]], align 4 +; CHECK-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX44]], align 4 +; CHECK-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX45]], align 4 +; CHECK-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX49]], align 4 +; CHECK-NEXT: [[TMP23:%.*]] = load float, ptr [[ARRAYIDX50]], align 4 +; CHECK-NEXT: [[TMP24:%.*]] = load float, ptr [[ARRAYIDX54]], align 4 +; CHECK-NEXT: [[TMP25:%.*]] = load float, ptr [[ARRAYIDX55]], align 4 +; CHECK-NEXT: [[TMP26:%.*]] = load float, ptr [[ARRAYIDX59]], align 4 +; CHECK-NEXT: [[TMP27:%.*]] = load float, ptr [[ARRAYIDX60]], align 4 +; CHECK-NEXT: [[TMP28:%.*]] = load float, ptr [[ARRAYIDX64]], align 4 +; CHECK-NEXT: [[TMP29:%.*]] = load float, ptr [[ARRAYIDX65]], align 4 +; CHECK-NEXT: [[TMP30:%.*]] = load float, ptr [[ARRAYIDX69]], align 4 +; CHECK-NEXT: [[TMP31:%.*]] = load float, ptr [[ARRAYIDX70]], align 4 +; CHECK-NEXT: [[TMP32:%.*]] = load float, ptr [[ARRAYIDX74]], align 4 +; CHECK-NEXT: [[TMP33:%.*]] = load float, ptr [[ARRAYIDX75]], align 4 +; CHECK-NEXT: [[ADD_MODIFY:%.*]] = fadd float [[TMP2]], [[TMP3]] +; CHECK-NEXT: [[ADD5:%.*]] = fadd float [[TMP4]], [[TMP5]] +; CHECK-NEXT: [[ADD11:%.*]] = fadd float [[TMP6]], [[TMP7]] +; CHECK-NEXT: [[ADD16:%.*]] = fadd float [[TMP8]], [[TMP9]] +; CHECK-NEXT: [[ADD21:%.*]] = fadd float [[TMP10]], [[TMP11]] +; CHECK-NEXT: [[ADD26:%.*]] = fadd float [[TMP12]], [[TMP13]] +; CHECK-NEXT: [[ADD31:%.*]] = fadd float [[TMP14]], [[TMP15]] +; CHECK-NEXT: 
[[ADD36:%.*]] = fadd float [[TMP16]], [[TMP17]] +; CHECK-NEXT: [[ADD41:%.*]] = fadd float [[TMP18]], [[TMP19]] +; CHECK-NEXT: [[ADD46:%.*]] = fadd float [[TMP20]], [[TMP21]] +; CHECK-NEXT: [[ADD51:%.*]] = fadd float [[TMP22]], [[TMP23]] +; CHECK-NEXT: [[ADD56:%.*]] = fadd float [[TMP24]], [[TMP25]] +; CHECK-NEXT: [[ADD61:%.*]] = fadd float [[TMP26]], [[TMP27]] +; CHECK-NEXT: [[ADD66:%.*]] = fadd float [[TMP28]], [[TMP29]] +; CHECK-NEXT: [[ADD71:%.*]] = fadd float [[TMP30]], [[TMP31]] +; CHECK-NEXT: [[ADD76:%.*]] = fadd float [[TMP32]], [[TMP33]] +; CHECK-NEXT: store float [[ADD_MODIFY]], ptr [[ARRAYIDX11_MODIFY]], align 4 +; CHECK-NEXT: store float [[ADD5]], ptr [[ARRAYIDX6]], align 4 +; CHECK-NEXT: store float [[ADD11]], ptr [[ARRAYIDX12]], align 4 +; CHECK-NEXT: store float [[ADD16]], ptr [[ARRAYIDX17]], align 4 +; CHECK-NEXT: store float [[ADD21]], ptr [[ARRAYIDX22]], align 4 +; CHECK-NEXT: store float [[ADD26]], ptr [[ARRAYIDX27]], align 4 +; CHECK-NEXT: store float [[ADD31]], ptr [[ARRAYIDX32]], align 4 +; CHECK-NEXT: store float [[ADD36]], ptr [[ARRAYIDX37]], align 4 +; CHECK-NEXT: store float [[ADD41]], ptr [[ARRAYIDX42]], align 4 +; CHECK-NEXT: store float [[ADD46]], ptr [[ARRAYIDX47]], align 4 +; CHECK-NEXT: store float [[ADD51]], ptr [[ARRAYIDX52]], align 4 +; CHECK-NEXT: store float [[ADD56]], ptr [[ARRAYIDX57]], align 4 +; CHECK-NEXT: store float [[ADD61]], ptr [[ARRAYIDX62]], align 4 +; CHECK-NEXT: store float [[ADD66]], ptr [[ARRAYIDX67]], align 4 +; CHECK-NEXT: store float [[ADD71]], ptr [[ARRAYIDX72]], align 4 +; CHECK-NEXT: store float [[ADD76]], ptr [[ARRAYIDX77]], align 4 +; CHECK-NEXT: [[EXITCOND_NOT_MODIFY:%.*]] = icmp sgt i32 [[TMP1]], [[SUB]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_MODIFY]], label [[FOR_COND_PREHEADER_NEW2]], label [[FOR_BODY_MODIFY]] ; CHECK: for.body: -; CHECK-NEXT: [[I_021:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[IF_END]] ] +; CHECK-NEXT: [[I_021:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[TMP0]], 
[[FOR_COND_PREHEADER_NEW2]] ] ; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_021]], [[STEP1]] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL]] -; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[TMP34:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[MUL8:%.*]] = mul nsw i32 [[I_021]], [[STEP2]] ; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[MUL8]] -; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX9]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[TMP1]] +; CHECK-NEXT: [[TMP35:%.*]] = load float, ptr [[ARRAYIDX9]], align 4 +; CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP34]], [[TMP35]] ; CHECK-NEXT: [[MUL10:%.*]] = mul nsw i32 [[I_021]], [[STEP_OUT]] ; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL10]] ; CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX11]], align 4 @@ -35,11 +175,11 @@ define dso_local noundef i32 @dsps_add_f32_ansi(ptr noundef readonly %input1, pt ; CHECK-NEXT: [[I_021_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] ; CHECK-NEXT: [[MUL_CLONE:%.*]] = mul nsw i32 [[I_021_CLONE]], [[STEP1]] ; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL_CLONE]] -; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[TMP36:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 ; CHECK-NEXT: [[MUL8_CLONE:%.*]] = mul nsw i32 [[I_021_CLONE]], [[STEP2]] ; CHECK-NEXT: [[ARRAYIDX9_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[MUL8_CLONE]] -; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX9_CLONE]], align 4 -; CHECK-NEXT: [[ADD_CLONE:%.*]] = fadd float [[TMP2]], [[TMP3]] +; CHECK-NEXT: [[TMP37:%.*]] = load float, ptr [[ARRAYIDX9_CLONE]], align 4 +; CHECK-NEXT: [[ADD_CLONE:%.*]] = fadd float [[TMP36]], [[TMP37]] ; CHECK-NEXT: [[MUL10_CLONE:%.*]] = 
mul nsw i32 [[I_021_CLONE]], [[STEP_OUT]] ; CHECK-NEXT: [[ARRAYIDX11_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL10_CLONE]] ; CHECK-NEXT: store float [[ADD_CLONE]], ptr [[ARRAYIDX11_CLONE]], align 4 @@ -47,7 +187,7 @@ define dso_local noundef i32 @dsps_add_f32_ansi(ptr noundef readonly %input1, pt ; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] ; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[RETURN]], label [[FOR_BODY_CLONE]] ; CHECK: return: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER_NEW2]] ] ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/addc.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/addc.ll index dd35ce0373fc6..bf98ec71686bc 100644 --- a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/addc.ll +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/addc.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=true < %s | FileCheck %s define dso_local noundef i32 @dsps_addc_f32_ansi(ptr noundef readonly %input, ptr noundef writeonly %output, i32 noundef %len, float noundef %C, i32 noundef %step_in, i32 noundef %step_out) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_addc_f32_ansi( -; CHECK-SAME: ptr noundef readonly [[INPUT:%.*]], ptr noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], float noundef 
[[C:%.*]], i32 noundef [[STEP_IN:%.*]], i32 noundef [[STEP_OUT:%.*]]) local_unnamed_addr { +; CHECK-SAME: ptr noalias noundef readonly [[INPUT:%.*]], ptr noalias noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], float noundef [[C:%.*]], i32 noundef [[STEP_IN:%.*]], i32 noundef [[STEP_OUT:%.*]]) local_unnamed_addr { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[INPUT]], null ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[OUTPUT]], null @@ -10,16 +10,124 @@ define dso_local noundef i32 @dsps_addc_f32_ansi(ptr noundef readonly %input, pt ; CHECK-NEXT: br i1 [[OR_COND]], label [[RETURN:%.*]], label [[IF_END:%.*]] ; CHECK: if.end: ; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[LEN]], 2 -; CHECK-NEXT: br i1 [[CMP4]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[CMP4]], label [[FOR_COND_PREHEADER_NEW:%.*]], label [[FOR_COND_PREHEADER:%.*]] ; CHECK: for.cond.preheader: ; CHECK-NEXT: [[CMP412:%.*]] = icmp sgt i32 [[LEN]], 0 ; CHECK-NEXT: br i1 [[CMP412]], label [[FOR_BODY_CLONE:%.*]], label [[RETURN]] +; CHECK: for.cond.preheader.new: +; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[LEN]], -16 +; CHECK-NEXT: [[CMP6_NOT207:%.*]] = icmp ult i32 [[LEN]], 16 +; CHECK-NEXT: br i1 [[CMP6_NOT207]], label [[FOR_COND_PREHEADER_NEW2:%.*]], label [[FOR_BODY_MODIFY:%.*]] +; CHECK: for.cond.preheader.new2: +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[TMP1:%.*]], [[FOR_BODY_MODIFY]] ], [ 0, [[FOR_COND_PREHEADER_NEW]] ] +; CHECK-NEXT: [[CMP85209:%.*]] = icmp slt i32 [[TMP0]], [[LEN]] +; CHECK-NEXT: br i1 [[CMP85209]], label [[FOR_BODY:%.*]], label [[RETURN]] +; CHECK: for.body.modify: +; CHECK-NEXT: [[I_013_MODIFY:%.*]] = phi i32 [ [[TMP1]], [[FOR_BODY_MODIFY]] ], [ 0, [[FOR_COND_PREHEADER_NEW]] ] +; CHECK-NEXT: [[TMP1]] = add nuw i32 [[I_013_MODIFY]], 16 +; CHECK-NEXT: [[ADD2:%.*]] = or disjoint i32 [[I_013_MODIFY]], 1 +; CHECK-NEXT: [[ADD6:%.*]] = or disjoint i32 [[I_013_MODIFY]], 2 +; CHECK-NEXT: [[ADD10:%.*]] = or disjoint i32 
[[I_013_MODIFY]], 3 +; CHECK-NEXT: [[ADD14:%.*]] = or disjoint i32 [[I_013_MODIFY]], 4 +; CHECK-NEXT: [[ADD18:%.*]] = or disjoint i32 [[I_013_MODIFY]], 5 +; CHECK-NEXT: [[ADD22:%.*]] = or disjoint i32 [[I_013_MODIFY]], 6 +; CHECK-NEXT: [[ADD26:%.*]] = or disjoint i32 [[I_013_MODIFY]], 7 +; CHECK-NEXT: [[ADD30:%.*]] = or disjoint i32 [[I_013_MODIFY]], 8 +; CHECK-NEXT: [[ADD34:%.*]] = or disjoint i32 [[I_013_MODIFY]], 9 +; CHECK-NEXT: [[ADD38:%.*]] = or disjoint i32 [[I_013_MODIFY]], 10 +; CHECK-NEXT: [[ADD42:%.*]] = or disjoint i32 [[I_013_MODIFY]], 11 +; CHECK-NEXT: [[ADD46:%.*]] = or disjoint i32 [[I_013_MODIFY]], 12 +; CHECK-NEXT: [[ADD50:%.*]] = or disjoint i32 [[I_013_MODIFY]], 13 +; CHECK-NEXT: [[ADD54:%.*]] = or disjoint i32 [[I_013_MODIFY]], 14 +; CHECK-NEXT: [[ADD58:%.*]] = or disjoint i32 [[I_013_MODIFY]], 15 +; CHECK-NEXT: [[ARRAYIDX_MODIFY:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_013_MODIFY]] +; CHECK-NEXT: [[ARRAYIDX6_MODIFY:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_013_MODIFY]] +; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD2]] +; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD2]] +; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD6]] +; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD6]] +; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD10]] +; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD10]] +; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD14]] +; CHECK-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD14]] +; CHECK-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD18]] +; CHECK-NEXT: [[ARRAYIDX21:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD18]] +; 
CHECK-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD22]] +; CHECK-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD22]] +; CHECK-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD26]] +; CHECK-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD26]] +; CHECK-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD30]] +; CHECK-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD30]] +; CHECK-NEXT: [[ARRAYIDX35:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD34]] +; CHECK-NEXT: [[ARRAYIDX37:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD34]] +; CHECK-NEXT: [[ARRAYIDX39:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD38]] +; CHECK-NEXT: [[ARRAYIDX41:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD38]] +; CHECK-NEXT: [[ARRAYIDX43:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD42]] +; CHECK-NEXT: [[ARRAYIDX45:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD42]] +; CHECK-NEXT: [[ARRAYIDX47:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD46]] +; CHECK-NEXT: [[ARRAYIDX49:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD46]] +; CHECK-NEXT: [[ARRAYIDX51:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD50]] +; CHECK-NEXT: [[ARRAYIDX53:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD50]] +; CHECK-NEXT: [[ARRAYIDX55:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD54]] +; CHECK-NEXT: [[ARRAYIDX57:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD54]] +; CHECK-NEXT: [[ARRAYIDX59:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD58]] +; CHECK-NEXT: [[ARRAYIDX61:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD58]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX_MODIFY]], align 4 +; CHECK-NEXT: 
[[TMP3:%.*]] = load float, ptr [[ARRAYIDX3]], align 4 +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX7]], align 4 +; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX11]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX15]], align 4 +; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX19]], align 4 +; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX23]], align 4 +; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX27]], align 4 +; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX31]], align 4 +; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX35]], align 4 +; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX39]], align 4 +; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX43]], align 4 +; CHECK-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX47]], align 4 +; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX51]], align 4 +; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX55]], align 4 +; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX59]], align 4 +; CHECK-NEXT: [[ADD_MODIFY:%.*]] = fadd float [[C]], [[TMP2]] +; CHECK-NEXT: [[ADD4:%.*]] = fadd float [[C]], [[TMP3]] +; CHECK-NEXT: [[ADD8:%.*]] = fadd float [[C]], [[TMP4]] +; CHECK-NEXT: [[ADD12:%.*]] = fadd float [[C]], [[TMP5]] +; CHECK-NEXT: [[ADD16:%.*]] = fadd float [[C]], [[TMP6]] +; CHECK-NEXT: [[ADD20:%.*]] = fadd float [[C]], [[TMP7]] +; CHECK-NEXT: [[ADD24:%.*]] = fadd float [[C]], [[TMP8]] +; CHECK-NEXT: [[ADD28:%.*]] = fadd float [[C]], [[TMP9]] +; CHECK-NEXT: [[ADD32:%.*]] = fadd float [[C]], [[TMP10]] +; CHECK-NEXT: [[ADD36:%.*]] = fadd float [[C]], [[TMP11]] +; CHECK-NEXT: [[ADD40:%.*]] = fadd float [[C]], [[TMP12]] +; CHECK-NEXT: [[ADD44:%.*]] = fadd float [[C]], [[TMP13]] +; CHECK-NEXT: [[ADD48:%.*]] = fadd float [[C]], [[TMP14]] +; CHECK-NEXT: [[ADD52:%.*]] = fadd float [[C]], [[TMP15]] +; CHECK-NEXT: [[ADD56:%.*]] = fadd float [[C]], [[TMP16]] +; CHECK-NEXT: [[ADD60:%.*]] = fadd float [[C]], [[TMP17]] +; CHECK-NEXT: 
store float [[ADD_MODIFY]], ptr [[ARRAYIDX6_MODIFY]], align 4 +; CHECK-NEXT: store float [[ADD4]], ptr [[ARRAYIDX5]], align 4 +; CHECK-NEXT: store float [[ADD8]], ptr [[ARRAYIDX9]], align 4 +; CHECK-NEXT: store float [[ADD12]], ptr [[ARRAYIDX13]], align 4 +; CHECK-NEXT: store float [[ADD16]], ptr [[ARRAYIDX17]], align 4 +; CHECK-NEXT: store float [[ADD20]], ptr [[ARRAYIDX21]], align 4 +; CHECK-NEXT: store float [[ADD24]], ptr [[ARRAYIDX25]], align 4 +; CHECK-NEXT: store float [[ADD28]], ptr [[ARRAYIDX29]], align 4 +; CHECK-NEXT: store float [[ADD32]], ptr [[ARRAYIDX33]], align 4 +; CHECK-NEXT: store float [[ADD36]], ptr [[ARRAYIDX37]], align 4 +; CHECK-NEXT: store float [[ADD40]], ptr [[ARRAYIDX41]], align 4 +; CHECK-NEXT: store float [[ADD44]], ptr [[ARRAYIDX45]], align 4 +; CHECK-NEXT: store float [[ADD48]], ptr [[ARRAYIDX49]], align 4 +; CHECK-NEXT: store float [[ADD52]], ptr [[ARRAYIDX53]], align 4 +; CHECK-NEXT: store float [[ADD56]], ptr [[ARRAYIDX57]], align 4 +; CHECK-NEXT: store float [[ADD60]], ptr [[ARRAYIDX61]], align 4 +; CHECK-NEXT: [[EXITCOND_NOT_MODIFY:%.*]] = icmp sgt i32 [[TMP1]], [[SUB]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_MODIFY]], label [[FOR_COND_PREHEADER_NEW2]], label [[FOR_BODY_MODIFY]] ; CHECK: for.body: -; CHECK-NEXT: [[I_013:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[IF_END]] ] +; CHECK-NEXT: [[I_013:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[TMP0]], [[FOR_COND_PREHEADER_NEW2]] ] ; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_013]], [[STEP_IN]] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[MUL]] -; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[C]] +; CHECK-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[ADD:%.*]] = fadd float [[C]], [[TMP18]] ; CHECK-NEXT: [[MUL5:%.*]] = mul nsw i32 [[I_013]], [[STEP_OUT]] ; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], 
i32 [[MUL5]] ; CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX6]], align 4 @@ -30,8 +138,8 @@ define dso_local noundef i32 @dsps_addc_f32_ansi(ptr noundef readonly %input, pt ; CHECK-NEXT: [[I_013_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] ; CHECK-NEXT: [[MUL_CLONE:%.*]] = mul nsw i32 [[I_013_CLONE]], [[STEP_IN]] ; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[MUL_CLONE]] -; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 -; CHECK-NEXT: [[ADD_CLONE:%.*]] = fadd float [[TMP1]], [[C]] +; CHECK-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[ADD_CLONE:%.*]] = fadd float [[C]], [[TMP19]] ; CHECK-NEXT: [[MUL5_CLONE:%.*]] = mul nsw i32 [[I_013_CLONE]], [[STEP_OUT]] ; CHECK-NEXT: [[ARRAYIDX6_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL5_CLONE]] ; CHECK-NEXT: store float [[ADD_CLONE]], ptr [[ARRAYIDX6_CLONE]], align 4 @@ -39,7 +147,7 @@ define dso_local noundef i32 @dsps_addc_f32_ansi(ptr noundef readonly %input, pt ; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] ; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[RETURN]], label [[FOR_BODY_CLONE]] ; CHECK: return: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER_NEW2]] ] ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/ccorr.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/ccorr.ll index 11c9c556d526e..0432a51dfbb38 100644 --- a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/ccorr.ll +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/ccorr.ll @@ -1,8 +1,8 @@ ; NOTE: 
Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=true < %s | FileCheck %s define dso_local noundef i32 @dsps_ccorr_f32_ansi(ptr noundef readonly %Signal, i32 noundef %siglen, ptr noundef readonly %Kernel, i32 noundef %kernlen, ptr noundef writeonly %corrvout) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_ccorr_f32_ansi( -; CHECK-SAME: ptr noundef readonly [[SIGNAL:%.*]], i32 noundef [[SIGLEN:%.*]], ptr noundef readonly [[KERNEL:%.*]], i32 noundef [[KERNLEN:%.*]], ptr noundef writeonly [[CORRVOUT:%.*]]) local_unnamed_addr { +; CHECK-SAME: ptr noalias noundef readonly [[SIGNAL:%.*]], i32 noundef [[SIGLEN:%.*]], ptr noalias noundef readonly [[KERNEL:%.*]], i32 noundef [[KERNLEN:%.*]], ptr noalias noundef writeonly [[CORRVOUT:%.*]]) local_unnamed_addr { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[SIGNAL]], null ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[KERNEL]], null @@ -21,36 +21,131 @@ define dso_local noundef i32 @dsps_ccorr_f32_ansi(ptr noundef readonly %Signal, ; CHECK-NEXT: [[KERN_0:%.*]] = phi ptr [ [[SIGNAL]], [[IF_THEN8]] ], [ [[KERNEL]], [[IF_END6]] ] ; CHECK-NEXT: [[SIG_0:%.*]] = phi ptr [ [[KERNEL]], [[IF_THEN8]] ], [ [[SIGNAL]], [[IF_END6]] ] ; CHECK-NEXT: [[CMP10124:%.*]] = icmp sgt i32 [[LKERN_0]], 0 -; CHECK-NEXT: br i1 [[CMP10124]], label [[FOR_BODY:%.*]], label [[FOR_COND22_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[CMP10124]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND22_PREHEADER:%.*]] +; CHECK: for.body.preheader: +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.cond22.preheader.loopexit: +; CHECK-NEXT: br label [[FOR_COND22_PREHEADER]] ; CHECK: for.cond22.preheader: ; CHECK-NEXT: 
[[CMP23128:%.*]] = icmp slt i32 [[LKERN_0]], [[LSIG_0]] -; CHECK-NEXT: br i1 [[CMP23128]], label [[FOR_BODY25:%.*]], label [[FOR_COND45_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[CMP23128]], label [[FOR_BODY25_PREHEADER:%.*]], label [[FOR_COND45_PREHEADER:%.*]] +; CHECK: for.body25.preheader: +; CHECK-NEXT: [[DIV536:%.*]] = and i32 [[LKERN_0]], -16 +; CHECK-NEXT: br label [[FOR_BODY25:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_END:%.*]] ], [ 1, [[IF_END9]] ] -; CHECK-NEXT: [[N_0125:%.*]] = phi i32 [ [[INC19:%.*]], [[FOR_END]] ], [ 0, [[IF_END9]] ] -; CHECK-NEXT: [[TMP0:%.*]] = xor i32 [[N_0125]], -1 -; CHECK-NEXT: [[SUB11:%.*]] = add nsw i32 [[LKERN_0]], [[TMP0]] +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_END:%.*]] ], [ 1, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[N_0125:%.*]] = phi i32 [ [[INC19:%.*]], [[FOR_END]] ], [ 0, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[TMP0:%.*]] = and i32 [[N_0125]], -8 +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[N_0125]], 2147483640 +; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = xor i32 [[N_0125]], -1 +; CHECK-NEXT: [[SUB11:%.*]] = add nsw i32 [[TMP3]], [[LKERN_0]] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[CORRVOUT]], i32 [[N_0125]] -; CHECK-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: br label [[FOR_BODY14:%.*]] -; CHECK: for.body14: -; CHECK-NEXT: [[K_0123:%.*]] = phi i32 [ 0, [[FOR_BODY]] ], [ [[INC:%.*]], [[FOR_BODY14]] ] -; CHECK-NEXT: [[TMP1:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP4:%.*]], [[FOR_BODY14]] ] +; CHECK-NEXT: br i1 [[TMP2]], label [[FOR_BODY14_CLONE_PREHEADER:%.*]], label [[FOR_BODY14_7:%.*]] +; CHECK: for.body14.7: +; CHECK-NEXT: [[K_0123:%.*]] = phi i32 [ 0, [[FOR_BODY]] ], [ [[INC_7:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[DOTPHI:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP20:%.*]], [[FOR_BODY14_7]] ] +; 
CHECK-NEXT: [[DOTPHI1:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP21:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[DOTPHI2:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP22:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[DOTPHI3:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP23:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[DOTPHI4:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP24:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[DOTPHI5:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP25:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[DOTPHI6:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP26:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[DOTPHI7:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP27:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[K_0123]], [[SUB11]] +; CHECK-NEXT: [[INC:%.*]] = add nuw nsw i32 [[K_0123]], 1 +; CHECK-NEXT: [[ADD_1:%.*]] = add i32 [[INC]], [[SUB11]] +; CHECK-NEXT: [[INC_1:%.*]] = add nuw nsw i32 [[K_0123]], 2 +; CHECK-NEXT: [[ADD_2:%.*]] = add i32 [[INC_1]], [[SUB11]] +; CHECK-NEXT: [[INC_2:%.*]] = add nuw nsw i32 [[K_0123]], 3 +; CHECK-NEXT: [[ADD_3:%.*]] = add i32 [[INC_2]], [[SUB11]] +; CHECK-NEXT: [[INC_3:%.*]] = add nuw nsw i32 [[K_0123]], 4 +; CHECK-NEXT: [[ADD_4:%.*]] = add i32 [[INC_3]], [[SUB11]] +; CHECK-NEXT: [[INC_4:%.*]] = add nuw nsw i32 [[K_0123]], 5 +; CHECK-NEXT: [[ADD_5:%.*]] = add i32 [[INC_4]], [[SUB11]] +; CHECK-NEXT: [[INC_5:%.*]] = add nuw nsw i32 [[K_0123]], 6 +; CHECK-NEXT: [[ADD_6:%.*]] = add i32 [[INC_5]], [[SUB11]] +; CHECK-NEXT: [[INC_6:%.*]] = add nuw nsw i32 [[K_0123]], 7 +; CHECK-NEXT: [[ADD_7:%.*]] = add i32 [[INC_6]], [[SUB11]] +; CHECK-NEXT: [[INC_7]] = add nuw nsw i32 [[K_0123]], 8 ; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K_0123]] -; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX15]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = add i32 [[SUB11]], [[K_0123]] ; CHECK-NEXT: [[ARRAYIDX16:%.*]] = 
getelementptr inbounds float, ptr [[KERN_0]], i32 [[ADD]] -; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX16]], align 4 -; CHECK-NEXT: [[TMP4]] = tail call float @llvm.fmuladd.f32(float [[TMP2]], float [[TMP3]], float [[TMP1]]) -; CHECK-NEXT: store float [[TMP4]], ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[K_0123]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[INDVARS_IV]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY14]] +; CHECK-NEXT: [[ARRAYIDX15_1:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC]] +; CHECK-NEXT: [[ARRAYIDX16_1:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[ADD_1]] +; CHECK-NEXT: [[ARRAYIDX15_2:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC_1]] +; CHECK-NEXT: [[ARRAYIDX16_2:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[ADD_2]] +; CHECK-NEXT: [[ARRAYIDX15_3:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC_2]] +; CHECK-NEXT: [[ARRAYIDX16_3:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[ADD_3]] +; CHECK-NEXT: [[ARRAYIDX15_4:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC_3]] +; CHECK-NEXT: [[ARRAYIDX16_4:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[ADD_4]] +; CHECK-NEXT: [[ARRAYIDX15_5:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC_4]] +; CHECK-NEXT: [[ARRAYIDX16_5:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[ADD_5]] +; CHECK-NEXT: [[ARRAYIDX15_6:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC_5]] +; CHECK-NEXT: [[ARRAYIDX16_6:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[ADD_6]] +; CHECK-NEXT: [[ARRAYIDX15_7:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC_6]] +; CHECK-NEXT: [[ARRAYIDX16_7:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[ADD_7]] +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX15]], align 4 +; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX16]], align 
4 +; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX15_1]], align 4 +; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX16_1]], align 4 +; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX15_2]], align 4 +; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX16_2]], align 4 +; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX15_3]], align 4 +; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX16_3]], align 4 +; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX15_4]], align 4 +; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX16_4]], align 4 +; CHECK-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX15_5]], align 4 +; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX16_5]], align 4 +; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX15_6]], align 4 +; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX16_6]], align 4 +; CHECK-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX15_7]], align 4 +; CHECK-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX16_7]], align 4 +; CHECK-NEXT: [[TMP20]] = tail call float @llvm.fmuladd.f32(float [[TMP4]], float [[TMP5]], float [[DOTPHI]]) +; CHECK-NEXT: [[TMP21]] = tail call float @llvm.fmuladd.f32(float [[TMP6]], float [[TMP7]], float [[DOTPHI1]]) +; CHECK-NEXT: [[TMP22]] = tail call float @llvm.fmuladd.f32(float [[TMP8]], float [[TMP9]], float [[DOTPHI2]]) +; CHECK-NEXT: [[TMP23]] = tail call float @llvm.fmuladd.f32(float [[TMP10]], float [[TMP11]], float [[DOTPHI3]]) +; CHECK-NEXT: [[TMP24]] = tail call float @llvm.fmuladd.f32(float [[TMP12]], float [[TMP13]], float [[DOTPHI4]]) +; CHECK-NEXT: [[TMP25]] = tail call float @llvm.fmuladd.f32(float [[TMP14]], float [[TMP15]], float [[DOTPHI5]]) +; CHECK-NEXT: [[TMP26]] = tail call float @llvm.fmuladd.f32(float [[TMP16]], float [[TMP17]], float [[DOTPHI6]]) +; CHECK-NEXT: [[TMP27]] = tail call float @llvm.fmuladd.f32(float [[TMP18]], float [[TMP19]], float [[DOTPHI7]]) +; CHECK-NEXT: [[EXITCOND_7:%.*]] = icmp ult i32 [[INC_7]], [[TMP1]] +; 
CHECK-NEXT: br i1 [[EXITCOND_7]], label [[FOR_BODY14_7]], label [[FOR_END8:%.*]] +; CHECK: for.end8: +; CHECK-NEXT: [[SUM:%.*]] = fadd float [[TMP20]], [[TMP21]] +; CHECK-NEXT: [[SUM23:%.*]] = fadd float [[TMP22]], [[TMP23]] +; CHECK-NEXT: [[SUM24:%.*]] = fadd float [[TMP24]], [[TMP25]] +; CHECK-NEXT: [[SUM25:%.*]] = fadd float [[TMP26]], [[TMP27]] +; CHECK-NEXT: [[SUM26:%.*]] = fadd float [[SUM]], [[SUM23]] +; CHECK-NEXT: [[SUM27:%.*]] = fadd float [[SUM24]], [[SUM25]] +; CHECK-NEXT: [[SUM28:%.*]] = fadd float [[SUM26]], [[SUM27]] +; CHECK-NEXT: store float [[SUM28]], ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: br i1 false, label [[FOR_END]], label [[FOR_BODY14_CLONE_PREHEADER]] +; CHECK: for.body14.clone.preheader: +; CHECK-NEXT: [[SUM_PHI:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[SUM28]], [[FOR_END8]] ] +; CHECK-NEXT: [[ADD_PHI:%.*]] = phi i32 [ 0, [[FOR_BODY]] ], [ [[TMP0]], [[FOR_END8]] ] +; CHECK-NEXT: br label [[FOR_BODY14_CLONE:%.*]] +; CHECK: for.body14.clone: +; CHECK-NEXT: [[K_0123_CLONE:%.*]] = phi i32 [ [[ADD_PHI]], [[FOR_BODY14_CLONE_PREHEADER]] ], [ [[INC_CLONE:%.*]], [[FOR_BODY14_CLONE]] ] +; CHECK-NEXT: [[TMP28:%.*]] = phi float [ [[SUM_PHI]], [[FOR_BODY14_CLONE_PREHEADER]] ], [ [[TMP31:%.*]], [[FOR_BODY14_CLONE]] ] +; CHECK-NEXT: [[ARRAYIDX15_CLONE:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K_0123_CLONE]] +; CHECK-NEXT: [[TMP29:%.*]] = load float, ptr [[ARRAYIDX15_CLONE]], align 4 +; CHECK-NEXT: [[ADD_CLONE:%.*]] = add i32 [[K_0123_CLONE]], [[SUB11]] +; CHECK-NEXT: [[ARRAYIDX16_CLONE:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[ADD_CLONE]] +; CHECK-NEXT: [[TMP30:%.*]] = load float, ptr [[ARRAYIDX16_CLONE]], align 4 +; CHECK-NEXT: [[TMP31]] = tail call float @llvm.fmuladd.f32(float [[TMP29]], float [[TMP30]], float [[TMP28]]) +; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[K_0123_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[INDVARS_IV]] +; CHECK-NEXT: br i1 
[[EXITCOND_CLONE]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]], label [[FOR_BODY14_CLONE]] +; CHECK: for.cond.for.end_crit_edge: +; CHECK-NEXT: store float [[TMP31]], ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: br label [[FOR_END]] ; CHECK: for.end: ; CHECK-NEXT: [[INC19]] = add nuw nsw i32 [[N_0125]], 1 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw i32 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[EXITCOND134_NOT:%.*]] = icmp eq i32 [[INC19]], [[LKERN_0]] -; CHECK-NEXT: br i1 [[EXITCOND134_NOT]], label [[FOR_COND22_PREHEADER]], label [[FOR_BODY]] +; CHECK-NEXT: br i1 [[EXITCOND134_NOT]], label [[FOR_COND22_PREHEADER_LOOPEXIT:%.*]], label [[FOR_BODY]] +; CHECK: for.cond45.preheader.loopexit: +; CHECK-NEXT: br label [[FOR_COND45_PREHEADER]] ; CHECK: for.cond45.preheader: ; CHECK-NEXT: [[ADD46:%.*]] = add i32 [[SIGLEN]], -1 ; CHECK-NEXT: [[SUB47:%.*]] = add i32 [[ADD46]], [[KERNLEN]] @@ -60,57 +155,308 @@ define dso_local noundef i32 @dsps_ccorr_f32_ansi(ptr noundef readonly %Signal, ; CHECK-NEXT: [[SUB57:%.*]] = add nsw i32 [[LSIG_0]], -1 ; CHECK-NEXT: br label [[FOR_BODY50:%.*]] ; CHECK: for.body25: -; CHECK-NEXT: [[N21_0129:%.*]] = phi i32 [ [[INC42:%.*]], [[FOR_END40:%.*]] ], [ [[LKERN_0]], [[FOR_COND22_PREHEADER]] ] +; CHECK-NEXT: [[N21_0129:%.*]] = phi i32 [ [[INC42:%.*]], [[FOR_END40:%.*]] ], [ [[LKERN_0]], [[FOR_BODY25_PREHEADER]] ] ; CHECK-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds float, ptr [[CORRVOUT]], i32 [[N21_0129]] -; CHECK-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX28]], align 4 ; CHECK-NEXT: [[SUB29:%.*]] = sub nuw nsw i32 [[N21_0129]], [[LKERN_0]] ; CHECK-NEXT: [[ADD30:%.*]] = add nsw i32 [[SUB29]], 1 -; CHECK-NEXT: [[CMP32_NOT126:%.*]] = icmp ugt i32 [[ADD30]], [[N21_0129]] -; CHECK-NEXT: br i1 [[CMP32_NOT126]], label [[FOR_END40]], label [[FOR_BODY33:%.*]] -; CHECK: for.body33: -; CHECK-NEXT: [[TMP5:%.*]] = phi float [ [[TMP8:%.*]], [[FOR_BODY33]] ], [ 0.000000e+00, [[FOR_BODY25]] ] -; CHECK-NEXT: [[K27_0127:%.*]] = phi i32 [ [[INC39:%.*]], 
[[FOR_BODY33]] ], [ [[ADD30]], [[FOR_BODY25]] ] -; CHECK-NEXT: [[ARRAYIDX34:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K27_0127]] -; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX34]], align 4 +; CHECK-NEXT: [[ADD60:%.*]] = add i32 [[ADD30]], [[DIV536]] +; CHECK-NEXT: [[CMP32_NOT126:%.*]] = icmp ult i32 [[ADD30]], [[ADD60]] +; CHECK-NEXT: br i1 [[CMP32_NOT126]], label [[FOR_BODY33_PREHEADER:%.*]], label [[FOR_END164:%.*]] +; CHECK: for.body33.preheader: +; CHECK-NEXT: br label [[FOR_BODY33_15:%.*]] +; CHECK: for.body33.15: +; CHECK-NEXT: [[K27_0127:%.*]] = phi i32 [ [[ADD30]], [[FOR_BODY33_PREHEADER]] ], [ [[INC39_15:%.*]], [[FOR_BODY33_15]] ] +; CHECK-NEXT: [[DOTPHI9:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY33_PREHEADER]] ], [ [[TMP64:%.*]], [[FOR_BODY33_15]] ] +; CHECK-NEXT: [[DOTPHI10:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY33_PREHEADER]] ], [ [[TMP65:%.*]], [[FOR_BODY33_15]] ] +; CHECK-NEXT: [[DOTPHI11:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY33_PREHEADER]] ], [ [[TMP66:%.*]], [[FOR_BODY33_15]] ] +; CHECK-NEXT: [[DOTPHI12:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY33_PREHEADER]] ], [ [[TMP67:%.*]], [[FOR_BODY33_15]] ] +; CHECK-NEXT: [[DOTPHI13:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY33_PREHEADER]] ], [ [[TMP68:%.*]], [[FOR_BODY33_15]] ] +; CHECK-NEXT: [[DOTPHI14:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY33_PREHEADER]] ], [ [[TMP69:%.*]], [[FOR_BODY33_15]] ] +; CHECK-NEXT: [[DOTPHI15:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY33_PREHEADER]] ], [ [[TMP70:%.*]], [[FOR_BODY33_15]] ] +; CHECK-NEXT: [[DOTPHI16:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY33_PREHEADER]] ], [ [[TMP71:%.*]], [[FOR_BODY33_15]] ] +; CHECK-NEXT: [[DOTPHI17:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY33_PREHEADER]] ], [ [[TMP72:%.*]], [[FOR_BODY33_15]] ] +; CHECK-NEXT: [[DOTPHI18:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY33_PREHEADER]] ], [ [[TMP73:%.*]], [[FOR_BODY33_15]] ] +; CHECK-NEXT: [[DOTPHI19:%.*]] = phi float [ 0.000000e+00, 
[[FOR_BODY33_PREHEADER]] ], [ [[TMP74:%.*]], [[FOR_BODY33_15]] ] +; CHECK-NEXT: [[DOTPHI20:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY33_PREHEADER]] ], [ [[TMP75:%.*]], [[FOR_BODY33_15]] ] +; CHECK-NEXT: [[DOTPHI21:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY33_PREHEADER]] ], [ [[TMP76:%.*]], [[FOR_BODY33_15]] ] +; CHECK-NEXT: [[DOTPHI22:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY33_PREHEADER]] ], [ [[TMP77:%.*]], [[FOR_BODY33_15]] ] +; CHECK-NEXT: [[DOTPHI23:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY33_PREHEADER]] ], [ [[TMP78:%.*]], [[FOR_BODY33_15]] ] +; CHECK-NEXT: [[DOTPHI24:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY33_PREHEADER]] ], [ [[TMP79:%.*]], [[FOR_BODY33_15]] ] +; CHECK-NEXT: [[INC39:%.*]] = add i32 [[K27_0127]], 1 +; CHECK-NEXT: [[INC39_1:%.*]] = add i32 [[K27_0127]], 2 +; CHECK-NEXT: [[INC39_2:%.*]] = add i32 [[K27_0127]], 3 +; CHECK-NEXT: [[INC39_3:%.*]] = add i32 [[K27_0127]], 4 +; CHECK-NEXT: [[INC39_4:%.*]] = add i32 [[K27_0127]], 5 +; CHECK-NEXT: [[INC39_5:%.*]] = add i32 [[K27_0127]], 6 +; CHECK-NEXT: [[INC39_6:%.*]] = add i32 [[K27_0127]], 7 +; CHECK-NEXT: [[INC39_7:%.*]] = add i32 [[K27_0127]], 8 +; CHECK-NEXT: [[INC39_8:%.*]] = add i32 [[K27_0127]], 9 +; CHECK-NEXT: [[INC39_9:%.*]] = add i32 [[K27_0127]], 10 +; CHECK-NEXT: [[INC39_10:%.*]] = add i32 [[K27_0127]], 11 +; CHECK-NEXT: [[INC39_11:%.*]] = add i32 [[K27_0127]], 12 +; CHECK-NEXT: [[INC39_12:%.*]] = add i32 [[K27_0127]], 13 +; CHECK-NEXT: [[INC39_13:%.*]] = add i32 [[K27_0127]], 14 +; CHECK-NEXT: [[INC39_14:%.*]] = add i32 [[K27_0127]], 15 +; CHECK-NEXT: [[INC39_15]] = add i32 [[K27_0127]], 16 ; CHECK-NEXT: [[SUB35:%.*]] = sub i32 [[K27_0127]], [[ADD30]] +; CHECK-NEXT: [[SUB35_1:%.*]] = sub i32 [[INC39]], [[ADD30]] +; CHECK-NEXT: [[SUB35_2:%.*]] = sub i32 [[INC39_1]], [[ADD30]] +; CHECK-NEXT: [[SUB35_3:%.*]] = sub i32 [[INC39_2]], [[ADD30]] +; CHECK-NEXT: [[SUB35_4:%.*]] = sub i32 [[INC39_3]], [[ADD30]] +; CHECK-NEXT: [[SUB35_5:%.*]] = sub i32 [[INC39_4]], [[ADD30]] +; 
CHECK-NEXT: [[SUB35_6:%.*]] = sub i32 [[INC39_5]], [[ADD30]] +; CHECK-NEXT: [[SUB35_7:%.*]] = sub i32 [[INC39_6]], [[ADD30]] +; CHECK-NEXT: [[SUB35_8:%.*]] = sub i32 [[INC39_7]], [[ADD30]] +; CHECK-NEXT: [[SUB35_9:%.*]] = sub i32 [[INC39_8]], [[ADD30]] +; CHECK-NEXT: [[SUB35_10:%.*]] = sub i32 [[INC39_9]], [[ADD30]] +; CHECK-NEXT: [[SUB35_11:%.*]] = sub i32 [[INC39_10]], [[ADD30]] +; CHECK-NEXT: [[SUB35_12:%.*]] = sub i32 [[INC39_11]], [[ADD30]] +; CHECK-NEXT: [[SUB35_13:%.*]] = sub i32 [[INC39_12]], [[ADD30]] +; CHECK-NEXT: [[SUB35_14:%.*]] = sub i32 [[INC39_13]], [[ADD30]] +; CHECK-NEXT: [[SUB35_15:%.*]] = sub i32 [[INC39_14]], [[ADD30]] +; CHECK-NEXT: [[ARRAYIDX34:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K27_0127]] ; CHECK-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB35]] -; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX36]], align 4 -; CHECK-NEXT: [[TMP8]] = tail call float @llvm.fmuladd.f32(float [[TMP6]], float [[TMP7]], float [[TMP5]]) -; CHECK-NEXT: store float [[TMP8]], ptr [[ARRAYIDX28]], align 4 -; CHECK-NEXT: [[INC39]] = add i32 [[K27_0127]], 1 -; CHECK-NEXT: [[CMP32_NOT:%.*]] = icmp ugt i32 [[INC39]], [[N21_0129]] -; CHECK-NEXT: br i1 [[CMP32_NOT]], label [[FOR_END40]], label [[FOR_BODY33]] +; CHECK-NEXT: [[ARRAYIDX34_1:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC39]] +; CHECK-NEXT: [[ARRAYIDX36_1:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB35_1]] +; CHECK-NEXT: [[ARRAYIDX34_2:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC39_1]] +; CHECK-NEXT: [[ARRAYIDX36_2:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB35_2]] +; CHECK-NEXT: [[ARRAYIDX34_3:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC39_2]] +; CHECK-NEXT: [[ARRAYIDX36_3:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB35_3]] +; CHECK-NEXT: [[ARRAYIDX34_4:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC39_3]] +; CHECK-NEXT: 
[[ARRAYIDX36_4:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB35_4]] +; CHECK-NEXT: [[ARRAYIDX34_5:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC39_4]] +; CHECK-NEXT: [[ARRAYIDX36_5:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB35_5]] +; CHECK-NEXT: [[ARRAYIDX34_6:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC39_5]] +; CHECK-NEXT: [[ARRAYIDX36_6:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB35_6]] +; CHECK-NEXT: [[ARRAYIDX34_7:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC39_6]] +; CHECK-NEXT: [[ARRAYIDX36_7:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB35_7]] +; CHECK-NEXT: [[ARRAYIDX34_8:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC39_7]] +; CHECK-NEXT: [[ARRAYIDX36_8:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB35_8]] +; CHECK-NEXT: [[ARRAYIDX34_9:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC39_8]] +; CHECK-NEXT: [[ARRAYIDX36_9:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB35_9]] +; CHECK-NEXT: [[ARRAYIDX34_10:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC39_9]] +; CHECK-NEXT: [[ARRAYIDX36_10:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB35_10]] +; CHECK-NEXT: [[ARRAYIDX34_11:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC39_10]] +; CHECK-NEXT: [[ARRAYIDX36_11:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB35_11]] +; CHECK-NEXT: [[ARRAYIDX34_12:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC39_11]] +; CHECK-NEXT: [[ARRAYIDX36_12:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB35_12]] +; CHECK-NEXT: [[ARRAYIDX34_13:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC39_12]] +; CHECK-NEXT: [[ARRAYIDX36_13:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB35_13]] +; CHECK-NEXT: [[ARRAYIDX34_14:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC39_13]] +; 
CHECK-NEXT: [[ARRAYIDX36_14:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB35_14]] +; CHECK-NEXT: [[ARRAYIDX34_15:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC39_14]] +; CHECK-NEXT: [[ARRAYIDX36_15:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB35_15]] +; CHECK-NEXT: [[TMP32:%.*]] = load float, ptr [[ARRAYIDX34]], align 4 +; CHECK-NEXT: [[TMP33:%.*]] = load float, ptr [[ARRAYIDX36]], align 4 +; CHECK-NEXT: [[TMP34:%.*]] = load float, ptr [[ARRAYIDX34_1]], align 4 +; CHECK-NEXT: [[TMP35:%.*]] = load float, ptr [[ARRAYIDX36_1]], align 4 +; CHECK-NEXT: [[TMP36:%.*]] = load float, ptr [[ARRAYIDX34_2]], align 4 +; CHECK-NEXT: [[TMP37:%.*]] = load float, ptr [[ARRAYIDX36_2]], align 4 +; CHECK-NEXT: [[TMP38:%.*]] = load float, ptr [[ARRAYIDX34_3]], align 4 +; CHECK-NEXT: [[TMP39:%.*]] = load float, ptr [[ARRAYIDX36_3]], align 4 +; CHECK-NEXT: [[TMP40:%.*]] = load float, ptr [[ARRAYIDX34_4]], align 4 +; CHECK-NEXT: [[TMP41:%.*]] = load float, ptr [[ARRAYIDX36_4]], align 4 +; CHECK-NEXT: [[TMP42:%.*]] = load float, ptr [[ARRAYIDX34_5]], align 4 +; CHECK-NEXT: [[TMP43:%.*]] = load float, ptr [[ARRAYIDX36_5]], align 4 +; CHECK-NEXT: [[TMP44:%.*]] = load float, ptr [[ARRAYIDX34_6]], align 4 +; CHECK-NEXT: [[TMP45:%.*]] = load float, ptr [[ARRAYIDX36_6]], align 4 +; CHECK-NEXT: [[TMP46:%.*]] = load float, ptr [[ARRAYIDX34_7]], align 4 +; CHECK-NEXT: [[TMP47:%.*]] = load float, ptr [[ARRAYIDX36_7]], align 4 +; CHECK-NEXT: [[TMP48:%.*]] = load float, ptr [[ARRAYIDX34_8]], align 4 +; CHECK-NEXT: [[TMP49:%.*]] = load float, ptr [[ARRAYIDX36_8]], align 4 +; CHECK-NEXT: [[TMP50:%.*]] = load float, ptr [[ARRAYIDX34_9]], align 4 +; CHECK-NEXT: [[TMP51:%.*]] = load float, ptr [[ARRAYIDX36_9]], align 4 +; CHECK-NEXT: [[TMP52:%.*]] = load float, ptr [[ARRAYIDX34_10]], align 4 +; CHECK-NEXT: [[TMP53:%.*]] = load float, ptr [[ARRAYIDX36_10]], align 4 +; CHECK-NEXT: [[TMP54:%.*]] = load float, ptr [[ARRAYIDX34_11]], align 4 +; CHECK-NEXT: 
[[TMP55:%.*]] = load float, ptr [[ARRAYIDX36_11]], align 4 +; CHECK-NEXT: [[TMP56:%.*]] = load float, ptr [[ARRAYIDX34_12]], align 4 +; CHECK-NEXT: [[TMP57:%.*]] = load float, ptr [[ARRAYIDX36_12]], align 4 +; CHECK-NEXT: [[TMP58:%.*]] = load float, ptr [[ARRAYIDX34_13]], align 4 +; CHECK-NEXT: [[TMP59:%.*]] = load float, ptr [[ARRAYIDX36_13]], align 4 +; CHECK-NEXT: [[TMP60:%.*]] = load float, ptr [[ARRAYIDX34_14]], align 4 +; CHECK-NEXT: [[TMP61:%.*]] = load float, ptr [[ARRAYIDX36_14]], align 4 +; CHECK-NEXT: [[TMP62:%.*]] = load float, ptr [[ARRAYIDX34_15]], align 4 +; CHECK-NEXT: [[TMP63:%.*]] = load float, ptr [[ARRAYIDX36_15]], align 4 +; CHECK-NEXT: [[TMP64]] = tail call float @llvm.fmuladd.f32(float [[TMP32]], float [[TMP33]], float [[DOTPHI9]]) +; CHECK-NEXT: [[TMP65]] = tail call float @llvm.fmuladd.f32(float [[TMP34]], float [[TMP35]], float [[DOTPHI10]]) +; CHECK-NEXT: [[TMP66]] = tail call float @llvm.fmuladd.f32(float [[TMP36]], float [[TMP37]], float [[DOTPHI11]]) +; CHECK-NEXT: [[TMP67]] = tail call float @llvm.fmuladd.f32(float [[TMP38]], float [[TMP39]], float [[DOTPHI12]]) +; CHECK-NEXT: [[TMP68]] = tail call float @llvm.fmuladd.f32(float [[TMP40]], float [[TMP41]], float [[DOTPHI13]]) +; CHECK-NEXT: [[TMP69]] = tail call float @llvm.fmuladd.f32(float [[TMP42]], float [[TMP43]], float [[DOTPHI14]]) +; CHECK-NEXT: [[TMP70]] = tail call float @llvm.fmuladd.f32(float [[TMP44]], float [[TMP45]], float [[DOTPHI15]]) +; CHECK-NEXT: [[TMP71]] = tail call float @llvm.fmuladd.f32(float [[TMP46]], float [[TMP47]], float [[DOTPHI16]]) +; CHECK-NEXT: [[TMP72]] = tail call float @llvm.fmuladd.f32(float [[TMP48]], float [[TMP49]], float [[DOTPHI17]]) +; CHECK-NEXT: [[TMP73]] = tail call float @llvm.fmuladd.f32(float [[TMP50]], float [[TMP51]], float [[DOTPHI18]]) +; CHECK-NEXT: [[TMP74]] = tail call float @llvm.fmuladd.f32(float [[TMP52]], float [[TMP53]], float [[DOTPHI19]]) +; CHECK-NEXT: [[TMP75]] = tail call float @llvm.fmuladd.f32(float [[TMP54]], float 
[[TMP55]], float [[DOTPHI20]]) +; CHECK-NEXT: [[TMP76]] = tail call float @llvm.fmuladd.f32(float [[TMP56]], float [[TMP57]], float [[DOTPHI21]]) +; CHECK-NEXT: [[TMP77]] = tail call float @llvm.fmuladd.f32(float [[TMP58]], float [[TMP59]], float [[DOTPHI22]]) +; CHECK-NEXT: [[TMP78]] = tail call float @llvm.fmuladd.f32(float [[TMP60]], float [[TMP61]], float [[DOTPHI23]]) +; CHECK-NEXT: [[TMP79]] = tail call float @llvm.fmuladd.f32(float [[TMP62]], float [[TMP63]], float [[DOTPHI24]]) +; CHECK-NEXT: [[CMP32_NOT_15:%.*]] = icmp ult i32 [[INC39_15]], [[ADD60]] +; CHECK-NEXT: br i1 [[CMP32_NOT_15]], label [[FOR_BODY33_15]], label [[FOR_END40_LOOPEXIT:%.*]] +; CHECK: for.end40.loopexit: +; CHECK-NEXT: [[SUM45:%.*]] = fadd float [[TMP64]], [[TMP65]] +; CHECK-NEXT: [[SUM46:%.*]] = fadd float [[TMP66]], [[TMP67]] +; CHECK-NEXT: [[SUM47:%.*]] = fadd float [[TMP68]], [[TMP69]] +; CHECK-NEXT: [[SUM48:%.*]] = fadd float [[TMP70]], [[TMP71]] +; CHECK-NEXT: [[SUM49:%.*]] = fadd float [[TMP72]], [[TMP73]] +; CHECK-NEXT: [[SUM50:%.*]] = fadd float [[TMP74]], [[TMP75]] +; CHECK-NEXT: [[SUM51:%.*]] = fadd float [[TMP76]], [[TMP77]] +; CHECK-NEXT: [[SUM52:%.*]] = fadd float [[TMP78]], [[TMP79]] +; CHECK-NEXT: [[SUM53:%.*]] = fadd float [[SUM45]], [[SUM46]] +; CHECK-NEXT: [[SUM54:%.*]] = fadd float [[SUM47]], [[SUM48]] +; CHECK-NEXT: [[SUM55:%.*]] = fadd float [[SUM49]], [[SUM50]] +; CHECK-NEXT: [[SUM56:%.*]] = fadd float [[SUM51]], [[SUM52]] +; CHECK-NEXT: [[SUM57:%.*]] = fadd float [[SUM53]], [[SUM54]] +; CHECK-NEXT: [[SUM58:%.*]] = fadd float [[SUM55]], [[SUM56]] +; CHECK-NEXT: [[SUM59:%.*]] = fadd float [[SUM57]], [[SUM58]] +; CHECK-NEXT: br label [[FOR_END164]] +; CHECK: for.end164: +; CHECK-NEXT: [[PHI_SUM:%.*]] = phi i32 [ [[ADD30]], [[FOR_BODY25]] ], [ [[INC39_15]], [[FOR_END40_LOOPEXIT]] ] +; CHECK-NEXT: [[PHI_FLOAT:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY25]] ], [ [[SUM59]], [[FOR_END40_LOOPEXIT]] ] +; CHECK-NEXT: store float [[PHI_FLOAT]], ptr [[ARRAYIDX28]], align 4 
+; CHECK-NEXT: [[CMP182_NOT587:%.*]] = icmp ugt i32 [[PHI_SUM]], [[N21_0129]] +; CHECK-NEXT: br i1 [[CMP182_NOT587]], label [[FOR_END40]], label [[FOR_BODY33_CLONE:%.*]] +; CHECK: for.body33.clone: +; CHECK-NEXT: [[TMP80:%.*]] = phi float [ [[TMP83:%.*]], [[FOR_BODY33_CLONE]] ], [ [[PHI_FLOAT]], [[FOR_END164]] ] +; CHECK-NEXT: [[K27_0127_CLONE:%.*]] = phi i32 [ [[INC39_CLONE:%.*]], [[FOR_BODY33_CLONE]] ], [ [[PHI_SUM]], [[FOR_END164]] ] +; CHECK-NEXT: [[ARRAYIDX34_CLONE:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K27_0127_CLONE]] +; CHECK-NEXT: [[TMP81:%.*]] = load float, ptr [[ARRAYIDX34_CLONE]], align 4 +; CHECK-NEXT: [[SUB35_CLONE:%.*]] = sub i32 [[K27_0127_CLONE]], [[ADD30]] +; CHECK-NEXT: [[ARRAYIDX36_CLONE:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB35_CLONE]] +; CHECK-NEXT: [[TMP82:%.*]] = load float, ptr [[ARRAYIDX36_CLONE]], align 4 +; CHECK-NEXT: [[TMP83]] = tail call float @llvm.fmuladd.f32(float [[TMP81]], float [[TMP82]], float [[TMP80]]) +; CHECK-NEXT: [[INC39_CLONE]] = add i32 [[K27_0127_CLONE]], 1 +; CHECK-NEXT: [[CMP32_NOT_CLONE:%.*]] = icmp ugt i32 [[INC39_CLONE]], [[N21_0129]] +; CHECK-NEXT: br i1 [[CMP32_NOT_CLONE]], label [[FOR_COND_FOR_END_CRIT_EDGE25:%.*]], label [[FOR_BODY33_CLONE]] +; CHECK: for.cond.for.end_crit_edge25: +; CHECK-NEXT: store float [[TMP83]], ptr [[ARRAYIDX28]], align 4 +; CHECK-NEXT: br label [[FOR_END40]] ; CHECK: for.end40: ; CHECK-NEXT: [[INC42]] = add nuw nsw i32 [[N21_0129]], 1 ; CHECK-NEXT: [[EXITCOND135_NOT:%.*]] = icmp eq i32 [[INC42]], [[LSIG_0]] -; CHECK-NEXT: br i1 [[EXITCOND135_NOT]], label [[FOR_COND45_PREHEADER]], label [[FOR_BODY25]] +; CHECK-NEXT: br i1 [[EXITCOND135_NOT]], label [[FOR_COND45_PREHEADER_LOOPEXIT:%.*]], label [[FOR_BODY25]] ; CHECK: for.body50: ; CHECK-NEXT: [[N44_0133:%.*]] = phi i32 [ [[LSIG_0]], [[FOR_BODY50_LR_PH]] ], [ [[INC69:%.*]], [[FOR_END67:%.*]] ] ; CHECK-NEXT: [[ARRAYIDX54:%.*]] = getelementptr inbounds float, ptr [[CORRVOUT]], i32 [[N44_0133]] -; 
CHECK-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX54]], align 4 ; CHECK-NEXT: [[SUB55:%.*]] = sub nsw i32 [[N44_0133]], [[LKERN_0]] ; CHECK-NEXT: [[ADD56:%.*]] = add nsw i32 [[SUB55]], 1 -; CHECK-NEXT: [[CMP59_NOT130:%.*]] = icmp ugt i32 [[ADD56]], [[SUB57]] -; CHECK-NEXT: br i1 [[CMP59_NOT130]], label [[FOR_END67]], label [[FOR_BODY60:%.*]] -; CHECK: for.body60: -; CHECK-NEXT: [[TMP9:%.*]] = phi float [ [[TMP12:%.*]], [[FOR_BODY60]] ], [ 0.000000e+00, [[FOR_BODY50]] ] -; CHECK-NEXT: [[K53_0131:%.*]] = phi i32 [ [[INC66:%.*]], [[FOR_BODY60]] ], [ [[ADD56]], [[FOR_BODY50]] ] -; CHECK-NEXT: [[ARRAYIDX61:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K53_0131]] -; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX61]], align 4 +; CHECK-NEXT: [[ADD207_NEG:%.*]] = xor i32 [[SUB55]], -1 +; CHECK-NEXT: [[ADD211:%.*]] = add i32 [[ADD207_NEG]], [[LSIG_0]] +; CHECK-NEXT: [[DIV212535:%.*]] = and i32 [[ADD211]], -8 +; CHECK-NEXT: [[ADD214:%.*]] = add i32 [[DIV212535]], [[ADD56]] +; CHECK-NEXT: [[CMP59_NOT130:%.*]] = icmp ult i32 [[ADD56]], [[ADD214]] +; CHECK-NEXT: br i1 [[CMP59_NOT130]], label [[FOR_BODY60_PREHEADER:%.*]], label [[FOR_END16434:%.*]] +; CHECK: for.body60.preheader: +; CHECK-NEXT: br label [[FOR_BODY60_7:%.*]] +; CHECK: for.body60.7: +; CHECK-NEXT: [[K53_0131:%.*]] = phi i32 [ [[ADD56]], [[FOR_BODY60_PREHEADER]] ], [ [[INC66_7:%.*]], [[FOR_BODY60_7]] ] +; CHECK-NEXT: [[DOTPHI26:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY60_PREHEADER]] ], [ [[TMP100:%.*]], [[FOR_BODY60_7]] ] +; CHECK-NEXT: [[DOTPHI27:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY60_PREHEADER]] ], [ [[TMP101:%.*]], [[FOR_BODY60_7]] ] +; CHECK-NEXT: [[DOTPHI28:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY60_PREHEADER]] ], [ [[TMP102:%.*]], [[FOR_BODY60_7]] ] +; CHECK-NEXT: [[DOTPHI29:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY60_PREHEADER]] ], [ [[TMP103:%.*]], [[FOR_BODY60_7]] ] +; CHECK-NEXT: [[DOTPHI30:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY60_PREHEADER]] ], [ 
[[TMP104:%.*]], [[FOR_BODY60_7]] ] +; CHECK-NEXT: [[DOTPHI31:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY60_PREHEADER]] ], [ [[TMP105:%.*]], [[FOR_BODY60_7]] ] +; CHECK-NEXT: [[DOTPHI32:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY60_PREHEADER]] ], [ [[TMP106:%.*]], [[FOR_BODY60_7]] ] +; CHECK-NEXT: [[DOTPHI33:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY60_PREHEADER]] ], [ [[TMP107:%.*]], [[FOR_BODY60_7]] ] +; CHECK-NEXT: [[INC66:%.*]] = add i32 [[K53_0131]], 1 +; CHECK-NEXT: [[INC66_1:%.*]] = add i32 [[K53_0131]], 2 +; CHECK-NEXT: [[INC66_2:%.*]] = add i32 [[K53_0131]], 3 +; CHECK-NEXT: [[INC66_3:%.*]] = add i32 [[K53_0131]], 4 +; CHECK-NEXT: [[INC66_4:%.*]] = add i32 [[K53_0131]], 5 +; CHECK-NEXT: [[INC66_5:%.*]] = add i32 [[K53_0131]], 6 +; CHECK-NEXT: [[INC66_6:%.*]] = add i32 [[K53_0131]], 7 +; CHECK-NEXT: [[INC66_7]] = add i32 [[K53_0131]], 8 ; CHECK-NEXT: [[SUB62:%.*]] = sub i32 [[K53_0131]], [[ADD56]] +; CHECK-NEXT: [[SUB62_1:%.*]] = sub i32 [[INC66]], [[ADD56]] +; CHECK-NEXT: [[SUB62_2:%.*]] = sub i32 [[INC66_1]], [[ADD56]] +; CHECK-NEXT: [[SUB62_3:%.*]] = sub i32 [[INC66_2]], [[ADD56]] +; CHECK-NEXT: [[SUB62_4:%.*]] = sub i32 [[INC66_3]], [[ADD56]] +; CHECK-NEXT: [[SUB62_5:%.*]] = sub i32 [[INC66_4]], [[ADD56]] +; CHECK-NEXT: [[SUB62_6:%.*]] = sub i32 [[INC66_5]], [[ADD56]] +; CHECK-NEXT: [[SUB62_7:%.*]] = sub i32 [[INC66_6]], [[ADD56]] +; CHECK-NEXT: [[ARRAYIDX61:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K53_0131]] ; CHECK-NEXT: [[ARRAYIDX63:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB62]] -; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX63]], align 4 -; CHECK-NEXT: [[TMP12]] = tail call float @llvm.fmuladd.f32(float [[TMP10]], float [[TMP11]], float [[TMP9]]) -; CHECK-NEXT: store float [[TMP12]], ptr [[ARRAYIDX54]], align 4 -; CHECK-NEXT: [[INC66]] = add i32 [[K53_0131]], 1 -; CHECK-NEXT: [[CMP59_NOT:%.*]] = icmp ugt i32 [[INC66]], [[SUB57]] -; CHECK-NEXT: br i1 [[CMP59_NOT]], label [[FOR_END67]], label 
[[FOR_BODY60]] +; CHECK-NEXT: [[ARRAYIDX61_1:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC66]] +; CHECK-NEXT: [[ARRAYIDX63_1:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB62_1]] +; CHECK-NEXT: [[ARRAYIDX61_2:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC66_1]] +; CHECK-NEXT: [[ARRAYIDX63_2:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB62_2]] +; CHECK-NEXT: [[ARRAYIDX61_3:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC66_2]] +; CHECK-NEXT: [[ARRAYIDX63_3:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB62_3]] +; CHECK-NEXT: [[ARRAYIDX61_4:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC66_3]] +; CHECK-NEXT: [[ARRAYIDX63_4:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB62_4]] +; CHECK-NEXT: [[ARRAYIDX61_5:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC66_4]] +; CHECK-NEXT: [[ARRAYIDX63_5:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB62_5]] +; CHECK-NEXT: [[ARRAYIDX61_6:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC66_5]] +; CHECK-NEXT: [[ARRAYIDX63_6:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB62_6]] +; CHECK-NEXT: [[ARRAYIDX61_7:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC66_6]] +; CHECK-NEXT: [[ARRAYIDX63_7:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB62_7]] +; CHECK-NEXT: [[TMP84:%.*]] = load float, ptr [[ARRAYIDX61]], align 4 +; CHECK-NEXT: [[TMP85:%.*]] = load float, ptr [[ARRAYIDX63]], align 4 +; CHECK-NEXT: [[TMP86:%.*]] = load float, ptr [[ARRAYIDX61_1]], align 4 +; CHECK-NEXT: [[TMP87:%.*]] = load float, ptr [[ARRAYIDX63_1]], align 4 +; CHECK-NEXT: [[TMP88:%.*]] = load float, ptr [[ARRAYIDX61_2]], align 4 +; CHECK-NEXT: [[TMP89:%.*]] = load float, ptr [[ARRAYIDX63_2]], align 4 +; CHECK-NEXT: [[TMP90:%.*]] = load float, ptr [[ARRAYIDX61_3]], align 4 +; CHECK-NEXT: [[TMP91:%.*]] = load float, ptr [[ARRAYIDX63_3]], align 4 +; 
CHECK-NEXT: [[TMP92:%.*]] = load float, ptr [[ARRAYIDX61_4]], align 4 +; CHECK-NEXT: [[TMP93:%.*]] = load float, ptr [[ARRAYIDX63_4]], align 4 +; CHECK-NEXT: [[TMP94:%.*]] = load float, ptr [[ARRAYIDX61_5]], align 4 +; CHECK-NEXT: [[TMP95:%.*]] = load float, ptr [[ARRAYIDX63_5]], align 4 +; CHECK-NEXT: [[TMP96:%.*]] = load float, ptr [[ARRAYIDX61_6]], align 4 +; CHECK-NEXT: [[TMP97:%.*]] = load float, ptr [[ARRAYIDX63_6]], align 4 +; CHECK-NEXT: [[TMP98:%.*]] = load float, ptr [[ARRAYIDX61_7]], align 4 +; CHECK-NEXT: [[TMP99:%.*]] = load float, ptr [[ARRAYIDX63_7]], align 4 +; CHECK-NEXT: [[TMP100]] = tail call float @llvm.fmuladd.f32(float [[TMP84]], float [[TMP85]], float [[DOTPHI26]]) +; CHECK-NEXT: [[TMP101]] = tail call float @llvm.fmuladd.f32(float [[TMP86]], float [[TMP87]], float [[DOTPHI27]]) +; CHECK-NEXT: [[TMP102]] = tail call float @llvm.fmuladd.f32(float [[TMP88]], float [[TMP89]], float [[DOTPHI28]]) +; CHECK-NEXT: [[TMP103]] = tail call float @llvm.fmuladd.f32(float [[TMP90]], float [[TMP91]], float [[DOTPHI29]]) +; CHECK-NEXT: [[TMP104]] = tail call float @llvm.fmuladd.f32(float [[TMP92]], float [[TMP93]], float [[DOTPHI30]]) +; CHECK-NEXT: [[TMP105]] = tail call float @llvm.fmuladd.f32(float [[TMP94]], float [[TMP95]], float [[DOTPHI31]]) +; CHECK-NEXT: [[TMP106]] = tail call float @llvm.fmuladd.f32(float [[TMP96]], float [[TMP97]], float [[DOTPHI32]]) +; CHECK-NEXT: [[TMP107]] = tail call float @llvm.fmuladd.f32(float [[TMP98]], float [[TMP99]], float [[DOTPHI33]]) +; CHECK-NEXT: [[CMP59_NOT_7:%.*]] = icmp ult i32 [[INC66_7]], [[ADD214]] +; CHECK-NEXT: br i1 [[CMP59_NOT_7]], label [[FOR_BODY60_7]], label [[FOR_END67_LOOPEXIT:%.*]] +; CHECK: for.end67.loopexit: +; CHECK-NEXT: [[SUM60:%.*]] = fadd float [[TMP100]], [[TMP101]] +; CHECK-NEXT: [[SUM61:%.*]] = fadd float [[TMP102]], [[TMP103]] +; CHECK-NEXT: [[SUM62:%.*]] = fadd float [[TMP104]], [[TMP105]] +; CHECK-NEXT: [[SUM63:%.*]] = fadd float [[TMP106]], [[TMP107]] +; CHECK-NEXT: [[SUM64:%.*]] = 
fadd float [[SUM60]], [[SUM61]] +; CHECK-NEXT: [[SUM65:%.*]] = fadd float [[SUM62]], [[SUM63]] +; CHECK-NEXT: [[SUM66:%.*]] = fadd float [[SUM64]], [[SUM65]] +; CHECK-NEXT: br label [[FOR_END16434]] +; CHECK: for.end16434: +; CHECK-NEXT: [[PHI_SUM35:%.*]] = phi i32 [ [[ADD56]], [[FOR_BODY50]] ], [ [[INC66_7]], [[FOR_END67_LOOPEXIT]] ] +; CHECK-NEXT: [[PHI_FLOAT36:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY50]] ], [ [[SUM66]], [[FOR_END67_LOOPEXIT]] ] +; CHECK-NEXT: store float [[PHI_FLOAT36]], ptr [[ARRAYIDX54]], align 4 +; CHECK-NEXT: [[CMP182_NOT58737:%.*]] = icmp ugt i32 [[PHI_SUM35]], [[SUB57]] +; CHECK-NEXT: br i1 [[CMP182_NOT58737]], label [[FOR_END67]], label [[FOR_BODY60_CLONE:%.*]] +; CHECK: for.body60.clone: +; CHECK-NEXT: [[TMP108:%.*]] = phi float [ [[TMP111:%.*]], [[FOR_BODY60_CLONE]] ], [ [[PHI_FLOAT36]], [[FOR_END16434]] ] +; CHECK-NEXT: [[K53_0131_CLONE:%.*]] = phi i32 [ [[INC66_CLONE:%.*]], [[FOR_BODY60_CLONE]] ], [ [[PHI_SUM35]], [[FOR_END16434]] ] +; CHECK-NEXT: [[ARRAYIDX61_CLONE:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K53_0131_CLONE]] +; CHECK-NEXT: [[TMP109:%.*]] = load float, ptr [[ARRAYIDX61_CLONE]], align 4 +; CHECK-NEXT: [[SUB62_CLONE:%.*]] = sub i32 [[K53_0131_CLONE]], [[ADD56]] +; CHECK-NEXT: [[ARRAYIDX63_CLONE:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB62_CLONE]] +; CHECK-NEXT: [[TMP110:%.*]] = load float, ptr [[ARRAYIDX63_CLONE]], align 4 +; CHECK-NEXT: [[TMP111]] = tail call float @llvm.fmuladd.f32(float [[TMP109]], float [[TMP110]], float [[TMP108]]) +; CHECK-NEXT: [[INC66_CLONE]] = add i32 [[K53_0131_CLONE]], 1 +; CHECK-NEXT: [[CMP59_NOT_CLONE:%.*]] = icmp ugt i32 [[INC66_CLONE]], [[SUB57]] +; CHECK-NEXT: br i1 [[CMP59_NOT_CLONE]], label [[FOR_COND_FOR_END_CRIT_EDGE38:%.*]], label [[FOR_BODY60_CLONE]] +; CHECK: for.cond.for.end_crit_edge38: +; CHECK-NEXT: store float [[TMP111]], ptr [[ARRAYIDX54]], align 4 +; CHECK-NEXT: br label [[FOR_END67]] ; CHECK: for.end67: ; CHECK-NEXT: [[INC69]] = add 
nsw i32 [[N44_0133]], 1 ; CHECK-NEXT: [[EXITCOND136_NOT:%.*]] = icmp eq i32 [[INC69]], [[SUB47]] -; CHECK-NEXT: br i1 [[EXITCOND136_NOT]], label [[RETURN]], label [[FOR_BODY50]] +; CHECK-NEXT: br i1 [[EXITCOND136_NOT]], label [[RETURN_LOOPEXIT:%.*]], label [[FOR_BODY50]] +; CHECK: return.loopexit: +; CHECK-NEXT: br label [[RETURN]] ; CHECK: return: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND45_PREHEADER]] ], [ 0, [[FOR_END67]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND45_PREHEADER]] ], [ 0, [[RETURN_LOOPEXIT]] ] ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/conv.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/conv.ll index 33a08dfbf9df1..86f9a33488455 100644 --- a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/conv.ll +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/conv.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=true < %s | FileCheck %s define dso_local noundef i32 @dsps_conv_f32_ansi(ptr noundef readonly %Signal, i32 noundef %siglen, ptr noundef readonly %Kernel, i32 noundef %kernlen, ptr noundef writeonly %convout) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_conv_f32_ansi( -; CHECK-SAME: ptr noundef readonly [[SIGNAL:%.*]], i32 noundef [[SIGLEN:%.*]], ptr noundef readonly [[KERNEL:%.*]], i32 noundef [[KERNLEN:%.*]], ptr noundef writeonly [[CONVOUT:%.*]]) local_unnamed_addr { +; CHECK-SAME: ptr noalias noundef readonly [[SIGNAL:%.*]], i32 noundef [[SIGLEN:%.*]], ptr noalias noundef readonly [[KERNEL:%.*]], i32 noundef [[KERNLEN:%.*]], 
ptr noalias noundef writeonly [[CONVOUT:%.*]]) local_unnamed_addr { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[SIGNAL]], null ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[KERNEL]], null @@ -21,34 +21,129 @@ define dso_local noundef i32 @dsps_conv_f32_ansi(ptr noundef readonly %Signal, i ; CHECK-NEXT: [[KERN_0:%.*]] = phi ptr [ [[SIGNAL]], [[IF_THEN8]] ], [ [[KERNEL]], [[IF_END6]] ] ; CHECK-NEXT: [[SIG_0:%.*]] = phi ptr [ [[KERNEL]], [[IF_THEN8]] ], [ [[SIGNAL]], [[IF_END6]] ] ; CHECK-NEXT: [[CMP10120:%.*]] = icmp sgt i32 [[LKERN_0]], 0 -; CHECK-NEXT: br i1 [[CMP10120]], label [[FOR_BODY:%.*]], label [[FOR_COND21_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[CMP10120]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND21_PREHEADER:%.*]] +; CHECK: for.body.preheader: +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.cond21.preheader.loopexit: +; CHECK-NEXT: br label [[FOR_COND21_PREHEADER]] ; CHECK: for.cond21.preheader: ; CHECK-NEXT: [[CMP22125:%.*]] = icmp slt i32 [[LKERN_0]], [[LSIG_0]] -; CHECK-NEXT: br i1 [[CMP22125]], label [[FOR_BODY24:%.*]], label [[FOR_COND42_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[CMP22125]], label [[FOR_BODY24_PREHEADER:%.*]], label [[FOR_COND42_PREHEADER:%.*]] +; CHECK: for.body24.preheader: +; CHECK-NEXT: [[DIV536:%.*]] = and i32 [[LKERN_0]], -16 +; CHECK-NEXT: br label [[FOR_BODY24:%.*]] ; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_END:%.*]] ], [ 1, [[IF_END9]] ] -; CHECK-NEXT: [[N_0121:%.*]] = phi i32 [ [[INC18:%.*]], [[FOR_END]] ], [ 0, [[IF_END9]] ] +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_END:%.*]] ], [ 1, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[N_0121:%.*]] = phi i32 [ [[INC18:%.*]], [[FOR_END]] ], [ 0, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[TMP0:%.*]] = and i32 [[N_0121]], -8 +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[N_0121]], 2147483640 +; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 0 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = 
getelementptr inbounds float, ptr [[CONVOUT]], i32 [[N_0121]] -; CHECK-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: br label [[FOR_BODY13:%.*]] -; CHECK: for.body13: -; CHECK-NEXT: [[K_0119:%.*]] = phi i32 [ 0, [[FOR_BODY]] ], [ [[INC:%.*]], [[FOR_BODY13]] ] -; CHECK-NEXT: [[TMP0:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP3:%.*]], [[FOR_BODY13]] ] -; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K_0119]] -; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX14]], align 4 +; CHECK-NEXT: br i1 [[TMP2]], label [[FOR_BODY13_CLONE_PREHEADER:%.*]], label [[FOR_BODY13_7:%.*]] +; CHECK: for.body13.7: +; CHECK-NEXT: [[K_0119:%.*]] = phi i32 [ 0, [[FOR_BODY]] ], [ [[INC_7:%.*]], [[FOR_BODY13_7]] ] +; CHECK-NEXT: [[DOTPHI:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP19:%.*]], [[FOR_BODY13_7]] ] +; CHECK-NEXT: [[DOTPHI1:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP20:%.*]], [[FOR_BODY13_7]] ] +; CHECK-NEXT: [[DOTPHI2:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP21:%.*]], [[FOR_BODY13_7]] ] +; CHECK-NEXT: [[DOTPHI3:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP22:%.*]], [[FOR_BODY13_7]] ] +; CHECK-NEXT: [[DOTPHI4:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP23:%.*]], [[FOR_BODY13_7]] ] +; CHECK-NEXT: [[DOTPHI5:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP24:%.*]], [[FOR_BODY13_7]] ] +; CHECK-NEXT: [[DOTPHI6:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP25:%.*]], [[FOR_BODY13_7]] ] +; CHECK-NEXT: [[DOTPHI7:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP26:%.*]], [[FOR_BODY13_7]] ] +; CHECK-NEXT: [[INC:%.*]] = add nuw nsw i32 [[K_0119]], 1 +; CHECK-NEXT: [[INC_1:%.*]] = add nuw nsw i32 [[K_0119]], 2 +; CHECK-NEXT: [[INC_2:%.*]] = add nuw nsw i32 [[K_0119]], 3 +; CHECK-NEXT: [[INC_3:%.*]] = add nuw nsw i32 [[K_0119]], 4 +; CHECK-NEXT: [[INC_4:%.*]] = add nuw nsw i32 [[K_0119]], 5 +; CHECK-NEXT: [[INC_5:%.*]] = 
add nuw nsw i32 [[K_0119]], 6 +; CHECK-NEXT: [[INC_6:%.*]] = add nuw nsw i32 [[K_0119]], 7 +; CHECK-NEXT: [[INC_7]] = add nuw nsw i32 [[K_0119]], 8 ; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[N_0121]], [[K_0119]] +; CHECK-NEXT: [[SUB_1:%.*]] = sub nsw i32 [[N_0121]], [[INC]] +; CHECK-NEXT: [[SUB_2:%.*]] = sub nsw i32 [[N_0121]], [[INC_1]] +; CHECK-NEXT: [[SUB_3:%.*]] = sub nsw i32 [[N_0121]], [[INC_2]] +; CHECK-NEXT: [[SUB_4:%.*]] = sub nsw i32 [[N_0121]], [[INC_3]] +; CHECK-NEXT: [[SUB_5:%.*]] = sub nsw i32 [[N_0121]], [[INC_4]] +; CHECK-NEXT: [[SUB_6:%.*]] = sub nsw i32 [[N_0121]], [[INC_5]] +; CHECK-NEXT: [[SUB_7:%.*]] = sub nsw i32 [[N_0121]], [[INC_6]] +; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K_0119]] ; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB]] -; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX15]], align 4 -; CHECK-NEXT: [[TMP3]] = tail call float @llvm.fmuladd.f32(float [[TMP1]], float [[TMP2]], float [[TMP0]]) -; CHECK-NEXT: store float [[TMP3]], ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[K_0119]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[INDVARS_IV]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY13]] +; CHECK-NEXT: [[ARRAYIDX14_1:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC]] +; CHECK-NEXT: [[ARRAYIDX15_1:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB_1]] +; CHECK-NEXT: [[ARRAYIDX14_2:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC_1]] +; CHECK-NEXT: [[ARRAYIDX15_2:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB_2]] +; CHECK-NEXT: [[ARRAYIDX14_3:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC_2]] +; CHECK-NEXT: [[ARRAYIDX15_3:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB_3]] +; CHECK-NEXT: [[ARRAYIDX14_4:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC_3]] +; CHECK-NEXT: 
[[ARRAYIDX15_4:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB_4]] +; CHECK-NEXT: [[ARRAYIDX14_5:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC_4]] +; CHECK-NEXT: [[ARRAYIDX15_5:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB_5]] +; CHECK-NEXT: [[ARRAYIDX14_6:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC_5]] +; CHECK-NEXT: [[ARRAYIDX15_6:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB_6]] +; CHECK-NEXT: [[ARRAYIDX14_7:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[INC_6]] +; CHECK-NEXT: [[ARRAYIDX15_7:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB_7]] +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX14]], align 4 +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX15]], align 4 +; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX14_1]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX15_1]], align 4 +; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX14_2]], align 4 +; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX15_2]], align 4 +; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX14_3]], align 4 +; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX15_3]], align 4 +; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX14_4]], align 4 +; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX15_4]], align 4 +; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX14_5]], align 4 +; CHECK-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX15_5]], align 4 +; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX14_6]], align 4 +; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX15_6]], align 4 +; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX14_7]], align 4 +; CHECK-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX15_7]], align 4 +; CHECK-NEXT: [[TMP19]] = tail call float @llvm.fmuladd.f32(float [[TMP3]], float [[TMP4]], float [[DOTPHI]]) +; CHECK-NEXT: [[TMP20]] = tail call float 
@llvm.fmuladd.f32(float [[TMP5]], float [[TMP6]], float [[DOTPHI1]]) +; CHECK-NEXT: [[TMP21]] = tail call float @llvm.fmuladd.f32(float [[TMP7]], float [[TMP8]], float [[DOTPHI2]]) +; CHECK-NEXT: [[TMP22]] = tail call float @llvm.fmuladd.f32(float [[TMP9]], float [[TMP10]], float [[DOTPHI3]]) +; CHECK-NEXT: [[TMP23]] = tail call float @llvm.fmuladd.f32(float [[TMP11]], float [[TMP12]], float [[DOTPHI4]]) +; CHECK-NEXT: [[TMP24]] = tail call float @llvm.fmuladd.f32(float [[TMP13]], float [[TMP14]], float [[DOTPHI5]]) +; CHECK-NEXT: [[TMP25]] = tail call float @llvm.fmuladd.f32(float [[TMP15]], float [[TMP16]], float [[DOTPHI6]]) +; CHECK-NEXT: [[TMP26]] = tail call float @llvm.fmuladd.f32(float [[TMP17]], float [[TMP18]], float [[DOTPHI7]]) +; CHECK-NEXT: [[EXITCOND_7:%.*]] = icmp ult i32 [[INC_7]], [[TMP1]] +; CHECK-NEXT: br i1 [[EXITCOND_7]], label [[FOR_BODY13_7]], label [[FOR_END8:%.*]] +; CHECK: for.end8: +; CHECK-NEXT: [[SUM:%.*]] = fadd float [[TMP19]], [[TMP20]] +; CHECK-NEXT: [[SUM23:%.*]] = fadd float [[TMP21]], [[TMP22]] +; CHECK-NEXT: [[SUM24:%.*]] = fadd float [[TMP23]], [[TMP24]] +; CHECK-NEXT: [[SUM25:%.*]] = fadd float [[TMP25]], [[TMP26]] +; CHECK-NEXT: [[SUM26:%.*]] = fadd float [[SUM]], [[SUM23]] +; CHECK-NEXT: [[SUM27:%.*]] = fadd float [[SUM24]], [[SUM25]] +; CHECK-NEXT: [[SUM28:%.*]] = fadd float [[SUM26]], [[SUM27]] +; CHECK-NEXT: store float [[SUM28]], ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: br i1 false, label [[FOR_END]], label [[FOR_BODY13_CLONE_PREHEADER]] +; CHECK: for.body13.clone.preheader: +; CHECK-NEXT: [[SUM_PHI:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[SUM28]], [[FOR_END8]] ] +; CHECK-NEXT: [[ADD_PHI:%.*]] = phi i32 [ 0, [[FOR_BODY]] ], [ [[TMP0]], [[FOR_END8]] ] +; CHECK-NEXT: br label [[FOR_BODY13_CLONE:%.*]] +; CHECK: for.body13.clone: +; CHECK-NEXT: [[K_0119_CLONE:%.*]] = phi i32 [ [[ADD_PHI]], [[FOR_BODY13_CLONE_PREHEADER]] ], [ [[INC_CLONE:%.*]], [[FOR_BODY13_CLONE]] ] +; CHECK-NEXT: [[TMP27:%.*]] = phi float [ 
[[SUM_PHI]], [[FOR_BODY13_CLONE_PREHEADER]] ], [ [[TMP30:%.*]], [[FOR_BODY13_CLONE]] ] +; CHECK-NEXT: [[ARRAYIDX14_CLONE:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K_0119_CLONE]] +; CHECK-NEXT: [[TMP28:%.*]] = load float, ptr [[ARRAYIDX14_CLONE]], align 4 +; CHECK-NEXT: [[SUB_CLONE:%.*]] = sub nsw i32 [[N_0121]], [[K_0119_CLONE]] +; CHECK-NEXT: [[ARRAYIDX15_CLONE:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB_CLONE]] +; CHECK-NEXT: [[TMP29:%.*]] = load float, ptr [[ARRAYIDX15_CLONE]], align 4 +; CHECK-NEXT: [[TMP30]] = tail call float @llvm.fmuladd.f32(float [[TMP28]], float [[TMP29]], float [[TMP27]]) +; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[K_0119_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[INDVARS_IV]] +; CHECK-NEXT: br i1 [[EXITCOND_CLONE]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]], label [[FOR_BODY13_CLONE]] +; CHECK: for.cond.for.end_crit_edge: +; CHECK-NEXT: store float [[TMP30]], ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: br label [[FOR_END]] ; CHECK: for.end: ; CHECK-NEXT: [[INC18]] = add nuw nsw i32 [[N_0121]], 1 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw i32 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[EXITCOND132_NOT:%.*]] = icmp eq i32 [[INC18]], [[LKERN_0]] -; CHECK-NEXT: br i1 [[EXITCOND132_NOT]], label [[FOR_COND21_PREHEADER]], label [[FOR_BODY]] +; CHECK-NEXT: br i1 [[EXITCOND132_NOT]], label [[FOR_COND21_PREHEADER_LOOPEXIT:%.*]], label [[FOR_BODY]] +; CHECK: for.cond42.preheader.loopexit: +; CHECK-NEXT: br label [[FOR_COND42_PREHEADER]] ; CHECK: for.cond42.preheader: ; CHECK-NEXT: [[ADD43:%.*]] = add i32 [[SIGLEN]], -1 ; CHECK-NEXT: [[SUB44:%.*]] = add i32 [[ADD43]], [[KERNLEN]] @@ -58,57 +153,308 @@ define dso_local noundef i32 @dsps_conv_f32_ansi(ptr noundef readonly %Signal, i ; CHECK-NEXT: [[SUB54:%.*]] = add nsw i32 [[LSIG_0]], -1 ; CHECK-NEXT: br label [[FOR_BODY47:%.*]] ; CHECK: for.body24: -; CHECK-NEXT: [[N20_0126:%.*]] = phi i32 [ [[INC39:%.*]], [[FOR_END37:%.*]] ], [ 
[[LKERN_0]], [[FOR_COND21_PREHEADER]] ] +; CHECK-NEXT: [[N20_0126:%.*]] = phi i32 [ [[INC39:%.*]], [[FOR_END37:%.*]] ], [ [[LKERN_0]], [[FOR_BODY24_PREHEADER]] ] ; CHECK-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds float, ptr [[CONVOUT]], i32 [[N20_0126]] -; CHECK-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX26]], align 4 ; CHECK-NEXT: [[SUB27:%.*]] = sub nuw nsw i32 [[N20_0126]], [[LKERN_0]] ; CHECK-NEXT: [[K25_0122:%.*]] = add i32 [[SUB27]], 1 -; CHECK-NEXT: [[CMP29_NOT123:%.*]] = icmp ugt i32 [[K25_0122]], [[N20_0126]] -; CHECK-NEXT: br i1 [[CMP29_NOT123]], label [[FOR_END37]], label [[FOR_BODY30:%.*]] -; CHECK: for.body30: -; CHECK-NEXT: [[TMP4:%.*]] = phi float [ [[TMP7:%.*]], [[FOR_BODY30]] ], [ 0.000000e+00, [[FOR_BODY24]] ] -; CHECK-NEXT: [[K25_0124:%.*]] = phi i32 [ [[K25_0:%.*]], [[FOR_BODY30]] ], [ [[K25_0122]], [[FOR_BODY24]] ] -; CHECK-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K25_0124]] -; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX31]], align 4 +; CHECK-NEXT: [[ADD60:%.*]] = add i32 [[K25_0122]], [[DIV536]] +; CHECK-NEXT: [[CMP29_NOT123:%.*]] = icmp ult i32 [[K25_0122]], [[ADD60]] +; CHECK-NEXT: br i1 [[CMP29_NOT123]], label [[FOR_BODY30_PREHEADER:%.*]], label [[FOR_END164:%.*]] +; CHECK: for.body30.preheader: +; CHECK-NEXT: br label [[FOR_BODY30_15:%.*]] +; CHECK: for.body30.15: +; CHECK-NEXT: [[K25_0124:%.*]] = phi i32 [ [[K25_0122]], [[FOR_BODY30_PREHEADER]] ], [ [[K25_0_15:%.*]], [[FOR_BODY30_15]] ] +; CHECK-NEXT: [[DOTPHI9:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY30_PREHEADER]] ], [ [[TMP63:%.*]], [[FOR_BODY30_15]] ] +; CHECK-NEXT: [[DOTPHI10:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY30_PREHEADER]] ], [ [[TMP64:%.*]], [[FOR_BODY30_15]] ] +; CHECK-NEXT: [[DOTPHI11:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY30_PREHEADER]] ], [ [[TMP65:%.*]], [[FOR_BODY30_15]] ] +; CHECK-NEXT: [[DOTPHI12:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY30_PREHEADER]] ], [ [[TMP66:%.*]], [[FOR_BODY30_15]] ] +; 
CHECK-NEXT: [[DOTPHI13:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY30_PREHEADER]] ], [ [[TMP67:%.*]], [[FOR_BODY30_15]] ] +; CHECK-NEXT: [[DOTPHI14:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY30_PREHEADER]] ], [ [[TMP68:%.*]], [[FOR_BODY30_15]] ] +; CHECK-NEXT: [[DOTPHI15:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY30_PREHEADER]] ], [ [[TMP69:%.*]], [[FOR_BODY30_15]] ] +; CHECK-NEXT: [[DOTPHI16:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY30_PREHEADER]] ], [ [[TMP70:%.*]], [[FOR_BODY30_15]] ] +; CHECK-NEXT: [[DOTPHI17:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY30_PREHEADER]] ], [ [[TMP71:%.*]], [[FOR_BODY30_15]] ] +; CHECK-NEXT: [[DOTPHI18:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY30_PREHEADER]] ], [ [[TMP72:%.*]], [[FOR_BODY30_15]] ] +; CHECK-NEXT: [[DOTPHI19:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY30_PREHEADER]] ], [ [[TMP73:%.*]], [[FOR_BODY30_15]] ] +; CHECK-NEXT: [[DOTPHI20:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY30_PREHEADER]] ], [ [[TMP74:%.*]], [[FOR_BODY30_15]] ] +; CHECK-NEXT: [[DOTPHI21:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY30_PREHEADER]] ], [ [[TMP75:%.*]], [[FOR_BODY30_15]] ] +; CHECK-NEXT: [[DOTPHI22:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY30_PREHEADER]] ], [ [[TMP76:%.*]], [[FOR_BODY30_15]] ] +; CHECK-NEXT: [[DOTPHI23:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY30_PREHEADER]] ], [ [[TMP77:%.*]], [[FOR_BODY30_15]] ] +; CHECK-NEXT: [[DOTPHI24:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY30_PREHEADER]] ], [ [[TMP78:%.*]], [[FOR_BODY30_15]] ] +; CHECK-NEXT: [[K25_0:%.*]] = add i32 [[K25_0124]], 1 +; CHECK-NEXT: [[K25_0_1:%.*]] = add i32 [[K25_0124]], 2 +; CHECK-NEXT: [[K25_0_2:%.*]] = add i32 [[K25_0124]], 3 +; CHECK-NEXT: [[K25_0_3:%.*]] = add i32 [[K25_0124]], 4 +; CHECK-NEXT: [[K25_0_4:%.*]] = add i32 [[K25_0124]], 5 +; CHECK-NEXT: [[K25_0_5:%.*]] = add i32 [[K25_0124]], 6 +; CHECK-NEXT: [[K25_0_6:%.*]] = add i32 [[K25_0124]], 7 +; CHECK-NEXT: [[K25_0_7:%.*]] = add i32 [[K25_0124]], 8 +; CHECK-NEXT: [[K25_0_8:%.*]] = add i32 
[[K25_0124]], 9 +; CHECK-NEXT: [[K25_0_9:%.*]] = add i32 [[K25_0124]], 10 +; CHECK-NEXT: [[K25_0_10:%.*]] = add i32 [[K25_0124]], 11 +; CHECK-NEXT: [[K25_0_11:%.*]] = add i32 [[K25_0124]], 12 +; CHECK-NEXT: [[K25_0_12:%.*]] = add i32 [[K25_0124]], 13 +; CHECK-NEXT: [[K25_0_13:%.*]] = add i32 [[K25_0124]], 14 +; CHECK-NEXT: [[K25_0_14:%.*]] = add i32 [[K25_0124]], 15 +; CHECK-NEXT: [[K25_0_15]] = add i32 [[K25_0124]], 16 ; CHECK-NEXT: [[SUB32:%.*]] = sub i32 [[N20_0126]], [[K25_0124]] +; CHECK-NEXT: [[SUB32_1:%.*]] = sub i32 [[N20_0126]], [[K25_0]] +; CHECK-NEXT: [[SUB32_2:%.*]] = sub i32 [[N20_0126]], [[K25_0_1]] +; CHECK-NEXT: [[SUB32_3:%.*]] = sub i32 [[N20_0126]], [[K25_0_2]] +; CHECK-NEXT: [[SUB32_4:%.*]] = sub i32 [[N20_0126]], [[K25_0_3]] +; CHECK-NEXT: [[SUB32_5:%.*]] = sub i32 [[N20_0126]], [[K25_0_4]] +; CHECK-NEXT: [[SUB32_6:%.*]] = sub i32 [[N20_0126]], [[K25_0_5]] +; CHECK-NEXT: [[SUB32_7:%.*]] = sub i32 [[N20_0126]], [[K25_0_6]] +; CHECK-NEXT: [[SUB32_8:%.*]] = sub i32 [[N20_0126]], [[K25_0_7]] +; CHECK-NEXT: [[SUB32_9:%.*]] = sub i32 [[N20_0126]], [[K25_0_8]] +; CHECK-NEXT: [[SUB32_10:%.*]] = sub i32 [[N20_0126]], [[K25_0_9]] +; CHECK-NEXT: [[SUB32_11:%.*]] = sub i32 [[N20_0126]], [[K25_0_10]] +; CHECK-NEXT: [[SUB32_12:%.*]] = sub i32 [[N20_0126]], [[K25_0_11]] +; CHECK-NEXT: [[SUB32_13:%.*]] = sub i32 [[N20_0126]], [[K25_0_12]] +; CHECK-NEXT: [[SUB32_14:%.*]] = sub i32 [[N20_0126]], [[K25_0_13]] +; CHECK-NEXT: [[SUB32_15:%.*]] = sub i32 [[N20_0126]], [[K25_0_14]] +; CHECK-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K25_0124]] ; CHECK-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB32]] -; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX33]], align 4 -; CHECK-NEXT: [[TMP7]] = tail call float @llvm.fmuladd.f32(float [[TMP5]], float [[TMP6]], float [[TMP4]]) -; CHECK-NEXT: store float [[TMP7]], ptr [[ARRAYIDX26]], align 4 -; CHECK-NEXT: [[K25_0]] = add i32 [[K25_0124]], 1 -; 
CHECK-NEXT: [[CMP29_NOT:%.*]] = icmp ugt i32 [[K25_0]], [[N20_0126]] -; CHECK-NEXT: br i1 [[CMP29_NOT]], label [[FOR_END37]], label [[FOR_BODY30]] +; CHECK-NEXT: [[ARRAYIDX31_1:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K25_0]] +; CHECK-NEXT: [[ARRAYIDX33_1:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB32_1]] +; CHECK-NEXT: [[ARRAYIDX31_2:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K25_0_1]] +; CHECK-NEXT: [[ARRAYIDX33_2:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB32_2]] +; CHECK-NEXT: [[ARRAYIDX31_3:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K25_0_2]] +; CHECK-NEXT: [[ARRAYIDX33_3:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB32_3]] +; CHECK-NEXT: [[ARRAYIDX31_4:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K25_0_3]] +; CHECK-NEXT: [[ARRAYIDX33_4:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB32_4]] +; CHECK-NEXT: [[ARRAYIDX31_5:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K25_0_4]] +; CHECK-NEXT: [[ARRAYIDX33_5:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB32_5]] +; CHECK-NEXT: [[ARRAYIDX31_6:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K25_0_5]] +; CHECK-NEXT: [[ARRAYIDX33_6:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB32_6]] +; CHECK-NEXT: [[ARRAYIDX31_7:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K25_0_6]] +; CHECK-NEXT: [[ARRAYIDX33_7:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB32_7]] +; CHECK-NEXT: [[ARRAYIDX31_8:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K25_0_7]] +; CHECK-NEXT: [[ARRAYIDX33_8:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB32_8]] +; CHECK-NEXT: [[ARRAYIDX31_9:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K25_0_8]] +; CHECK-NEXT: [[ARRAYIDX33_9:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB32_9]] +; CHECK-NEXT: [[ARRAYIDX31_10:%.*]] = getelementptr inbounds 
float, ptr [[SIG_0]], i32 [[K25_0_9]] +; CHECK-NEXT: [[ARRAYIDX33_10:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB32_10]] +; CHECK-NEXT: [[ARRAYIDX31_11:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K25_0_10]] +; CHECK-NEXT: [[ARRAYIDX33_11:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB32_11]] +; CHECK-NEXT: [[ARRAYIDX31_12:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K25_0_11]] +; CHECK-NEXT: [[ARRAYIDX33_12:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB32_12]] +; CHECK-NEXT: [[ARRAYIDX31_13:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K25_0_12]] +; CHECK-NEXT: [[ARRAYIDX33_13:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB32_13]] +; CHECK-NEXT: [[ARRAYIDX31_14:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K25_0_13]] +; CHECK-NEXT: [[ARRAYIDX33_14:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB32_14]] +; CHECK-NEXT: [[ARRAYIDX31_15:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K25_0_14]] +; CHECK-NEXT: [[ARRAYIDX33_15:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB32_15]] +; CHECK-NEXT: [[TMP31:%.*]] = load float, ptr [[ARRAYIDX31]], align 4 +; CHECK-NEXT: [[TMP32:%.*]] = load float, ptr [[ARRAYIDX33]], align 4 +; CHECK-NEXT: [[TMP33:%.*]] = load float, ptr [[ARRAYIDX31_1]], align 4 +; CHECK-NEXT: [[TMP34:%.*]] = load float, ptr [[ARRAYIDX33_1]], align 4 +; CHECK-NEXT: [[TMP35:%.*]] = load float, ptr [[ARRAYIDX31_2]], align 4 +; CHECK-NEXT: [[TMP36:%.*]] = load float, ptr [[ARRAYIDX33_2]], align 4 +; CHECK-NEXT: [[TMP37:%.*]] = load float, ptr [[ARRAYIDX31_3]], align 4 +; CHECK-NEXT: [[TMP38:%.*]] = load float, ptr [[ARRAYIDX33_3]], align 4 +; CHECK-NEXT: [[TMP39:%.*]] = load float, ptr [[ARRAYIDX31_4]], align 4 +; CHECK-NEXT: [[TMP40:%.*]] = load float, ptr [[ARRAYIDX33_4]], align 4 +; CHECK-NEXT: [[TMP41:%.*]] = load float, ptr [[ARRAYIDX31_5]], align 4 +; CHECK-NEXT: [[TMP42:%.*]] = load float, 
ptr [[ARRAYIDX33_5]], align 4 +; CHECK-NEXT: [[TMP43:%.*]] = load float, ptr [[ARRAYIDX31_6]], align 4 +; CHECK-NEXT: [[TMP44:%.*]] = load float, ptr [[ARRAYIDX33_6]], align 4 +; CHECK-NEXT: [[TMP45:%.*]] = load float, ptr [[ARRAYIDX31_7]], align 4 +; CHECK-NEXT: [[TMP46:%.*]] = load float, ptr [[ARRAYIDX33_7]], align 4 +; CHECK-NEXT: [[TMP47:%.*]] = load float, ptr [[ARRAYIDX31_8]], align 4 +; CHECK-NEXT: [[TMP48:%.*]] = load float, ptr [[ARRAYIDX33_8]], align 4 +; CHECK-NEXT: [[TMP49:%.*]] = load float, ptr [[ARRAYIDX31_9]], align 4 +; CHECK-NEXT: [[TMP50:%.*]] = load float, ptr [[ARRAYIDX33_9]], align 4 +; CHECK-NEXT: [[TMP51:%.*]] = load float, ptr [[ARRAYIDX31_10]], align 4 +; CHECK-NEXT: [[TMP52:%.*]] = load float, ptr [[ARRAYIDX33_10]], align 4 +; CHECK-NEXT: [[TMP53:%.*]] = load float, ptr [[ARRAYIDX31_11]], align 4 +; CHECK-NEXT: [[TMP54:%.*]] = load float, ptr [[ARRAYIDX33_11]], align 4 +; CHECK-NEXT: [[TMP55:%.*]] = load float, ptr [[ARRAYIDX31_12]], align 4 +; CHECK-NEXT: [[TMP56:%.*]] = load float, ptr [[ARRAYIDX33_12]], align 4 +; CHECK-NEXT: [[TMP57:%.*]] = load float, ptr [[ARRAYIDX31_13]], align 4 +; CHECK-NEXT: [[TMP58:%.*]] = load float, ptr [[ARRAYIDX33_13]], align 4 +; CHECK-NEXT: [[TMP59:%.*]] = load float, ptr [[ARRAYIDX31_14]], align 4 +; CHECK-NEXT: [[TMP60:%.*]] = load float, ptr [[ARRAYIDX33_14]], align 4 +; CHECK-NEXT: [[TMP61:%.*]] = load float, ptr [[ARRAYIDX31_15]], align 4 +; CHECK-NEXT: [[TMP62:%.*]] = load float, ptr [[ARRAYIDX33_15]], align 4 +; CHECK-NEXT: [[TMP63]] = tail call float @llvm.fmuladd.f32(float [[TMP31]], float [[TMP32]], float [[DOTPHI9]]) +; CHECK-NEXT: [[TMP64]] = tail call float @llvm.fmuladd.f32(float [[TMP33]], float [[TMP34]], float [[DOTPHI10]]) +; CHECK-NEXT: [[TMP65]] = tail call float @llvm.fmuladd.f32(float [[TMP35]], float [[TMP36]], float [[DOTPHI11]]) +; CHECK-NEXT: [[TMP66]] = tail call float @llvm.fmuladd.f32(float [[TMP37]], float [[TMP38]], float [[DOTPHI12]]) +; CHECK-NEXT: [[TMP67]] = tail call 
float @llvm.fmuladd.f32(float [[TMP39]], float [[TMP40]], float [[DOTPHI13]]) +; CHECK-NEXT: [[TMP68]] = tail call float @llvm.fmuladd.f32(float [[TMP41]], float [[TMP42]], float [[DOTPHI14]]) +; CHECK-NEXT: [[TMP69]] = tail call float @llvm.fmuladd.f32(float [[TMP43]], float [[TMP44]], float [[DOTPHI15]]) +; CHECK-NEXT: [[TMP70]] = tail call float @llvm.fmuladd.f32(float [[TMP45]], float [[TMP46]], float [[DOTPHI16]]) +; CHECK-NEXT: [[TMP71]] = tail call float @llvm.fmuladd.f32(float [[TMP47]], float [[TMP48]], float [[DOTPHI17]]) +; CHECK-NEXT: [[TMP72]] = tail call float @llvm.fmuladd.f32(float [[TMP49]], float [[TMP50]], float [[DOTPHI18]]) +; CHECK-NEXT: [[TMP73]] = tail call float @llvm.fmuladd.f32(float [[TMP51]], float [[TMP52]], float [[DOTPHI19]]) +; CHECK-NEXT: [[TMP74]] = tail call float @llvm.fmuladd.f32(float [[TMP53]], float [[TMP54]], float [[DOTPHI20]]) +; CHECK-NEXT: [[TMP75]] = tail call float @llvm.fmuladd.f32(float [[TMP55]], float [[TMP56]], float [[DOTPHI21]]) +; CHECK-NEXT: [[TMP76]] = tail call float @llvm.fmuladd.f32(float [[TMP57]], float [[TMP58]], float [[DOTPHI22]]) +; CHECK-NEXT: [[TMP77]] = tail call float @llvm.fmuladd.f32(float [[TMP59]], float [[TMP60]], float [[DOTPHI23]]) +; CHECK-NEXT: [[TMP78]] = tail call float @llvm.fmuladd.f32(float [[TMP61]], float [[TMP62]], float [[DOTPHI24]]) +; CHECK-NEXT: [[CMP29_NOT_15:%.*]] = icmp ult i32 [[K25_0_15]], [[ADD60]] +; CHECK-NEXT: br i1 [[CMP29_NOT_15]], label [[FOR_BODY30_15]], label [[FOR_END37_LOOPEXIT:%.*]] +; CHECK: for.end37.loopexit: +; CHECK-NEXT: [[SUM45:%.*]] = fadd float [[TMP63]], [[TMP64]] +; CHECK-NEXT: [[SUM46:%.*]] = fadd float [[TMP65]], [[TMP66]] +; CHECK-NEXT: [[SUM47:%.*]] = fadd float [[TMP67]], [[TMP68]] +; CHECK-NEXT: [[SUM48:%.*]] = fadd float [[TMP69]], [[TMP70]] +; CHECK-NEXT: [[SUM49:%.*]] = fadd float [[TMP71]], [[TMP72]] +; CHECK-NEXT: [[SUM50:%.*]] = fadd float [[TMP73]], [[TMP74]] +; CHECK-NEXT: [[SUM51:%.*]] = fadd float [[TMP75]], [[TMP76]] +; 
CHECK-NEXT: [[SUM52:%.*]] = fadd float [[TMP77]], [[TMP78]] +; CHECK-NEXT: [[SUM53:%.*]] = fadd float [[SUM45]], [[SUM46]] +; CHECK-NEXT: [[SUM54:%.*]] = fadd float [[SUM47]], [[SUM48]] +; CHECK-NEXT: [[SUM55:%.*]] = fadd float [[SUM49]], [[SUM50]] +; CHECK-NEXT: [[SUM56:%.*]] = fadd float [[SUM51]], [[SUM52]] +; CHECK-NEXT: [[SUM57:%.*]] = fadd float [[SUM53]], [[SUM54]] +; CHECK-NEXT: [[SUM58:%.*]] = fadd float [[SUM55]], [[SUM56]] +; CHECK-NEXT: [[SUM59:%.*]] = fadd float [[SUM57]], [[SUM58]] +; CHECK-NEXT: br label [[FOR_END164]] +; CHECK: for.end164: +; CHECK-NEXT: [[PHI_SUM:%.*]] = phi i32 [ [[K25_0122]], [[FOR_BODY24]] ], [ [[K25_0_15]], [[FOR_END37_LOOPEXIT]] ] +; CHECK-NEXT: [[PHI_FLOAT:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY24]] ], [ [[SUM59]], [[FOR_END37_LOOPEXIT]] ] +; CHECK-NEXT: store float [[PHI_FLOAT]], ptr [[ARRAYIDX26]], align 4 +; CHECK-NEXT: [[CMP182_NOT587:%.*]] = icmp ugt i32 [[PHI_SUM]], [[N20_0126]] +; CHECK-NEXT: br i1 [[CMP182_NOT587]], label [[FOR_END37]], label [[FOR_BODY30_CLONE:%.*]] +; CHECK: for.body30.clone: +; CHECK-NEXT: [[TMP79:%.*]] = phi float [ [[TMP82:%.*]], [[FOR_BODY30_CLONE]] ], [ [[PHI_FLOAT]], [[FOR_END164]] ] +; CHECK-NEXT: [[K25_0124_CLONE:%.*]] = phi i32 [ [[K25_0_CLONE:%.*]], [[FOR_BODY30_CLONE]] ], [ [[PHI_SUM]], [[FOR_END164]] ] +; CHECK-NEXT: [[ARRAYIDX31_CLONE:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K25_0124_CLONE]] +; CHECK-NEXT: [[TMP80:%.*]] = load float, ptr [[ARRAYIDX31_CLONE]], align 4 +; CHECK-NEXT: [[SUB32_CLONE:%.*]] = sub i32 [[N20_0126]], [[K25_0124_CLONE]] +; CHECK-NEXT: [[ARRAYIDX33_CLONE:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB32_CLONE]] +; CHECK-NEXT: [[TMP81:%.*]] = load float, ptr [[ARRAYIDX33_CLONE]], align 4 +; CHECK-NEXT: [[TMP82]] = tail call float @llvm.fmuladd.f32(float [[TMP80]], float [[TMP81]], float [[TMP79]]) +; CHECK-NEXT: [[K25_0_CLONE]] = add i32 [[K25_0124_CLONE]], 1 +; CHECK-NEXT: [[CMP29_NOT_CLONE:%.*]] = icmp ugt i32 
[[K25_0_CLONE]], [[N20_0126]] +; CHECK-NEXT: br i1 [[CMP29_NOT_CLONE]], label [[FOR_COND_FOR_END_CRIT_EDGE25:%.*]], label [[FOR_BODY30_CLONE]] +; CHECK: for.cond.for.end_crit_edge25: +; CHECK-NEXT: store float [[TMP82]], ptr [[ARRAYIDX26]], align 4 +; CHECK-NEXT: br label [[FOR_END37]] ; CHECK: for.end37: ; CHECK-NEXT: [[INC39]] = add nuw nsw i32 [[N20_0126]], 1 ; CHECK-NEXT: [[EXITCOND133_NOT:%.*]] = icmp eq i32 [[INC39]], [[LSIG_0]] -; CHECK-NEXT: br i1 [[EXITCOND133_NOT]], label [[FOR_COND42_PREHEADER]], label [[FOR_BODY24]] +; CHECK-NEXT: br i1 [[EXITCOND133_NOT]], label [[FOR_COND42_PREHEADER_LOOPEXIT:%.*]], label [[FOR_BODY24]] ; CHECK: for.body47: ; CHECK-NEXT: [[N41_0131:%.*]] = phi i32 [ [[LSIG_0]], [[FOR_BODY47_LR_PH]] ], [ [[INC66:%.*]], [[FOR_END64:%.*]] ] ; CHECK-NEXT: [[ARRAYIDX51:%.*]] = getelementptr inbounds float, ptr [[CONVOUT]], i32 [[N41_0131]] -; CHECK-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX51]], align 4 ; CHECK-NEXT: [[SUB52:%.*]] = sub nsw i32 [[N41_0131]], [[LKERN_0]] ; CHECK-NEXT: [[K50_0127:%.*]] = add i32 [[SUB52]], 1 -; CHECK-NEXT: [[CMP56_NOT128:%.*]] = icmp ugt i32 [[K50_0127]], [[SUB54]] -; CHECK-NEXT: br i1 [[CMP56_NOT128]], label [[FOR_END64]], label [[FOR_BODY57:%.*]] -; CHECK: for.body57: -; CHECK-NEXT: [[TMP8:%.*]] = phi float [ [[TMP11:%.*]], [[FOR_BODY57]] ], [ 0.000000e+00, [[FOR_BODY47]] ] -; CHECK-NEXT: [[K50_0129:%.*]] = phi i32 [ [[K50_0:%.*]], [[FOR_BODY57]] ], [ [[K50_0127]], [[FOR_BODY47]] ] -; CHECK-NEXT: [[ARRAYIDX58:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K50_0129]] -; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX58]], align 4 +; CHECK-NEXT: [[ADD207_NEG:%.*]] = xor i32 [[SUB52]], -1 +; CHECK-NEXT: [[ADD211:%.*]] = add i32 [[ADD207_NEG]], [[LSIG_0]] +; CHECK-NEXT: [[DIV212535:%.*]] = and i32 [[ADD211]], -8 +; CHECK-NEXT: [[ADD214:%.*]] = add i32 [[DIV212535]], [[K50_0127]] +; CHECK-NEXT: [[CMP56_NOT128:%.*]] = icmp ult i32 [[K50_0127]], [[ADD214]] +; CHECK-NEXT: br i1 
[[CMP56_NOT128]], label [[FOR_BODY57_PREHEADER:%.*]], label [[FOR_END16434:%.*]] +; CHECK: for.body57.preheader: +; CHECK-NEXT: br label [[FOR_BODY57_7:%.*]] +; CHECK: for.body57.7: +; CHECK-NEXT: [[K50_0129:%.*]] = phi i32 [ [[K50_0127]], [[FOR_BODY57_PREHEADER]] ], [ [[K50_0_7:%.*]], [[FOR_BODY57_7]] ] +; CHECK-NEXT: [[DOTPHI26:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY57_PREHEADER]] ], [ [[TMP99:%.*]], [[FOR_BODY57_7]] ] +; CHECK-NEXT: [[DOTPHI27:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY57_PREHEADER]] ], [ [[TMP100:%.*]], [[FOR_BODY57_7]] ] +; CHECK-NEXT: [[DOTPHI28:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY57_PREHEADER]] ], [ [[TMP101:%.*]], [[FOR_BODY57_7]] ] +; CHECK-NEXT: [[DOTPHI29:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY57_PREHEADER]] ], [ [[TMP102:%.*]], [[FOR_BODY57_7]] ] +; CHECK-NEXT: [[DOTPHI30:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY57_PREHEADER]] ], [ [[TMP103:%.*]], [[FOR_BODY57_7]] ] +; CHECK-NEXT: [[DOTPHI31:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY57_PREHEADER]] ], [ [[TMP104:%.*]], [[FOR_BODY57_7]] ] +; CHECK-NEXT: [[DOTPHI32:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY57_PREHEADER]] ], [ [[TMP105:%.*]], [[FOR_BODY57_7]] ] +; CHECK-NEXT: [[DOTPHI33:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY57_PREHEADER]] ], [ [[TMP106:%.*]], [[FOR_BODY57_7]] ] +; CHECK-NEXT: [[K50_0:%.*]] = add i32 [[K50_0129]], 1 +; CHECK-NEXT: [[K50_0_1:%.*]] = add i32 [[K50_0129]], 2 +; CHECK-NEXT: [[K50_0_2:%.*]] = add i32 [[K50_0129]], 3 +; CHECK-NEXT: [[K50_0_3:%.*]] = add i32 [[K50_0129]], 4 +; CHECK-NEXT: [[K50_0_4:%.*]] = add i32 [[K50_0129]], 5 +; CHECK-NEXT: [[K50_0_5:%.*]] = add i32 [[K50_0129]], 6 +; CHECK-NEXT: [[K50_0_6:%.*]] = add i32 [[K50_0129]], 7 +; CHECK-NEXT: [[K50_0_7]] = add i32 [[K50_0129]], 8 ; CHECK-NEXT: [[SUB59:%.*]] = sub i32 [[N41_0131]], [[K50_0129]] +; CHECK-NEXT: [[SUB59_1:%.*]] = sub i32 [[N41_0131]], [[K50_0]] +; CHECK-NEXT: [[SUB59_2:%.*]] = sub i32 [[N41_0131]], [[K50_0_1]] +; CHECK-NEXT: [[SUB59_3:%.*]] = sub i32 
[[N41_0131]], [[K50_0_2]] +; CHECK-NEXT: [[SUB59_4:%.*]] = sub i32 [[N41_0131]], [[K50_0_3]] +; CHECK-NEXT: [[SUB59_5:%.*]] = sub i32 [[N41_0131]], [[K50_0_4]] +; CHECK-NEXT: [[SUB59_6:%.*]] = sub i32 [[N41_0131]], [[K50_0_5]] +; CHECK-NEXT: [[SUB59_7:%.*]] = sub i32 [[N41_0131]], [[K50_0_6]] +; CHECK-NEXT: [[ARRAYIDX58:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K50_0129]] ; CHECK-NEXT: [[ARRAYIDX60:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB59]] -; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX60]], align 4 -; CHECK-NEXT: [[TMP11]] = tail call float @llvm.fmuladd.f32(float [[TMP9]], float [[TMP10]], float [[TMP8]]) -; CHECK-NEXT: store float [[TMP11]], ptr [[ARRAYIDX51]], align 4 -; CHECK-NEXT: [[K50_0]] = add i32 [[K50_0129]], 1 -; CHECK-NEXT: [[CMP56_NOT:%.*]] = icmp ugt i32 [[K50_0]], [[SUB54]] -; CHECK-NEXT: br i1 [[CMP56_NOT]], label [[FOR_END64]], label [[FOR_BODY57]] +; CHECK-NEXT: [[ARRAYIDX58_1:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K50_0]] +; CHECK-NEXT: [[ARRAYIDX60_1:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB59_1]] +; CHECK-NEXT: [[ARRAYIDX58_2:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K50_0_1]] +; CHECK-NEXT: [[ARRAYIDX60_2:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB59_2]] +; CHECK-NEXT: [[ARRAYIDX58_3:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K50_0_2]] +; CHECK-NEXT: [[ARRAYIDX60_3:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB59_3]] +; CHECK-NEXT: [[ARRAYIDX58_4:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K50_0_3]] +; CHECK-NEXT: [[ARRAYIDX60_4:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB59_4]] +; CHECK-NEXT: [[ARRAYIDX58_5:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K50_0_4]] +; CHECK-NEXT: [[ARRAYIDX60_5:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB59_5]] +; CHECK-NEXT: [[ARRAYIDX58_6:%.*]] = getelementptr inbounds float, ptr 
[[SIG_0]], i32 [[K50_0_5]] +; CHECK-NEXT: [[ARRAYIDX60_6:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB59_6]] +; CHECK-NEXT: [[ARRAYIDX58_7:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 [[K50_0_6]] +; CHECK-NEXT: [[ARRAYIDX60_7:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB59_7]] +; CHECK-NEXT: [[TMP83:%.*]] = load float, ptr [[ARRAYIDX58]], align 4 +; CHECK-NEXT: [[TMP84:%.*]] = load float, ptr [[ARRAYIDX60]], align 4 +; CHECK-NEXT: [[TMP85:%.*]] = load float, ptr [[ARRAYIDX58_1]], align 4 +; CHECK-NEXT: [[TMP86:%.*]] = load float, ptr [[ARRAYIDX60_1]], align 4 +; CHECK-NEXT: [[TMP87:%.*]] = load float, ptr [[ARRAYIDX58_2]], align 4 +; CHECK-NEXT: [[TMP88:%.*]] = load float, ptr [[ARRAYIDX60_2]], align 4 +; CHECK-NEXT: [[TMP89:%.*]] = load float, ptr [[ARRAYIDX58_3]], align 4 +; CHECK-NEXT: [[TMP90:%.*]] = load float, ptr [[ARRAYIDX60_3]], align 4 +; CHECK-NEXT: [[TMP91:%.*]] = load float, ptr [[ARRAYIDX58_4]], align 4 +; CHECK-NEXT: [[TMP92:%.*]] = load float, ptr [[ARRAYIDX60_4]], align 4 +; CHECK-NEXT: [[TMP93:%.*]] = load float, ptr [[ARRAYIDX58_5]], align 4 +; CHECK-NEXT: [[TMP94:%.*]] = load float, ptr [[ARRAYIDX60_5]], align 4 +; CHECK-NEXT: [[TMP95:%.*]] = load float, ptr [[ARRAYIDX58_6]], align 4 +; CHECK-NEXT: [[TMP96:%.*]] = load float, ptr [[ARRAYIDX60_6]], align 4 +; CHECK-NEXT: [[TMP97:%.*]] = load float, ptr [[ARRAYIDX58_7]], align 4 +; CHECK-NEXT: [[TMP98:%.*]] = load float, ptr [[ARRAYIDX60_7]], align 4 +; CHECK-NEXT: [[TMP99]] = tail call float @llvm.fmuladd.f32(float [[TMP83]], float [[TMP84]], float [[DOTPHI26]]) +; CHECK-NEXT: [[TMP100]] = tail call float @llvm.fmuladd.f32(float [[TMP85]], float [[TMP86]], float [[DOTPHI27]]) +; CHECK-NEXT: [[TMP101]] = tail call float @llvm.fmuladd.f32(float [[TMP87]], float [[TMP88]], float [[DOTPHI28]]) +; CHECK-NEXT: [[TMP102]] = tail call float @llvm.fmuladd.f32(float [[TMP89]], float [[TMP90]], float [[DOTPHI29]]) +; CHECK-NEXT: [[TMP103]] = tail call float 
@llvm.fmuladd.f32(float [[TMP91]], float [[TMP92]], float [[DOTPHI30]]) +; CHECK-NEXT: [[TMP104]] = tail call float @llvm.fmuladd.f32(float [[TMP93]], float [[TMP94]], float [[DOTPHI31]]) +; CHECK-NEXT: [[TMP105]] = tail call float @llvm.fmuladd.f32(float [[TMP95]], float [[TMP96]], float [[DOTPHI32]]) +; CHECK-NEXT: [[TMP106]] = tail call float @llvm.fmuladd.f32(float [[TMP97]], float [[TMP98]], float [[DOTPHI33]]) +; CHECK-NEXT: [[CMP56_NOT_7:%.*]] = icmp ult i32 [[K50_0_7]], [[ADD214]] +; CHECK-NEXT: br i1 [[CMP56_NOT_7]], label [[FOR_BODY57_7]], label [[FOR_END64_LOOPEXIT:%.*]] +; CHECK: for.end64.loopexit: +; CHECK-NEXT: [[SUM60:%.*]] = fadd float [[TMP99]], [[TMP100]] +; CHECK-NEXT: [[SUM61:%.*]] = fadd float [[TMP101]], [[TMP102]] +; CHECK-NEXT: [[SUM62:%.*]] = fadd float [[TMP103]], [[TMP104]] +; CHECK-NEXT: [[SUM63:%.*]] = fadd float [[TMP105]], [[TMP106]] +; CHECK-NEXT: [[SUM64:%.*]] = fadd float [[SUM60]], [[SUM61]] +; CHECK-NEXT: [[SUM65:%.*]] = fadd float [[SUM62]], [[SUM63]] +; CHECK-NEXT: [[SUM66:%.*]] = fadd float [[SUM64]], [[SUM65]] +; CHECK-NEXT: br label [[FOR_END16434]] +; CHECK: for.end16434: +; CHECK-NEXT: [[PHI_SUM35:%.*]] = phi i32 [ [[K50_0127]], [[FOR_BODY47]] ], [ [[K50_0_7]], [[FOR_END64_LOOPEXIT]] ] +; CHECK-NEXT: [[PHI_FLOAT36:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY47]] ], [ [[SUM66]], [[FOR_END64_LOOPEXIT]] ] +; CHECK-NEXT: store float [[PHI_FLOAT36]], ptr [[ARRAYIDX51]], align 4 +; CHECK-NEXT: [[CMP182_NOT58737:%.*]] = icmp ugt i32 [[PHI_SUM35]], [[SUB54]] +; CHECK-NEXT: br i1 [[CMP182_NOT58737]], label [[FOR_END64]], label [[FOR_BODY57_CLONE:%.*]] +; CHECK: for.body57.clone: +; CHECK-NEXT: [[TMP107:%.*]] = phi float [ [[TMP110:%.*]], [[FOR_BODY57_CLONE]] ], [ [[PHI_FLOAT36]], [[FOR_END16434]] ] +; CHECK-NEXT: [[K50_0129_CLONE:%.*]] = phi i32 [ [[K50_0_CLONE:%.*]], [[FOR_BODY57_CLONE]] ], [ [[PHI_SUM35]], [[FOR_END16434]] ] +; CHECK-NEXT: [[ARRAYIDX58_CLONE:%.*]] = getelementptr inbounds float, ptr [[SIG_0]], i32 
[[K50_0129_CLONE]] +; CHECK-NEXT: [[TMP108:%.*]] = load float, ptr [[ARRAYIDX58_CLONE]], align 4 +; CHECK-NEXT: [[SUB59_CLONE:%.*]] = sub i32 [[N41_0131]], [[K50_0129_CLONE]] +; CHECK-NEXT: [[ARRAYIDX60_CLONE:%.*]] = getelementptr inbounds float, ptr [[KERN_0]], i32 [[SUB59_CLONE]] +; CHECK-NEXT: [[TMP109:%.*]] = load float, ptr [[ARRAYIDX60_CLONE]], align 4 +; CHECK-NEXT: [[TMP110]] = tail call float @llvm.fmuladd.f32(float [[TMP108]], float [[TMP109]], float [[TMP107]]) +; CHECK-NEXT: [[K50_0_CLONE]] = add i32 [[K50_0129_CLONE]], 1 +; CHECK-NEXT: [[CMP56_NOT_CLONE:%.*]] = icmp ugt i32 [[K50_0_CLONE]], [[SUB54]] +; CHECK-NEXT: br i1 [[CMP56_NOT_CLONE]], label [[FOR_COND_FOR_END_CRIT_EDGE38:%.*]], label [[FOR_BODY57_CLONE]] +; CHECK: for.cond.for.end_crit_edge38: +; CHECK-NEXT: store float [[TMP110]], ptr [[ARRAYIDX51]], align 4 +; CHECK-NEXT: br label [[FOR_END64]] ; CHECK: for.end64: ; CHECK-NEXT: [[INC66]] = add nsw i32 [[N41_0131]], 1 ; CHECK-NEXT: [[EXITCOND134_NOT:%.*]] = icmp eq i32 [[INC66]], [[SUB44]] -; CHECK-NEXT: br i1 [[EXITCOND134_NOT]], label [[RETURN]], label [[FOR_BODY47]] +; CHECK-NEXT: br i1 [[EXITCOND134_NOT]], label [[RETURN_LOOPEXIT:%.*]], label [[FOR_BODY47]] +; CHECK: return.loopexit: +; CHECK-NEXT: br label [[RETURN]] ; CHECK: return: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND42_PREHEADER]] ], [ 0, [[FOR_END64]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND42_PREHEADER]] ], [ 0, [[RETURN_LOOPEXIT]] ] ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/corr.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/corr.ll index cd8f939112a54..3091bef36bf89 100644 --- a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/corr.ll +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/corr.ll @@ -1,9 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 
-; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=true < %s | FileCheck %s define dso_local noundef i32 @dsps_corr_f32_ansi(ptr noundef readonly %Signal, i32 noundef %siglen, ptr noundef readonly %Pattern, i32 noundef %patlen, ptr noundef writeonly %dest) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_corr_f32_ansi( -; CHECK-SAME: ptr noundef readonly [[SIGNAL:%.*]], i32 noundef [[SIGLEN:%.*]], ptr noundef readonly [[PATTERN:%.*]], i32 noundef [[PATLEN:%.*]], ptr noundef writeonly [[DEST:%.*]]) local_unnamed_addr { +; CHECK-SAME: ptr noalias noundef readonly [[SIGNAL:%.*]], i32 noundef [[SIGLEN:%.*]], ptr noalias noundef readonly [[PATTERN:%.*]], i32 noundef [[PATLEN:%.*]], ptr noalias noundef writeonly [[DEST:%.*]]) local_unnamed_addr { ; CHECK-NEXT: entry: +; CHECK-NEXT: [[PATLEN_NEG:%.*]] = sub i32 0, [[PATLEN]] ; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[SIGNAL]], null ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[PATTERN]], null ; CHECK-NEXT: [[OR_COND:%.*]] = or i1 [[CMP]], [[CMP1]] @@ -11,39 +12,232 @@ define dso_local noundef i32 @dsps_corr_f32_ansi(ptr noundef readonly %Signal, i ; CHECK-NEXT: [[OR_COND33:%.*]] = or i1 [[OR_COND]], [[CMP4]] ; CHECK-NEXT: [[CMP7:%.*]] = icmp slt i32 [[SIGLEN]], [[PATLEN]] ; CHECK-NEXT: [[OR_COND34:%.*]] = or i1 [[CMP7]], [[OR_COND33]] -; CHECK-NEXT: br i1 [[OR_COND34]], label [[RETURN:%.*]], label [[FOR_COND_PREHEADER:%.*]] -; CHECK: for.cond.preheader: +; CHECK-NEXT: br i1 [[OR_COND34]], label [[RETURN:%.*]], label [[IF_END:%.*]] +; CHECK: if.end: ; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[SIGLEN]], [[PATLEN]] -; CHECK-NEXT: [[CMP1235_NOT:%.*]] = icmp eq i32 [[PATLEN]], 0 -; CHECK-NEXT: br i1 [[CMP1235_NOT]], label [[FOR_COND11_PREHEADER_PREHEADER:%.*]], label [[FOR_COND11_PREHEADER_US:%.*]] 
+; CHECK-NEXT: [[SUB6:%.*]] = add nsw i32 [[SUB]], -15 +; CHECK-NEXT: [[CMP1235_NOT:%.*]] = icmp sgt i32 [[SUB]], 15 +; CHECK-NEXT: br i1 [[CMP1235_NOT]], label [[FOR_COND8_PREHEADER_LR_PH:%.*]], label [[FOR_COND91_PREHEADER:%.*]] +; CHECK: for.cond8.preheader.lr.ph: +; CHECK-NEXT: [[CMP9242:%.*]] = icmp sgt i32 [[PATLEN]], 0 +; CHECK-NEXT: [[SCEVGEP62:%.*]] = getelementptr i8, ptr [[PATTERN]], i32 60 +; CHECK-NEXT: [[SCEVGEP66:%.*]] = getelementptr i8, ptr [[PATTERN]], i32 56 +; CHECK-NEXT: [[SCEVGEP68:%.*]] = getelementptr i8, ptr [[PATTERN]], i32 52 +; CHECK-NEXT: [[SCEVGEP70:%.*]] = getelementptr i8, ptr [[PATTERN]], i32 48 +; CHECK-NEXT: [[SCEVGEP72:%.*]] = getelementptr i8, ptr [[PATTERN]], i32 44 +; CHECK-NEXT: [[SCEVGEP74:%.*]] = getelementptr i8, ptr [[PATTERN]], i32 40 +; CHECK-NEXT: [[SCEVGEP76:%.*]] = getelementptr i8, ptr [[PATTERN]], i32 36 +; CHECK-NEXT: [[SCEVGEP78:%.*]] = getelementptr i8, ptr [[PATTERN]], i32 32 +; CHECK-NEXT: [[SCEVGEP80:%.*]] = getelementptr i8, ptr [[PATTERN]], i32 28 +; CHECK-NEXT: [[SCEVGEP82:%.*]] = getelementptr i8, ptr [[PATTERN]], i32 24 +; CHECK-NEXT: [[SCEVGEP84:%.*]] = getelementptr i8, ptr [[PATTERN]], i32 20 +; CHECK-NEXT: [[SCEVGEP86:%.*]] = getelementptr i8, ptr [[PATTERN]], i32 16 +; CHECK-NEXT: [[SCEVGEP88:%.*]] = getelementptr i8, ptr [[PATTERN]], i32 12 +; CHECK-NEXT: [[SCEVGEP90:%.*]] = getelementptr i8, ptr [[PATTERN]], i32 8 +; CHECK-NEXT: [[SCEVGEP92:%.*]] = getelementptr i8, ptr [[PATTERN]], i32 4 +; CHECK-NEXT: br label [[FOR_COND8_PREHEADER:%.*]] +; CHECK: for.cond8.preheader: +; CHECK-NEXT: [[LSR_IV95:%.*]] = phi ptr [ [[SCEVGEP96:%.*]], [[FOR_COND_CLEANUP:%.*]] ], [ [[SIGNAL]], [[FOR_COND8_PREHEADER_LR_PH]] ] +; CHECK-NEXT: [[N_0276:%.*]] = phi i32 [ 0, [[FOR_COND8_PREHEADER_LR_PH]] ], [ [[ADD89:%.*]], [[FOR_COND_CLEANUP]] ] +; CHECK-NEXT: br i1 [[CMP9242]], label [[FOR_BODY10_LR_PH:%.*]], label [[FOR_COND_CLEANUP]] +; CHECK: for.body10.lr.ph: +; CHECK-NEXT: br label [[FOR_BODY14_US_UNROLL:%.*]] +; 
CHECK: for.cond91.preheader.loopexit: +; CHECK-NEXT: br label [[FOR_COND91_PREHEADER]] +; CHECK: for.cond91.preheader: +; CHECK-NEXT: [[N_0_LCSSA:%.*]] = phi i32 [ 0, [[IF_END]] ], [ [[ADD89]], [[FOR_COND91_PREHEADER_LOOPEXIT:%.*]] ] +; CHECK-NEXT: [[CMP92_NOT282:%.*]] = icmp sgt i32 [[N_0_LCSSA]], [[SUB]] +; CHECK-NEXT: br i1 [[CMP92_NOT282]], label [[RETURN]], label [[FOR_COND95_PREHEADER_LR_PH:%.*]] +; CHECK: for.cond95.preheader.lr.ph: +; CHECK-NEXT: [[CMP92678:%.*]] = icmp sgt i32 [[PATLEN]], 0 +; CHECK-NEXT: br i1 [[CMP92678]], label [[FOR_COND11_PREHEADER_US_PREHEADER:%.*]], label [[FOR_COND11_PREHEADER_PREHEADER:%.*]] ; CHECK: for.cond11.preheader.preheader: -; CHECK-NEXT: [[TMP0:%.*]] = shl i32 [[SIGLEN]], 2 -; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[TMP0]], 4 -; CHECK-NEXT: tail call void @llvm.memset.p0.i32(ptr nonnull align 4 [[DEST]], i8 0, i32 [[TMP1]], i1 false) +; CHECK-NEXT: [[TMP0:%.*]] = shl i32 [[N_0_LCSSA]], 2 +; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DEST]], i32 [[TMP0]] +; CHECK-NEXT: [[N_0_LCSSA_NEG:%.*]] = sub i32 0, [[N_0_LCSSA]] +; CHECK-NEXT: [[DOTNEG:%.*]] = add i32 [[SIGLEN]], 1 +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[DOTNEG]], [[PATLEN_NEG]] +; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], [[N_0_LCSSA_NEG]] +; CHECK-NEXT: [[TMP3:%.*]] = shl i32 [[TMP2]], 2 +; CHECK-NEXT: tail call void @llvm.memset.p0.i32(ptr nonnull align 4 [[SCEVGEP]], i8 0, i32 [[TMP3]], i1 false) ; CHECK-NEXT: br label [[RETURN]] +; CHECK: for.cond11.preheader.us.preheader: +; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[SIGLEN]], 1 +; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[TMP4]], [[PATLEN_NEG]] +; CHECK-NEXT: [[TMP6:%.*]] = shl i32 [[N_0_LCSSA]], 2 +; CHECK-NEXT: [[SCEVGEP102:%.*]] = getelementptr i8, ptr [[SIGNAL]], i32 [[TMP6]] +; CHECK-NEXT: br label [[FOR_COND11_PREHEADER_US:%.*]] ; CHECK: for.cond11.preheader.us: -; CHECK-NEXT: [[N_038_US:%.*]] = phi i32 [ [[INC18_US:%.*]], [[FOR_COND11_FOR_COND_CLEANUP13_CRIT_EDGE_US:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ] 
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[SIGNAL]], i32 [[N_038_US]] +; CHECK-NEXT: [[LSR_IV103:%.*]] = phi ptr [ [[SCEVGEP104:%.*]], [[FOR_COND11_FOR_COND_CLEANUP13_CRIT_EDGE_US:%.*]] ], [ [[SCEVGEP102]], [[FOR_COND11_PREHEADER_US_PREHEADER]] ] +; CHECK-NEXT: [[N_038_US:%.*]] = phi i32 [ [[INC18_US:%.*]], [[FOR_COND11_FOR_COND_CLEANUP13_CRIT_EDGE_US]] ], [ [[N_0_LCSSA]], [[FOR_COND11_PREHEADER_US_PREHEADER]] ] ; CHECK-NEXT: br label [[FOR_BODY14_US:%.*]] ; CHECK: for.body14.us: -; CHECK-NEXT: [[M_037_US:%.*]] = phi i32 [ 0, [[FOR_COND11_PREHEADER_US]] ], [ [[INC_US:%.*]], [[FOR_BODY14_US]] ] -; CHECK-NEXT: [[K_CORR_036_US:%.*]] = phi float [ 0.000000e+00, [[FOR_COND11_PREHEADER_US]] ], [ [[TMP5:%.*]], [[FOR_BODY14_US]] ] -; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr float, ptr [[TMP2]], i32 [[M_037_US]] -; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX_US]], align 4 -; CHECK-NEXT: [[ARRAYIDX15_US:%.*]] = getelementptr inbounds float, ptr [[PATTERN]], i32 [[M_037_US]] -; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX15_US]], align 4 -; CHECK-NEXT: [[TMP5]] = tail call float @llvm.fmuladd.f32(float [[TMP3]], float [[TMP4]], float [[K_CORR_036_US]]) -; CHECK-NEXT: [[INC_US]] = add nuw i32 [[M_037_US]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC_US]], [[PATLEN]] +; CHECK-NEXT: [[LSR_IV105:%.*]] = phi ptr [ [[SCEVGEP106:%.*]], [[FOR_BODY14_US]] ], [ [[LSR_IV103]], [[FOR_COND11_PREHEADER_US]] ] +; CHECK-NEXT: [[LSR_IV100:%.*]] = phi ptr [ [[SCEVGEP101:%.*]], [[FOR_BODY14_US]] ], [ [[PATTERN]], [[FOR_COND11_PREHEADER_US]] ] +; CHECK-NEXT: [[LSR_IV98:%.*]] = phi i32 [ [[LSR_IV_NEXT99:%.*]], [[FOR_BODY14_US]] ], [ [[PATLEN]], [[FOR_COND11_PREHEADER_US]] ] +; CHECK-NEXT: [[K_CORR_036_US:%.*]] = phi float [ 0.000000e+00, [[FOR_COND11_PREHEADER_US]] ], [ [[TMP9:%.*]], [[FOR_BODY14_US]] ] +; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[LSR_IV105]], align 4 +; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[LSR_IV100]], align 4 +; 
CHECK-NEXT: [[TMP9]] = tail call float @llvm.fmuladd.f32(float [[TMP7]], float [[TMP8]], float [[K_CORR_036_US]]) +; CHECK-NEXT: [[LSR_IV_NEXT99]] = add i32 [[LSR_IV98]], -1 +; CHECK-NEXT: [[SCEVGEP101]] = getelementptr i8, ptr [[LSR_IV100]], i32 4 +; CHECK-NEXT: [[SCEVGEP106]] = getelementptr i8, ptr [[LSR_IV105]], i32 4 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[LSR_IV_NEXT99]], 0 ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND11_FOR_COND_CLEANUP13_CRIT_EDGE_US]], label [[FOR_BODY14_US]] ; CHECK: for.cond11.for.cond.cleanup13_crit_edge.us: ; CHECK-NEXT: [[ARRAYIDX16_US:%.*]] = getelementptr inbounds float, ptr [[DEST]], i32 [[N_038_US]] -; CHECK-NEXT: store float [[TMP5]], ptr [[ARRAYIDX16_US]], align 4 +; CHECK-NEXT: store float [[TMP9]], ptr [[ARRAYIDX16_US]], align 4 ; CHECK-NEXT: [[INC18_US]] = add nuw i32 [[N_038_US]], 1 -; CHECK-NEXT: [[CMP10_NOT_US_NOT:%.*]] = icmp ult i32 [[N_038_US]], [[SUB]] -; CHECK-NEXT: br i1 [[CMP10_NOT_US_NOT]], label [[FOR_COND11_PREHEADER_US]], label [[RETURN]] +; CHECK-NEXT: [[SCEVGEP104]] = getelementptr i8, ptr [[LSR_IV103]], i32 4 +; CHECK-NEXT: [[CMP10_NOT_US_NOT:%.*]] = icmp eq i32 [[INC18_US]], [[TMP5]] +; CHECK-NEXT: br i1 [[CMP10_NOT_US_NOT]], label [[RETURN_LOOPEXIT:%.*]], label [[FOR_COND11_PREHEADER_US]] +; CHECK: for.cond.cleanup.loopexit: +; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: [[TMP10:%.*]] = phi float [ 0.000000e+00, [[FOR_COND8_PREHEADER]] ], [ [[TMP58:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT:%.*]] ] +; CHECK-NEXT: [[TMP11:%.*]] = phi float [ 0.000000e+00, [[FOR_COND8_PREHEADER]] ], [ [[TMP59:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[TMP12:%.*]] = phi float [ 0.000000e+00, [[FOR_COND8_PREHEADER]] ], [ [[TMP60:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[TMP13:%.*]] = phi float [ 0.000000e+00, [[FOR_COND8_PREHEADER]] ], [ [[TMP61:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[TMP14:%.*]] = phi float [ 0.000000e+00, 
[[FOR_COND8_PREHEADER]] ], [ [[TMP62:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[TMP15:%.*]] = phi float [ 0.000000e+00, [[FOR_COND8_PREHEADER]] ], [ [[TMP63:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[TMP16:%.*]] = phi float [ 0.000000e+00, [[FOR_COND8_PREHEADER]] ], [ [[TMP64:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[TMP17:%.*]] = phi float [ 0.000000e+00, [[FOR_COND8_PREHEADER]] ], [ [[TMP65:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[TMP18:%.*]] = phi float [ 0.000000e+00, [[FOR_COND8_PREHEADER]] ], [ [[TMP66:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[TMP19:%.*]] = phi float [ 0.000000e+00, [[FOR_COND8_PREHEADER]] ], [ [[TMP67:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[TMP20:%.*]] = phi float [ 0.000000e+00, [[FOR_COND8_PREHEADER]] ], [ [[TMP68:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[TMP21:%.*]] = phi float [ 0.000000e+00, [[FOR_COND8_PREHEADER]] ], [ [[TMP69:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[TMP22:%.*]] = phi float [ 0.000000e+00, [[FOR_COND8_PREHEADER]] ], [ [[TMP70:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[TMP23:%.*]] = phi float [ 0.000000e+00, [[FOR_COND8_PREHEADER]] ], [ [[TMP71:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[TMP24:%.*]] = phi float [ 0.000000e+00, [[FOR_COND8_PREHEADER]] ], [ [[TMP72:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[TMP25:%.*]] = phi float [ 0.000000e+00, [[FOR_COND8_PREHEADER]] ], [ [[TMP73:%.*]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; CHECK-NEXT: [[ADD89]] = add nuw nsw i32 [[N_0276]], 16 +; CHECK-NEXT: [[ADD:%.*]] = or disjoint i32 [[N_0276]], 1 +; CHECK-NEXT: [[ADD17:%.*]] = or disjoint i32 [[N_0276]], 2 +; CHECK-NEXT: [[ADD19:%.*]] = or disjoint i32 [[N_0276]], 3 +; CHECK-NEXT: [[ADD21:%.*]] = or disjoint i32 [[N_0276]], 4 +; CHECK-NEXT: [[ADD23:%.*]] = or disjoint i32 [[N_0276]], 5 +; CHECK-NEXT: [[ADD25:%.*]] = or disjoint i32 [[N_0276]], 6 +; CHECK-NEXT: [[ADD27:%.*]] = or 
disjoint i32 [[N_0276]], 7 +; CHECK-NEXT: [[ADD29:%.*]] = or disjoint i32 [[N_0276]], 8 +; CHECK-NEXT: [[ADD31:%.*]] = or disjoint i32 [[N_0276]], 9 +; CHECK-NEXT: [[ADD33:%.*]] = or disjoint i32 [[N_0276]], 10 +; CHECK-NEXT: [[ADD35:%.*]] = or disjoint i32 [[N_0276]], 11 +; CHECK-NEXT: [[ADD37:%.*]] = or disjoint i32 [[N_0276]], 12 +; CHECK-NEXT: [[ADD39:%.*]] = or disjoint i32 [[N_0276]], 13 +; CHECK-NEXT: [[ADD41:%.*]] = or disjoint i32 [[N_0276]], 14 +; CHECK-NEXT: [[ADD43:%.*]] = or disjoint i32 [[N_0276]], 15 +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr float, ptr [[DEST]], i32 [[N_0276]] +; CHECK-NEXT: [[ARRAYIDX16:%.*]] = getelementptr float, ptr [[DEST]], i32 [[ADD]] +; CHECK-NEXT: [[ARRAYIDX18:%.*]] = getelementptr float, ptr [[DEST]], i32 [[ADD17]] +; CHECK-NEXT: [[ARRAYIDX20:%.*]] = getelementptr float, ptr [[DEST]], i32 [[ADD19]] +; CHECK-NEXT: [[ARRAYIDX22:%.*]] = getelementptr float, ptr [[DEST]], i32 [[ADD21]] +; CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr float, ptr [[DEST]], i32 [[ADD23]] +; CHECK-NEXT: [[ARRAYIDX26:%.*]] = getelementptr float, ptr [[DEST]], i32 [[ADD25]] +; CHECK-NEXT: [[ARRAYIDX28:%.*]] = getelementptr float, ptr [[DEST]], i32 [[ADD27]] +; CHECK-NEXT: [[ARRAYIDX30:%.*]] = getelementptr float, ptr [[DEST]], i32 [[ADD29]] +; CHECK-NEXT: [[ARRAYIDX32:%.*]] = getelementptr float, ptr [[DEST]], i32 [[ADD31]] +; CHECK-NEXT: [[ARRAYIDX34:%.*]] = getelementptr float, ptr [[DEST]], i32 [[ADD33]] +; CHECK-NEXT: [[ARRAYIDX36:%.*]] = getelementptr float, ptr [[DEST]], i32 [[ADD35]] +; CHECK-NEXT: [[ARRAYIDX38:%.*]] = getelementptr float, ptr [[DEST]], i32 [[ADD37]] +; CHECK-NEXT: [[ARRAYIDX40:%.*]] = getelementptr float, ptr [[DEST]], i32 [[ADD39]] +; CHECK-NEXT: [[ARRAYIDX42:%.*]] = getelementptr float, ptr [[DEST]], i32 [[ADD41]] +; CHECK-NEXT: [[ARRAYIDX44:%.*]] = getelementptr float, ptr [[DEST]], i32 [[ADD43]] +; CHECK-NEXT: store float [[TMP10]], ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: store float [[TMP11]], ptr [[ARRAYIDX16]], 
align 4 +; CHECK-NEXT: store float [[TMP12]], ptr [[ARRAYIDX18]], align 4 +; CHECK-NEXT: store float [[TMP13]], ptr [[ARRAYIDX20]], align 4 +; CHECK-NEXT: store float [[TMP14]], ptr [[ARRAYIDX22]], align 4 +; CHECK-NEXT: store float [[TMP15]], ptr [[ARRAYIDX24]], align 4 +; CHECK-NEXT: store float [[TMP16]], ptr [[ARRAYIDX26]], align 4 +; CHECK-NEXT: store float [[TMP17]], ptr [[ARRAYIDX28]], align 4 +; CHECK-NEXT: store float [[TMP18]], ptr [[ARRAYIDX30]], align 4 +; CHECK-NEXT: store float [[TMP19]], ptr [[ARRAYIDX32]], align 4 +; CHECK-NEXT: store float [[TMP20]], ptr [[ARRAYIDX34]], align 4 +; CHECK-NEXT: store float [[TMP21]], ptr [[ARRAYIDX36]], align 4 +; CHECK-NEXT: store float [[TMP22]], ptr [[ARRAYIDX38]], align 4 +; CHECK-NEXT: store float [[TMP23]], ptr [[ARRAYIDX40]], align 4 +; CHECK-NEXT: store float [[TMP24]], ptr [[ARRAYIDX42]], align 4 +; CHECK-NEXT: store float [[TMP25]], ptr [[ARRAYIDX44]], align 4 +; CHECK-NEXT: [[SCEVGEP96]] = getelementptr i8, ptr [[LSR_IV95]], i32 64 +; CHECK-NEXT: [[CMP745:%.*]] = icmp slt i32 [[ADD89]], [[SUB6]] +; CHECK-NEXT: br i1 [[CMP745]], label [[FOR_COND8_PREHEADER]], label [[FOR_COND91_PREHEADER_LOOPEXIT]] +; CHECK: for.body14.us.unroll: +; CHECK-NEXT: [[LSR_IV63:%.*]] = phi i32 [ 0, [[FOR_BODY10_LR_PH]] ], [ [[LSR_IV_NEXT64:%.*]], [[FOR_BODY14_US_UNROLL]] ] +; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32 [ [[PATLEN]], [[FOR_BODY10_LR_PH]] ], [ [[LSR_IV_NEXT:%.*]], [[FOR_BODY14_US_UNROLL]] ] +; CHECK-NEXT: [[K_CORR_036_US_UNROLL:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY10_LR_PH]] ], [ [[TMP58]], [[FOR_BODY14_US_UNROLL]] ] +; CHECK-NEXT: [[TMP26:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY10_LR_PH]] ], [ [[TMP59]], [[FOR_BODY14_US_UNROLL]] ] +; CHECK-NEXT: [[TMP27:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY10_LR_PH]] ], [ [[TMP60]], [[FOR_BODY14_US_UNROLL]] ] +; CHECK-NEXT: [[TMP28:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY10_LR_PH]] ], [ [[TMP61]], [[FOR_BODY14_US_UNROLL]] ] +; CHECK-NEXT: [[TMP29:%.*]] = phi float [ 
0.000000e+00, [[FOR_BODY10_LR_PH]] ], [ [[TMP62]], [[FOR_BODY14_US_UNROLL]] ] +; CHECK-NEXT: [[TMP30:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY10_LR_PH]] ], [ [[TMP63]], [[FOR_BODY14_US_UNROLL]] ] +; CHECK-NEXT: [[TMP31:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY10_LR_PH]] ], [ [[TMP64]], [[FOR_BODY14_US_UNROLL]] ] +; CHECK-NEXT: [[TMP32:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY10_LR_PH]] ], [ [[TMP65]], [[FOR_BODY14_US_UNROLL]] ] +; CHECK-NEXT: [[TMP33:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY10_LR_PH]] ], [ [[TMP66]], [[FOR_BODY14_US_UNROLL]] ] +; CHECK-NEXT: [[TMP34:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY10_LR_PH]] ], [ [[TMP67]], [[FOR_BODY14_US_UNROLL]] ] +; CHECK-NEXT: [[TMP35:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY10_LR_PH]] ], [ [[TMP68]], [[FOR_BODY14_US_UNROLL]] ] +; CHECK-NEXT: [[TMP36:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY10_LR_PH]] ], [ [[TMP69]], [[FOR_BODY14_US_UNROLL]] ] +; CHECK-NEXT: [[TMP37:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY10_LR_PH]] ], [ [[TMP70]], [[FOR_BODY14_US_UNROLL]] ] +; CHECK-NEXT: [[TMP38:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY10_LR_PH]] ], [ [[TMP71]], [[FOR_BODY14_US_UNROLL]] ] +; CHECK-NEXT: [[TMP39:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY10_LR_PH]] ], [ [[TMP72]], [[FOR_BODY14_US_UNROLL]] ] +; CHECK-NEXT: [[TMP40:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY10_LR_PH]] ], [ [[TMP73]], [[FOR_BODY14_US_UNROLL]] ] +; CHECK-NEXT: [[SCEVGEP94:%.*]] = getelementptr i8, ptr [[PATTERN]], i32 [[LSR_IV63]] +; CHECK-NEXT: [[SCEVGEP97:%.*]] = getelementptr i8, ptr [[LSR_IV95]], i32 [[LSR_IV63]] +; CHECK-NEXT: [[SCEVGEP93:%.*]] = getelementptr i8, ptr [[SCEVGEP92]], i32 [[LSR_IV63]] +; CHECK-NEXT: [[SCEVGEP91:%.*]] = getelementptr i8, ptr [[SCEVGEP90]], i32 [[LSR_IV63]] +; CHECK-NEXT: [[SCEVGEP89:%.*]] = getelementptr i8, ptr [[SCEVGEP88]], i32 [[LSR_IV63]] +; CHECK-NEXT: [[SCEVGEP87:%.*]] = getelementptr i8, ptr [[SCEVGEP86]], i32 [[LSR_IV63]] +; CHECK-NEXT: [[SCEVGEP85:%.*]] = getelementptr i8, ptr 
[[SCEVGEP84]], i32 [[LSR_IV63]] +; CHECK-NEXT: [[SCEVGEP83:%.*]] = getelementptr i8, ptr [[SCEVGEP82]], i32 [[LSR_IV63]] +; CHECK-NEXT: [[SCEVGEP81:%.*]] = getelementptr i8, ptr [[SCEVGEP80]], i32 [[LSR_IV63]] +; CHECK-NEXT: [[SCEVGEP79:%.*]] = getelementptr i8, ptr [[SCEVGEP78]], i32 [[LSR_IV63]] +; CHECK-NEXT: [[SCEVGEP77:%.*]] = getelementptr i8, ptr [[SCEVGEP76]], i32 [[LSR_IV63]] +; CHECK-NEXT: [[SCEVGEP75:%.*]] = getelementptr i8, ptr [[SCEVGEP74]], i32 [[LSR_IV63]] +; CHECK-NEXT: [[SCEVGEP73:%.*]] = getelementptr i8, ptr [[SCEVGEP72]], i32 [[LSR_IV63]] +; CHECK-NEXT: [[SCEVGEP71:%.*]] = getelementptr i8, ptr [[SCEVGEP70]], i32 [[LSR_IV63]] +; CHECK-NEXT: [[SCEVGEP69:%.*]] = getelementptr i8, ptr [[SCEVGEP68]], i32 [[LSR_IV63]] +; CHECK-NEXT: [[SCEVGEP67:%.*]] = getelementptr i8, ptr [[SCEVGEP66]], i32 [[LSR_IV63]] +; CHECK-NEXT: [[SCEVGEP65:%.*]] = getelementptr i8, ptr [[SCEVGEP62]], i32 [[LSR_IV63]] +; CHECK-NEXT: [[TMP41:%.*]] = load float, ptr [[SCEVGEP94]], align 4 +; CHECK-NEXT: [[TMP42:%.*]] = load float, ptr [[SCEVGEP97]], align 4 +; CHECK-NEXT: [[TMP43:%.*]] = load float, ptr [[SCEVGEP93]], align 4 +; CHECK-NEXT: [[TMP44:%.*]] = load float, ptr [[SCEVGEP91]], align 4 +; CHECK-NEXT: [[TMP45:%.*]] = load float, ptr [[SCEVGEP89]], align 4 +; CHECK-NEXT: [[TMP46:%.*]] = load float, ptr [[SCEVGEP87]], align 4 +; CHECK-NEXT: [[TMP47:%.*]] = load float, ptr [[SCEVGEP85]], align 4 +; CHECK-NEXT: [[TMP48:%.*]] = load float, ptr [[SCEVGEP83]], align 4 +; CHECK-NEXT: [[TMP49:%.*]] = load float, ptr [[SCEVGEP81]], align 4 +; CHECK-NEXT: [[TMP50:%.*]] = load float, ptr [[SCEVGEP79]], align 4 +; CHECK-NEXT: [[TMP51:%.*]] = load float, ptr [[SCEVGEP77]], align 4 +; CHECK-NEXT: [[TMP52:%.*]] = load float, ptr [[SCEVGEP75]], align 4 +; CHECK-NEXT: [[TMP53:%.*]] = load float, ptr [[SCEVGEP73]], align 4 +; CHECK-NEXT: [[TMP54:%.*]] = load float, ptr [[SCEVGEP71]], align 4 +; CHECK-NEXT: [[TMP55:%.*]] = load float, ptr [[SCEVGEP69]], align 4 +; CHECK-NEXT: 
[[TMP56:%.*]] = load float, ptr [[SCEVGEP67]], align 4 +; CHECK-NEXT: [[TMP57:%.*]] = load float, ptr [[SCEVGEP65]], align 4 +; CHECK-NEXT: [[TMP58]] = tail call float @llvm.fmuladd.f32(float [[TMP42]], float [[TMP41]], float [[K_CORR_036_US_UNROLL]]) +; CHECK-NEXT: [[TMP59]] = tail call float @llvm.fmuladd.f32(float [[TMP43]], float [[TMP41]], float [[TMP26]]) +; CHECK-NEXT: [[TMP60]] = tail call float @llvm.fmuladd.f32(float [[TMP44]], float [[TMP41]], float [[TMP27]]) +; CHECK-NEXT: [[TMP61]] = tail call float @llvm.fmuladd.f32(float [[TMP45]], float [[TMP41]], float [[TMP28]]) +; CHECK-NEXT: [[TMP62]] = tail call float @llvm.fmuladd.f32(float [[TMP46]], float [[TMP41]], float [[TMP29]]) +; CHECK-NEXT: [[TMP63]] = tail call float @llvm.fmuladd.f32(float [[TMP47]], float [[TMP41]], float [[TMP30]]) +; CHECK-NEXT: [[TMP64]] = tail call float @llvm.fmuladd.f32(float [[TMP48]], float [[TMP41]], float [[TMP31]]) +; CHECK-NEXT: [[TMP65]] = tail call float @llvm.fmuladd.f32(float [[TMP49]], float [[TMP41]], float [[TMP32]]) +; CHECK-NEXT: [[TMP66]] = tail call float @llvm.fmuladd.f32(float [[TMP50]], float [[TMP41]], float [[TMP33]]) +; CHECK-NEXT: [[TMP67]] = tail call float @llvm.fmuladd.f32(float [[TMP51]], float [[TMP41]], float [[TMP34]]) +; CHECK-NEXT: [[TMP68]] = tail call float @llvm.fmuladd.f32(float [[TMP52]], float [[TMP41]], float [[TMP35]]) +; CHECK-NEXT: [[TMP69]] = tail call float @llvm.fmuladd.f32(float [[TMP53]], float [[TMP41]], float [[TMP36]]) +; CHECK-NEXT: [[TMP70]] = tail call float @llvm.fmuladd.f32(float [[TMP54]], float [[TMP41]], float [[TMP37]]) +; CHECK-NEXT: [[TMP71]] = tail call float @llvm.fmuladd.f32(float [[TMP55]], float [[TMP41]], float [[TMP38]]) +; CHECK-NEXT: [[TMP72]] = tail call float @llvm.fmuladd.f32(float [[TMP56]], float [[TMP41]], float [[TMP39]]) +; CHECK-NEXT: [[TMP73]] = tail call float @llvm.fmuladd.f32(float [[TMP57]], float [[TMP41]], float [[TMP40]]) +; CHECK-NEXT: [[LSR_IV_NEXT]] = add i32 [[LSR_IV]], -1 +; 
CHECK-NEXT: [[LSR_IV_NEXT64]] = add nuw i32 [[LSR_IV63]], 4 +; CHECK-NEXT: [[EXITCOND_NOT_UNROLL:%.*]] = icmp eq i32 [[LSR_IV_NEXT]], 0 +; CHECK-NEXT: br i1 [[EXITCOND_NOT_UNROLL]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY14_US_UNROLL]] +; CHECK: return.loopexit: +; CHECK-NEXT: br label [[RETURN]] ; CHECK: return: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND11_PREHEADER_PREHEADER]] ], [ 0, [[FOR_COND11_FOR_COND_CLEANUP13_CRIT_EDGE_US]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND11_PREHEADER_PREHEADER]] ], [ 0, [[FOR_COND91_PREHEADER]] ], [ 0, [[RETURN_LOOPEXIT]] ] ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod.ll index 2fe5f8edd108c..af95e0500cf2c 100644 --- a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod.ll +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod.ll @@ -1,37 +1,126 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=true < %s | FileCheck %s define dso_local noundef i32 @dsps_dotprod_f32_ansi(ptr nocapture noundef readonly %src1, ptr nocapture noundef readonly %src2, ptr nocapture noundef writeonly %dest, i32 noundef %len) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_dotprod_f32_ansi( -; CHECK-SAME: ptr nocapture noundef readonly [[SRC1:%.*]], ptr nocapture noundef readonly [[SRC2:%.*]], ptr nocapture noundef writeonly [[DEST:%.*]], i32 noundef [[LEN:%.*]]) local_unnamed_addr { +; CHECK-SAME: ptr noalias nocapture noundef readonly [[SRC1:%.*]], ptr noalias 
nocapture noundef readonly [[SRC2:%.*]], ptr noalias nocapture noundef writeonly [[DEST:%.*]], i32 noundef [[LEN:%.*]]) local_unnamed_addr { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = icmp sgt i32 [[LEN]], 2 -; CHECK-NEXT: br i1 [[TMP0]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[TMP0]], label [[FOR_COND_PREHEADER1:%.*]], label [[FOR_COND_PREHEADER:%.*]] ; CHECK: for.cond.preheader: -; CHECK-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[LEN]], 0 -; CHECK-NEXT: br i1 [[CMP6]], label [[FOR_BODY_CLONE:%.*]], label [[IF_END:%.*]] +; CHECK-NEXT: [[CMP47110:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP47110]], label [[FOR_BODY_CLONE:%.*]], label [[IF_END:%.*]] ; CHECK: if.end: -; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_PREHEADER]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ], [ [[TMP6:%.*]], [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_PREHEADER]] ], [ [[ADD44:%.*]], [[FOR_END37:%.*]] ], [ [[TMP31:%.*]], [[FOR_BODY_CLONE]] ] ; CHECK-NEXT: store float [[ACC_0_LCSSA]], ptr [[DEST]], align 4 ; CHECK-NEXT: ret i32 0 +; CHECK: for.cond.preheader1: +; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[LEN]], -7 +; CHECK-NEXT: [[CMP1113:%.*]] = icmp ugt i32 [[LEN]], 7 +; CHECK-NEXT: br i1 [[CMP1113]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND31_PREHEADER:%.*]] +; CHECK: for.body.preheader: +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[LEN]], 2147483640 +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.cond31.preheader: +; CHECK-NEXT: [[ACC0_0_LCSSA:%.*]] = phi float [ [[TMP4:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER1]] ] +; CHECK-NEXT: [[ACC1_0_LCSSA:%.*]] = phi float [ [[TMP7:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER1]] ] +; CHECK-NEXT: [[ACC2_0_LCSSA:%.*]] = phi float [ [[TMP10:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER1]] ] +; CHECK-NEXT: [[ACC3_0_LCSSA:%.*]] = phi float [ [[TMP13:%.*]], [[FOR_BODY]] ], [ 
0.000000e+00, [[FOR_COND_PREHEADER1]] ] +; CHECK-NEXT: [[ACC4_0_LCSSA:%.*]] = phi float [ [[TMP16:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER1]] ] +; CHECK-NEXT: [[ACC5_0_LCSSA:%.*]] = phi float [ [[TMP19:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER1]] ] +; CHECK-NEXT: [[ACC6_0_LCSSA:%.*]] = phi float [ [[TMP22:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER1]] ] +; CHECK-NEXT: [[ACC7_0_LCSSA:%.*]] = phi float [ [[TMP25:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER1]] ] +; CHECK-NEXT: [[I_0_LCSSA:%.*]] = phi i32 [ 0, [[FOR_COND_PREHEADER1]] ], [ [[TMP1]], [[FOR_BODY]] ] +; CHECK-NEXT: [[CMP32132:%.*]] = icmp slt i32 [[I_0_LCSSA]], [[LEN]] +; CHECK-NEXT: br i1 [[CMP32132]], label [[FOR_BODY33:%.*]], label [[FOR_END37]] ; CHECK: for.body: -; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[ACC_07:%.*]] = phi float [ [[TMP3]], [[FOR_BODY]] ], [ 0.000000e+00, [[ENTRY]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[I_08]] -; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[I_08]] -; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 -; CHECK-NEXT: [[TMP3]] = tail call float @llvm.fmuladd.f32(float [[TMP1]], float [[TMP2]], float [[ACC_07]]) -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[IF_END]], label [[FOR_BODY]] +; CHECK-NEXT: [[I_0122:%.*]] = phi i32 [ [[ADD30:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[ACC_07:%.*]] = phi float [ [[TMP4]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[ACC1:%.*]] = phi float [ [[TMP7]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[ACC2:%.*]] = phi float [ [[TMP10]], 
[[FOR_BODY]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[ACC3:%.*]] = phi float [ [[TMP13]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[ACC4:%.*]] = phi float [ [[TMP16]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[ACC5:%.*]] = phi float [ [[TMP19]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[ACC6:%.*]] = phi float [ [[TMP22]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[ACC7:%.*]] = phi float [ [[TMP25]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[I_0122]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[I_0122]] +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 +; CHECK-NEXT: [[TMP4]] = tail call float @llvm.fmuladd.f32(float [[TMP2]], float [[TMP3]], float [[ACC_07]]) +; CHECK-NEXT: [[ADD1:%.*]] = or disjoint i32 [[I_0122]], 1 +; CHECK-NEXT: [[ARRAYIDX1_0:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[ADD1]] +; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX1_0]], align 4 +; CHECK-NEXT: [[ARRAYIDX1_1:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[ADD1]] +; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX1_1]], align 4 +; CHECK-NEXT: [[TMP7]] = tail call float @llvm.fmuladd.f32(float [[TMP5]], float [[TMP6]], float [[ACC1]]) +; CHECK-NEXT: [[ADD2:%.*]] = or disjoint i32 [[I_0122]], 2 +; CHECK-NEXT: [[ARRAYIDX2_0:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[ADD2]] +; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX2_0]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[ADD2]] +; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2_1]], align 4 +; CHECK-NEXT: [[TMP10]] = tail call float @llvm.fmuladd.f32(float 
[[TMP8]], float [[TMP9]], float [[ACC2]]) +; CHECK-NEXT: [[ADD3:%.*]] = or disjoint i32 [[I_0122]], 3 +; CHECK-NEXT: [[ARRAYIDX3_0:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[ADD3]] +; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX3_0]], align 4 +; CHECK-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[ADD3]] +; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX3_1]], align 4 +; CHECK-NEXT: [[TMP13]] = tail call float @llvm.fmuladd.f32(float [[TMP11]], float [[TMP12]], float [[ACC3]]) +; CHECK-NEXT: [[ADD4:%.*]] = or disjoint i32 [[I_0122]], 4 +; CHECK-NEXT: [[ARRAYIDX4_0:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[ADD4]] +; CHECK-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX4_0]], align 4 +; CHECK-NEXT: [[ARRAYIDX4_1:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[ADD4]] +; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX4_1]], align 4 +; CHECK-NEXT: [[TMP16]] = tail call float @llvm.fmuladd.f32(float [[TMP14]], float [[TMP15]], float [[ACC4]]) +; CHECK-NEXT: [[ADD5:%.*]] = or disjoint i32 [[I_0122]], 5 +; CHECK-NEXT: [[ARRAYIDX5_0:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[ADD5]] +; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX5_0]], align 4 +; CHECK-NEXT: [[ARRAYIDX5_1:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[ADD5]] +; CHECK-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX5_1]], align 4 +; CHECK-NEXT: [[TMP19]] = tail call float @llvm.fmuladd.f32(float [[TMP17]], float [[TMP18]], float [[ACC5]]) +; CHECK-NEXT: [[ADD6:%.*]] = or disjoint i32 [[I_0122]], 6 +; CHECK-NEXT: [[ARRAYIDX6_0:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[ADD6]] +; CHECK-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6_0]], align 4 +; CHECK-NEXT: [[ARRAYIDX6_1:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[ADD6]] +; CHECK-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX6_1]], align 4 +; CHECK-NEXT: [[TMP22]] = tail call float 
@llvm.fmuladd.f32(float [[TMP20]], float [[TMP21]], float [[ACC6]]) +; CHECK-NEXT: [[ADD7:%.*]] = or disjoint i32 [[I_0122]], 7 +; CHECK-NEXT: [[ARRAYIDX7_0:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[ADD7]] +; CHECK-NEXT: [[TMP23:%.*]] = load float, ptr [[ARRAYIDX7_0]], align 4 +; CHECK-NEXT: [[ARRAYIDX7_1:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[ADD7]] +; CHECK-NEXT: [[TMP24:%.*]] = load float, ptr [[ARRAYIDX7_1]], align 4 +; CHECK-NEXT: [[TMP25]] = tail call float @llvm.fmuladd.f32(float [[TMP23]], float [[TMP24]], float [[ACC7]]) +; CHECK-NEXT: [[ADD30]] = add nuw nsw i32 [[I_0122]], 8 +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[ADD30]], [[SUB]] +; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY]], label [[FOR_COND31_PREHEADER]] +; CHECK: for.body33: +; CHECK-NEXT: [[I_0833:%.*]] = phi i32 [ [[INC33:%.*]], [[FOR_BODY33]] ], [ [[I_0_LCSSA]], [[FOR_COND31_PREHEADER]] ] +; CHECK-NEXT: [[ACC_0733:%.*]] = phi float [ [[TMP28:%.*]], [[FOR_BODY33]] ], [ [[ACC0_0_LCSSA]], [[FOR_COND31_PREHEADER]] ] +; CHECK-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[I_0833]] +; CHECK-NEXT: [[TMP26:%.*]] = load float, ptr [[ARRAYIDX33]], align 4 +; CHECK-NEXT: [[ARRAYIDX133:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[I_0833]] +; CHECK-NEXT: [[TMP27:%.*]] = load float, ptr [[ARRAYIDX133]], align 4 +; CHECK-NEXT: [[TMP28]] = tail call float @llvm.fmuladd.f32(float [[TMP26]], float [[TMP27]], float [[ACC_0733]]) +; CHECK-NEXT: [[INC33]] = add nuw nsw i32 [[I_0833]], 1 +; CHECK-NEXT: [[EXITCOND_NOT33:%.*]] = icmp eq i32 [[INC33]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT33]], label [[FOR_END37]], label [[FOR_BODY33]] +; CHECK: for.end37: +; CHECK-NEXT: [[ACC0_1_LCSSA:%.*]] = phi float [ [[TMP28]], [[FOR_BODY33]] ], [ [[ACC0_0_LCSSA]], [[FOR_COND31_PREHEADER]] ] +; CHECK-NEXT: [[SUM01:%.*]] = fadd float [[ACC1_0_LCSSA]], [[ACC0_1_LCSSA]] +; CHECK-NEXT: [[SUM23:%.*]] = fadd float [[ACC2_0_LCSSA]], [[ACC3_0_LCSSA]] 
+; CHECK-NEXT: [[SUM45:%.*]] = fadd float [[ACC4_0_LCSSA]], [[ACC5_0_LCSSA]] +; CHECK-NEXT: [[SUM67:%.*]] = fadd float [[ACC6_0_LCSSA]], [[ACC7_0_LCSSA]] +; CHECK-NEXT: [[SUM0123:%.*]] = fadd float [[SUM23]], [[SUM01]] +; CHECK-NEXT: [[SUM4567:%.*]] = fadd float [[SUM45]], [[SUM67]] +; CHECK-NEXT: [[ADD44]] = fadd float [[SUM4567]], [[SUM0123]] +; CHECK-NEXT: br label [[IF_END]] ; CHECK: for.body.clone: ; CHECK-NEXT: [[I_08_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] -; CHECK-NEXT: [[ACC_07_CLONE:%.*]] = phi float [ [[TMP6]], [[FOR_BODY_CLONE]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[ACC_07_CLONE:%.*]] = phi float [ [[TMP31]], [[FOR_BODY_CLONE]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER]] ] ; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[I_08_CLONE]] -; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[TMP29:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 ; CHECK-NEXT: [[ARRAYIDX1_CLONE:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[I_08_CLONE]] -; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX1_CLONE]], align 4 -; CHECK-NEXT: [[TMP6]] = tail call float @llvm.fmuladd.f32(float [[TMP4]], float [[TMP5]], float [[ACC_07_CLONE]]) +; CHECK-NEXT: [[TMP30:%.*]] = load float, ptr [[ARRAYIDX1_CLONE]], align 4 +; CHECK-NEXT: [[TMP31]] = tail call float @llvm.fmuladd.f32(float [[TMP29]], float [[TMP30]], float [[ACC_07_CLONE]]) ; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[I_08_CLONE]], 1 ; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] ; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[IF_END]], label [[FOR_BODY_CLONE]] diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod_template_complex.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod_template_complex.ll index 8db7f9dd4c788..60c76b1ad159d 100644 --- 
a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod_template_complex.ll +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod_template_complex.ll @@ -1,28 +1,115 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=true < %s | FileCheck %s define dso_local float @test_loop(ptr nocapture noundef readonly %data1, ptr nocapture noundef readonly %data2, i32 noundef %start_index, i32 noundef %end_index, i32 noundef %update1, i32 noundef %update2, float noundef %offset) local_unnamed_addr { ; CHECK-LABEL: define dso_local float @test_loop( -; CHECK-SAME: ptr nocapture noundef readonly [[DATA1:%.*]], ptr nocapture noundef readonly [[DATA2:%.*]], i32 noundef [[START_INDEX:%.*]], i32 noundef [[END_INDEX:%.*]], i32 noundef [[UPDATE1:%.*]], i32 noundef [[UPDATE2:%.*]], float noundef [[OFFSET:%.*]]) local_unnamed_addr { +; CHECK-SAME: ptr noalias nocapture noundef readonly [[DATA1:%.*]], ptr noalias nocapture noundef readonly [[DATA2:%.*]], i32 noundef [[START_INDEX:%.*]], i32 noundef [[END_INDEX:%.*]], i32 noundef [[UPDATE1:%.*]], i32 noundef [[UPDATE2:%.*]], float noundef [[OFFSET:%.*]]) local_unnamed_addr { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr float, ptr [[DATA1]], i32 [[UPDATE1]] ; CHECK-NEXT: [[INVARIANT_GEP8:%.*]] = getelementptr float, ptr [[DATA2]], i32 [[UPDATE2]] -; CHECK-NEXT: [[CMP10:%.*]] = icmp slt i32 [[START_INDEX]], [[END_INDEX]] -; CHECK-NEXT: br i1 [[CMP10]], label [[FOR_BODY:%.*]], label [[FOR_COND_CLEANUP:%.*]] -; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[ADD3:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: ret float 
[[RESULT_0_LCSSA]] -; CHECK: for.body: -; CHECK-NEXT: [[I_012:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[START_INDEX]], [[ENTRY]] ] -; CHECK-NEXT: [[RESULT_011:%.*]] = phi float [ [[ADD3]], [[FOR_BODY]] ], [ 0.000000e+00, [[ENTRY]] ] +; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[END_INDEX]], -8 +; CHECK-NEXT: [[CMP10:%.*]] = icmp slt i32 [[SUB]], [[START_INDEX]] +; CHECK-NEXT: br i1 [[CMP10]], label [[FOR_COND_PREHEADER:%.*]], label [[FOR_BODY_7:%.*]] +; CHECK: for.cond.preheader: +; CHECK-NEXT: [[RESULT0_0_LCSSA:%.*]] = phi i32 [ [[START_INDEX]], [[ENTRY:%.*]] ], [ [[INC_7:%.*]], [[FOR_BODY_7]] ] +; CHECK-NEXT: [[RESULT0_0_LCSSA1:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD3_7:%.*]], [[FOR_BODY_7]] ] +; CHECK-NEXT: [[RESULT0_0_LCSSA2:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD3_6:%.*]], [[FOR_BODY_7]] ] +; CHECK-NEXT: [[RESULT0_0_LCSSA3:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD3_5:%.*]], [[FOR_BODY_7]] ] +; CHECK-NEXT: [[RESULT0_0_LCSSA4:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD3_4:%.*]], [[FOR_BODY_7]] ] +; CHECK-NEXT: [[RESULT0_0_LCSSA5:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD3_3:%.*]], [[FOR_BODY_7]] ] +; CHECK-NEXT: [[RESULT0_0_LCSSA6:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD3_2:%.*]], [[FOR_BODY_7]] ] +; CHECK-NEXT: [[RESULT0_0_LCSSA7:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD3_1:%.*]], [[FOR_BODY_7]] ] +; CHECK-NEXT: [[RESULT0_0_LCSSA8:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD3:%.*]], [[FOR_BODY_7]] ] +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[RESULT0_0_LCSSA]], [[END_INDEX]] +; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY_CLONE:%.*]], label [[FOR_END:%.*]] +; CHECK: for.body.7: +; CHECK-NEXT: [[I_012:%.*]] = phi i32 [ [[START_INDEX]], [[ENTRY]] ], [ [[INC_7]], [[FOR_BODY_7]] ] +; CHECK-NEXT: [[RESULT6:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD3_6]], [[FOR_BODY_7]] ] +; CHECK-NEXT: [[RESULT5:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ 
[[ADD3_5]], [[FOR_BODY_7]] ] +; CHECK-NEXT: [[RESULT4:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD3_4]], [[FOR_BODY_7]] ] +; CHECK-NEXT: [[RESULT3:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD3_3]], [[FOR_BODY_7]] ] +; CHECK-NEXT: [[RESULT2:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD3_2]], [[FOR_BODY_7]] ] +; CHECK-NEXT: [[RESULT1:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD3_1]], [[FOR_BODY_7]] ] +; CHECK-NEXT: [[RESULT0:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD3]], [[FOR_BODY_7]] ] ; CHECK-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[INVARIANT_GEP]], i32 [[I_012]] ; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP]], align 4 ; CHECK-NEXT: [[GEP9:%.*]] = getelementptr float, ptr [[INVARIANT_GEP8]], i32 [[I_012]] ; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[GEP9]], align 4 ; CHECK-NEXT: [[TMP2:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP0]], float [[TMP1]], float [[OFFSET]]) -; CHECK-NEXT: [[ADD3]] = fadd float [[RESULT_011]], [[TMP2]] -; CHECK-NEXT: [[INC]] = add nsw i32 [[I_012]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[END_INDEX]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]] +; CHECK-NEXT: [[ADD3]] = fadd float [[RESULT0]], [[TMP2]] +; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[I_012]], 1 +; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr float, ptr [[INVARIANT_GEP]], i32 [[INC]] +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[GEP_1]], align 4 +; CHECK-NEXT: [[GEP9_1:%.*]] = getelementptr float, ptr [[INVARIANT_GEP8]], i32 [[INC]] +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[GEP9_1]], align 4 +; CHECK-NEXT: [[TMP5:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP3]], float [[TMP4]], float [[OFFSET]]) +; CHECK-NEXT: [[ADD3_1]] = fadd float [[RESULT1]], [[TMP5]] +; CHECK-NEXT: [[INC_1:%.*]] = add nsw i32 [[I_012]], 2 +; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr float, ptr [[INVARIANT_GEP]], i32 [[INC_1]] +; CHECK-NEXT: [[TMP6:%.*]] = load 
float, ptr [[GEP_2]], align 4 +; CHECK-NEXT: [[GEP9_2:%.*]] = getelementptr float, ptr [[INVARIANT_GEP8]], i32 [[INC_1]] +; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[GEP9_2]], align 4 +; CHECK-NEXT: [[TMP8:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP6]], float [[TMP7]], float [[OFFSET]]) +; CHECK-NEXT: [[ADD3_2]] = fadd float [[RESULT2]], [[TMP8]] +; CHECK-NEXT: [[INC_2:%.*]] = add nsw i32 [[I_012]], 3 +; CHECK-NEXT: [[GEP_3:%.*]] = getelementptr float, ptr [[INVARIANT_GEP]], i32 [[INC_2]] +; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[GEP_3]], align 4 +; CHECK-NEXT: [[GEP9_3:%.*]] = getelementptr float, ptr [[INVARIANT_GEP8]], i32 [[INC_2]] +; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[GEP9_3]], align 4 +; CHECK-NEXT: [[TMP11:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP9]], float [[TMP10]], float [[OFFSET]]) +; CHECK-NEXT: [[ADD3_3]] = fadd float [[RESULT3]], [[TMP11]] +; CHECK-NEXT: [[INC_3:%.*]] = add nsw i32 [[I_012]], 4 +; CHECK-NEXT: [[GEP_4:%.*]] = getelementptr float, ptr [[INVARIANT_GEP]], i32 [[INC_3]] +; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[GEP_4]], align 4 +; CHECK-NEXT: [[GEP9_4:%.*]] = getelementptr float, ptr [[INVARIANT_GEP8]], i32 [[INC_3]] +; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[GEP9_4]], align 4 +; CHECK-NEXT: [[TMP14:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP12]], float [[TMP13]], float [[OFFSET]]) +; CHECK-NEXT: [[ADD3_4]] = fadd float [[RESULT4]], [[TMP14]] +; CHECK-NEXT: [[INC_4:%.*]] = add nsw i32 [[I_012]], 5 +; CHECK-NEXT: [[GEP_5:%.*]] = getelementptr float, ptr [[INVARIANT_GEP]], i32 [[INC_4]] +; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[GEP_5]], align 4 +; CHECK-NEXT: [[GEP9_5:%.*]] = getelementptr float, ptr [[INVARIANT_GEP8]], i32 [[INC_4]] +; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[GEP9_5]], align 4 +; CHECK-NEXT: [[TMP17:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP15]], float [[TMP16]], float [[OFFSET]]) +; CHECK-NEXT: [[ADD3_5]] = fadd float [[RESULT5]], 
[[TMP17]] +; CHECK-NEXT: [[INC_5:%.*]] = add nsw i32 [[I_012]], 6 +; CHECK-NEXT: [[GEP_6:%.*]] = getelementptr float, ptr [[INVARIANT_GEP]], i32 [[INC_5]] +; CHECK-NEXT: [[TMP18:%.*]] = load float, ptr [[GEP_6]], align 4 +; CHECK-NEXT: [[GEP9_6:%.*]] = getelementptr float, ptr [[INVARIANT_GEP8]], i32 [[INC_5]] +; CHECK-NEXT: [[TMP19:%.*]] = load float, ptr [[GEP9_6]], align 4 +; CHECK-NEXT: [[TMP20:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP18]], float [[TMP19]], float [[OFFSET]]) +; CHECK-NEXT: [[ADD3_6]] = fadd float [[RESULT6]], [[TMP20]] +; CHECK-NEXT: [[INC_6:%.*]] = add nsw i32 [[I_012]], 7 +; CHECK-NEXT: [[GEP_7:%.*]] = getelementptr float, ptr [[INVARIANT_GEP]], i32 [[INC_6]] +; CHECK-NEXT: [[TMP21:%.*]] = load float, ptr [[GEP_7]], align 4 +; CHECK-NEXT: [[GEP9_7:%.*]] = getelementptr float, ptr [[INVARIANT_GEP8]], i32 [[INC_6]] +; CHECK-NEXT: [[TMP22:%.*]] = load float, ptr [[GEP9_7]], align 4 +; CHECK-NEXT: [[TMP23:%.*]] = tail call float @llvm.fmuladd.f32(float [[TMP21]], float [[TMP22]], float [[OFFSET]]) +; CHECK-NEXT: [[ADD3_7]] = fadd float [[ADD3_6]], [[TMP23]] +; CHECK-NEXT: [[INC_7]] = add nsw i32 [[I_012]], 8 +; CHECK-NEXT: [[EXITCOND_NOT_7:%.*]] = icmp slt i32 [[INC_7]], [[SUB]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_7]], label [[FOR_COND_PREHEADER]], label [[FOR_BODY_7]] +; CHECK: for.body.clone: +; CHECK-NEXT: [[I_012_CLONE:%.*]] = phi i32 [ [[RESULT0_0_LCSSA]], [[FOR_COND_PREHEADER]] ], [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: [[RESULT_011_CLONE:%.*]] = phi float [ [[RESULT0_0_LCSSA8]], [[FOR_COND_PREHEADER]] ], [ [[ADD3_CLONE:%.*]], [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: [[GEP_CLONE:%.*]] = getelementptr float, ptr [[INVARIANT_GEP]], i32 [[I_012_CLONE]] +; CHECK-NEXT: [[TMP24:%.*]] = load float, ptr [[GEP_CLONE]], align 4 +; CHECK-NEXT: [[GEP9_CLONE:%.*]] = getelementptr float, ptr [[INVARIANT_GEP8]], i32 [[I_012_CLONE]] +; CHECK-NEXT: [[TMP25:%.*]] = load float, ptr [[GEP9_CLONE]], align 4 +; CHECK-NEXT: [[TMP26:%.*]] = 
tail call float @llvm.fmuladd.f32(float [[TMP24]], float [[TMP25]], float [[OFFSET]]) +; CHECK-NEXT: [[ADD3_CLONE]] = fadd float [[RESULT_011_CLONE]], [[TMP26]] +; CHECK-NEXT: [[INC_CLONE]] = add nsw i32 [[I_012_CLONE]], 1 +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[END_INDEX]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[FOR_END]], label [[FOR_BODY_CLONE]] +; CHECK: for.end: +; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi float [ [[ADD3_CLONE]], [[FOR_BODY_CLONE]] ], [ [[RESULT0_0_LCSSA8]], [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[ADD64:%.*]] = fadd float [[RESULT0_0_LCSSA1]], [[RESULT_0_LCSSA]] +; CHECK-NEXT: [[ADD65:%.*]] = fadd float [[RESULT0_0_LCSSA2]], [[RESULT0_0_LCSSA3]] +; CHECK-NEXT: [[ADD66:%.*]] = fadd float [[RESULT0_0_LCSSA4]], [[RESULT0_0_LCSSA5]] +; CHECK-NEXT: [[ADD67:%.*]] = fadd float [[RESULT0_0_LCSSA6]], [[RESULT0_0_LCSSA7]] +; CHECK-NEXT: [[ADD68:%.*]] = fadd float [[ADD65]], [[ADD64]] +; CHECK-NEXT: [[ADD69:%.*]] = fadd float [[ADD66]], [[ADD67]] +; CHECK-NEXT: [[ADD70:%.*]] = fadd float [[ADD69]], [[ADD68]] +; CHECK-NEXT: ret float [[ADD70]] ; entry: %invariant.gep = getelementptr float, ptr %data1, i32 %update1 diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprode.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprode.ll index 78ea995d2a297..9922d9aa34f77 100644 --- a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprode.ll +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprode.ll @@ -1,41 +1,132 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=true < %s | FileCheck %s define dso_local noundef i32 @dsps_dotprode_f32_ansi(ptr nocapture noundef 
readonly %src1, ptr nocapture noundef readonly %src2, ptr nocapture noundef writeonly %dest, i32 noundef %len, i32 noundef %step1, i32 noundef %step2) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_dotprode_f32_ansi( -; CHECK-SAME: ptr nocapture noundef readonly [[SRC1:%.*]], ptr nocapture noundef readonly [[SRC2:%.*]], ptr nocapture noundef writeonly [[DEST:%.*]], i32 noundef [[LEN:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[STEP2:%.*]]) local_unnamed_addr { +; CHECK-SAME: ptr noalias nocapture noundef readonly [[SRC1:%.*]], ptr noalias nocapture noundef readonly [[SRC2:%.*]], ptr noalias nocapture noundef writeonly [[DEST:%.*]], i32 noundef [[LEN:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[STEP2:%.*]]) local_unnamed_addr { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = icmp sgt i32 [[LEN]], 2 -; CHECK-NEXT: br i1 [[TMP0]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[TMP0]], label [[FOR_COND_PREHEADER1:%.*]], label [[FOR_COND_PREHEADER:%.*]] ; CHECK: for.cond.preheader: -; CHECK-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[LEN]], 0 -; CHECK-NEXT: br i1 [[CMP8]], label [[FOR_BODY_CLONE:%.*]], label [[IF_END:%.*]] +; CHECK-NEXT: [[CMP47110:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP47110]], label [[FOR_BODY_CLONE:%.*]], label [[IF_END:%.*]] ; CHECK: if.end: -; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_PREHEADER]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ], [ [[TMP6:%.*]], [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_PREHEADER]] ], [ [[ADD44:%.*]], [[FOR_END37:%.*]] ], [ [[TMP31:%.*]], [[FOR_BODY_CLONE]] ] ; CHECK-NEXT: store float [[ACC_0_LCSSA]], ptr [[DEST]], align 4 ; CHECK-NEXT: ret i32 0 +; CHECK: for.cond.preheader1: +; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[LEN]], -7 +; CHECK-NEXT: [[CMP1113:%.*]] = icmp ugt i32 [[LEN]], 7 +; CHECK-NEXT: br i1 [[CMP1113]], label [[FOR_BODY_PREHEADER:%.*]], label 
[[FOR_COND31_PREHEADER:%.*]] +; CHECK: for.body.preheader: +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[LEN]], 2147483640 +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.cond31.preheader: +; CHECK-NEXT: [[ACC0_0_LCSSA:%.*]] = phi float [ [[TMP4:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER1]] ] +; CHECK-NEXT: [[ACC1_0_LCSSA:%.*]] = phi float [ [[TMP7:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER1]] ] +; CHECK-NEXT: [[ACC2_0_LCSSA:%.*]] = phi float [ [[TMP10:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER1]] ] +; CHECK-NEXT: [[ACC3_0_LCSSA:%.*]] = phi float [ [[TMP13:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER1]] ] +; CHECK-NEXT: [[ACC4_0_LCSSA:%.*]] = phi float [ [[TMP16:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER1]] ] +; CHECK-NEXT: [[ACC5_0_LCSSA:%.*]] = phi float [ [[TMP19:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER1]] ] +; CHECK-NEXT: [[ACC6_0_LCSSA:%.*]] = phi float [ [[TMP22:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER1]] ] +; CHECK-NEXT: [[ACC7_0_LCSSA:%.*]] = phi float [ [[TMP25:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER1]] ] +; CHECK-NEXT: [[I_0_LCSSA:%.*]] = phi i32 [ 0, [[FOR_COND_PREHEADER1]] ], [ [[TMP1]], [[FOR_BODY]] ] +; CHECK-NEXT: [[CMP32132:%.*]] = icmp slt i32 [[I_0_LCSSA]], [[LEN]] +; CHECK-NEXT: br i1 [[CMP32132]], label [[FOR_BODY33:%.*]], label [[FOR_END37]] ; CHECK: for.body: -; CHECK-NEXT: [[I_010:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[ACC_09:%.*]] = phi float [ [[TMP3]], [[FOR_BODY]] ], [ 0.000000e+00, [[ENTRY]] ] -; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_010]], [[STEP1]] +; CHECK-NEXT: [[I_0122:%.*]] = phi i32 [ [[ADD30:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[ACC_09:%.*]] = phi float [ [[TMP4]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[ACC1:%.*]] = phi float [ [[TMP7]], [[FOR_BODY]] ], [ 0.000000e+00, 
[[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[ACC2:%.*]] = phi float [ [[TMP10]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[ACC3:%.*]] = phi float [ [[TMP13]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[ACC4:%.*]] = phi float [ [[TMP16]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[ACC5:%.*]] = phi float [ [[TMP19]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[ACC6:%.*]] = phi float [ [[TMP22]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[ACC7:%.*]] = phi float [ [[TMP25]], [[FOR_BODY]] ], [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ] +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_0122]], [[STEP1]] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[MUL]] -; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[MUL1:%.*]] = mul nsw i32 [[I_010]], [[STEP2]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[MUL1:%.*]] = mul nsw i32 [[I_0122]], [[STEP2]] ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[MUL1]] -; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; CHECK-NEXT: [[TMP3]] = tail call float @llvm.fmuladd.f32(float [[TMP1]], float [[TMP2]], float [[ACC_09]]) -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_010]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[LEN]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[IF_END]], label [[FOR_BODY]] +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +; CHECK-NEXT: [[TMP4]] = tail call float @llvm.fmuladd.f32(float [[TMP2]], float [[TMP3]], float [[ACC_09]]) +; CHECK-NEXT: [[ADD1:%.*]] = or disjoint i32 [[I_0122]], 1 +; CHECK-NEXT: [[ARRAYIDX1_0:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[ADD1]] +; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX1_0]], align 4 +; CHECK-NEXT: 
[[ARRAYIDX1_1:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[ADD1]] +; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX1_1]], align 4 +; CHECK-NEXT: [[TMP7]] = tail call float @llvm.fmuladd.f32(float [[TMP5]], float [[TMP6]], float [[ACC1]]) +; CHECK-NEXT: [[ADD2:%.*]] = or disjoint i32 [[I_0122]], 2 +; CHECK-NEXT: [[ARRAYIDX2_0:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[ADD2]] +; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX2_0]], align 4 +; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[ADD2]] +; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2_1]], align 4 +; CHECK-NEXT: [[TMP10]] = tail call float @llvm.fmuladd.f32(float [[TMP8]], float [[TMP9]], float [[ACC2]]) +; CHECK-NEXT: [[ADD3:%.*]] = or disjoint i32 [[I_0122]], 3 +; CHECK-NEXT: [[ARRAYIDX3_0:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[ADD3]] +; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX3_0]], align 4 +; CHECK-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[ADD3]] +; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX3_1]], align 4 +; CHECK-NEXT: [[TMP13]] = tail call float @llvm.fmuladd.f32(float [[TMP11]], float [[TMP12]], float [[ACC3]]) +; CHECK-NEXT: [[ADD4:%.*]] = or disjoint i32 [[I_0122]], 4 +; CHECK-NEXT: [[ARRAYIDX4_0:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[ADD4]] +; CHECK-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX4_0]], align 4 +; CHECK-NEXT: [[ARRAYIDX4_1:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[ADD4]] +; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX4_1]], align 4 +; CHECK-NEXT: [[TMP16]] = tail call float @llvm.fmuladd.f32(float [[TMP14]], float [[TMP15]], float [[ACC4]]) +; CHECK-NEXT: [[ADD5:%.*]] = or disjoint i32 [[I_0122]], 5 +; CHECK-NEXT: [[ARRAYIDX5_0:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[ADD5]] +; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX5_0]], align 4 +; 
CHECK-NEXT: [[ARRAYIDX5_1:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[ADD5]] +; CHECK-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX5_1]], align 4 +; CHECK-NEXT: [[TMP19]] = tail call float @llvm.fmuladd.f32(float [[TMP17]], float [[TMP18]], float [[ACC5]]) +; CHECK-NEXT: [[ADD6:%.*]] = or disjoint i32 [[I_0122]], 6 +; CHECK-NEXT: [[ARRAYIDX6_0:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[ADD6]] +; CHECK-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6_0]], align 4 +; CHECK-NEXT: [[ARRAYIDX6_1:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[ADD6]] +; CHECK-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX6_1]], align 4 +; CHECK-NEXT: [[TMP22]] = tail call float @llvm.fmuladd.f32(float [[TMP20]], float [[TMP21]], float [[ACC6]]) +; CHECK-NEXT: [[ADD7:%.*]] = or disjoint i32 [[I_0122]], 7 +; CHECK-NEXT: [[ARRAYIDX7_0:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[ADD7]] +; CHECK-NEXT: [[TMP23:%.*]] = load float, ptr [[ARRAYIDX7_0]], align 4 +; CHECK-NEXT: [[ARRAYIDX7_1:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[ADD7]] +; CHECK-NEXT: [[TMP24:%.*]] = load float, ptr [[ARRAYIDX7_1]], align 4 +; CHECK-NEXT: [[TMP25]] = tail call float @llvm.fmuladd.f32(float [[TMP23]], float [[TMP24]], float [[ACC7]]) +; CHECK-NEXT: [[ADD30]] = add nuw nsw i32 [[I_0122]], 8 +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[ADD30]], [[SUB]] +; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY]], label [[FOR_COND31_PREHEADER]] +; CHECK: for.body33: +; CHECK-NEXT: [[I_01033:%.*]] = phi i32 [ [[INC33:%.*]], [[FOR_BODY33]] ], [ [[I_0_LCSSA]], [[FOR_COND31_PREHEADER]] ] +; CHECK-NEXT: [[ACC_0933:%.*]] = phi float [ [[TMP28:%.*]], [[FOR_BODY33]] ], [ [[ACC0_0_LCSSA]], [[FOR_COND31_PREHEADER]] ] +; CHECK-NEXT: [[MUL33:%.*]] = mul nsw i32 [[I_01033]], [[STEP1]] +; CHECK-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[MUL33]] +; CHECK-NEXT: [[TMP26:%.*]] = load float, ptr [[ARRAYIDX33]], align 4 +; CHECK-NEXT: 
[[MUL133:%.*]] = mul nsw i32 [[I_01033]], [[STEP2]] +; CHECK-NEXT: [[ARRAYIDX233:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[MUL133]] +; CHECK-NEXT: [[TMP27:%.*]] = load float, ptr [[ARRAYIDX233]], align 4 +; CHECK-NEXT: [[TMP28]] = tail call float @llvm.fmuladd.f32(float [[TMP26]], float [[TMP27]], float [[ACC_0933]]) +; CHECK-NEXT: [[INC33]] = add nuw nsw i32 [[I_01033]], 1 +; CHECK-NEXT: [[EXITCOND_NOT33:%.*]] = icmp eq i32 [[INC33]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT33]], label [[FOR_END37]], label [[FOR_BODY33]] +; CHECK: for.end37: +; CHECK-NEXT: [[ACC0_1_LCSSA:%.*]] = phi float [ [[TMP28]], [[FOR_BODY33]] ], [ [[ACC0_0_LCSSA]], [[FOR_COND31_PREHEADER]] ] +; CHECK-NEXT: [[SUM01:%.*]] = fadd float [[ACC1_0_LCSSA]], [[ACC0_1_LCSSA]] +; CHECK-NEXT: [[SUM23:%.*]] = fadd float [[ACC2_0_LCSSA]], [[ACC3_0_LCSSA]] +; CHECK-NEXT: [[SUM45:%.*]] = fadd float [[ACC4_0_LCSSA]], [[ACC5_0_LCSSA]] +; CHECK-NEXT: [[SUM67:%.*]] = fadd float [[ACC6_0_LCSSA]], [[ACC7_0_LCSSA]] +; CHECK-NEXT: [[SUM0123:%.*]] = fadd float [[SUM23]], [[SUM01]] +; CHECK-NEXT: [[SUM4567:%.*]] = fadd float [[SUM45]], [[SUM67]] +; CHECK-NEXT: [[ADD44]] = fadd float [[SUM4567]], [[SUM0123]] +; CHECK-NEXT: br label [[IF_END]] ; CHECK: for.body.clone: ; CHECK-NEXT: [[I_010_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] -; CHECK-NEXT: [[ACC_09_CLONE:%.*]] = phi float [ [[TMP6]], [[FOR_BODY_CLONE]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER]] ] +; CHECK-NEXT: [[ACC_09_CLONE:%.*]] = phi float [ [[TMP31]], [[FOR_BODY_CLONE]] ], [ 0.000000e+00, [[FOR_COND_PREHEADER]] ] ; CHECK-NEXT: [[MUL_CLONE:%.*]] = mul nsw i32 [[I_010_CLONE]], [[STEP1]] ; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[SRC1]], i32 [[MUL_CLONE]] -; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[TMP29:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 ; CHECK-NEXT: [[MUL1_CLONE:%.*]] = mul nsw i32 
[[I_010_CLONE]], [[STEP2]] ; CHECK-NEXT: [[ARRAYIDX2_CLONE:%.*]] = getelementptr inbounds float, ptr [[SRC2]], i32 [[MUL1_CLONE]] -; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX2_CLONE]], align 4 -; CHECK-NEXT: [[TMP6]] = tail call float @llvm.fmuladd.f32(float [[TMP4]], float [[TMP5]], float [[ACC_09_CLONE]]) +; CHECK-NEXT: [[TMP30:%.*]] = load float, ptr [[ARRAYIDX2_CLONE]], align 4 +; CHECK-NEXT: [[TMP31]] = tail call float @llvm.fmuladd.f32(float [[TMP29]], float [[TMP30]], float [[ACC_09_CLONE]]) ; CHECK-NEXT: [[INC_CLONE]] = add nuw nsw i32 [[I_010_CLONE]], 1 ; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] ; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[IF_END]], label [[FOR_BODY_CLONE]] diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fir.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fir.ll index 6a8cb4868b7ea..61470c86fb215 100644 --- a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fir.ll +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fir.ll @@ -1,28 +1,24 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=true < %s | FileCheck %s %struct.fir_f32_s = type { ptr, ptr, i32, i32, i32, i16 } define dso_local noundef i32 @dsps_fir_f32_ansi(ptr nocapture noundef %fir, ptr nocapture noundef readonly %input, ptr nocapture noundef writeonly %output, i32 noundef %len) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_fir_f32_ansi( -; CHECK-SAME: ptr nocapture noundef [[FIR:%.*]], ptr nocapture noundef readonly [[INPUT:%.*]], ptr nocapture noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]]) local_unnamed_addr { +; CHECK-SAME: ptr noalias 
nocapture noundef [[FIR:%.*]], ptr noalias nocapture noundef readonly [[INPUT:%.*]], ptr noalias nocapture noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]]) local_unnamed_addr { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = icmp sgt i32 [[LEN]], 2 -; CHECK-NEXT: br i1 [[TMP0]], label [[FOR_COND_PREHEADER:%.*]], label [[FOR_BODY_LR_PH_CLONE:%.*]] +; CHECK-NEXT: br i1 [[TMP0]], label [[FOR_COND_PREHEADER:%.*]], label [[FOR_BODY_LR_PH_CLONE_PREHEADER:%.*]] ; CHECK: for.cond.preheader: -; CHECK-NEXT: [[CMP67:%.*]] = icmp sgt i32 [[LEN]], 0 -; CHECK-NEXT: br i1 [[CMP67]], label [[FOR_BODY_LR_PH:%.*]], label [[IF_END:%.*]] -; CHECK: for.body.lr.ph: ; CHECK-NEXT: [[DELAY:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S:%.*]], ptr [[FIR]], i32 0, i32 1 ; CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DELAY]], align 4 ; CHECK-NEXT: [[POS:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S]], ptr [[FIR]], i32 0, i32 3 ; CHECK-NEXT: [[N:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S]], ptr [[FIR]], i32 0, i32 2 ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[N]], align 4 +; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[TMP2]], -7 ; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, ptr [[POS]], align 4 ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: if.end: -; CHECK-NEXT: ret i32 0 ; CHECK: for.body: -; CHECK-NEXT: [[TMP3:%.*]] = phi i32 [ [[DOTPRE]], [[FOR_BODY_LR_PH]] ], [ [[SPEC_STORE_SELECT:%.*]], [[FOR_COND_CLEANUP21:%.*]] ] -; CHECK-NEXT: [[I_068:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC33:%.*]], [[FOR_COND_CLEANUP21]] ] +; CHECK-NEXT: [[TMP3:%.*]] = phi i32 [ [[DOTPRE]], [[FOR_COND_PREHEADER]] ], [ [[SPEC_STORE_SELECT:%.*]], [[FOR_END:%.*]] ] +; CHECK-NEXT: [[I_068:%.*]] = phi i32 [ 0, [[FOR_COND_PREHEADER]] ], [ [[INC33_MODIFY:%.*]], [[FOR_END]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_068]] ; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, 
ptr [[TMP1]], i32 [[TMP3]] @@ -31,119 +27,286 @@ define dso_local noundef i32 @dsps_fir_f32_ansi(ptr nocapture noundef %fir, ptr ; CHECK-NEXT: [[CMP4_NOT:%.*]] = icmp slt i32 [[INC]], [[TMP2]] ; CHECK-NEXT: [[SPEC_STORE_SELECT]] = select i1 [[CMP4_NOT]], i32 [[INC]], i32 0 ; CHECK-NEXT: store i32 [[SPEC_STORE_SELECT]], ptr [[POS]], align 4 -; CHECK-NEXT: [[CMP957:%.*]] = icmp slt i32 [[SPEC_STORE_SELECT]], [[TMP2]] -; CHECK-NEXT: br i1 [[CMP957]], label [[FOR_BODY11_LR_PH:%.*]], label [[FOR_COND18_PREHEADER:%.*]] +; CHECK-NEXT: [[CMP957:%.*]] = icmp slt i32 [[SPEC_STORE_SELECT]], [[SUB]] +; CHECK-NEXT: br i1 [[CMP957]], label [[FOR_BODY11_LR_PH_MODIFY:%.*]], label [[FOR_COND18_PREHEADER_MODIFY:%.*]] +; CHECK: for.cond18.preheader.modify: +; CHECK-NEXT: [[N_060_MODIFY_CLONE:%.*]] = phi i32 [ [[SPEC_STORE_SELECT]], [[FOR_BODY]] ], [ [[INC16_MODIFY:%.*]], [[FOR_BODY11_MODIFY:%.*]] ] +; CHECK-NEXT: [[COEFF_POS_059_MODIFY_CLONE:%.*]] = phi i32 [ 0, [[FOR_BODY]] ], [ [[INC12_MODIFY:%.*]], [[FOR_BODY11_MODIFY]] ] +; CHECK-NEXT: [[ACC_058_MODIFY_CLONE:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP24:%.*]], [[FOR_BODY11_MODIFY]] ] +; CHECK-NEXT: [[ACC_CLONE:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP25:%.*]], [[FOR_BODY11_MODIFY]] ] +; CHECK-NEXT: [[ACC4_CLONE:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP26:%.*]], [[FOR_BODY11_MODIFY]] ] +; CHECK-NEXT: [[ACC7_CLONE:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP27:%.*]], [[FOR_BODY11_MODIFY]] ] +; CHECK-NEXT: [[ACC10_CLONE:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP28:%.*]], [[FOR_BODY11_MODIFY]] ] +; CHECK-NEXT: [[ACC13_CLONE:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP29:%.*]], [[FOR_BODY11_MODIFY]] ] +; CHECK-NEXT: [[ACC17_CLONE:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP30:%.*]], [[FOR_BODY11_MODIFY]] ] +; CHECK-NEXT: [[ACC20_CLONE:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP31:%.*]], [[FOR_BODY11_MODIFY]] ] +; 
CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i32 [[N_060_MODIFY_CLONE]], [[TMP2]] +; CHECK-NEXT: br i1 [[CMP_SLT]], label [[FOR_BODY11_LR_PH:%.*]], label [[FOR_COND18_PREHEADER:%.*]] ; CHECK: for.body11.lr.ph: ; CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[FIR]], align 4 -; CHECK-NEXT: [[TMP6:%.*]] = sub i32 [[TMP2]], [[SPEC_STORE_SELECT]] +; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[TMP2]], [[COEFF_POS_059_MODIFY_CLONE]] ; CHECK-NEXT: br label [[FOR_BODY11:%.*]] +; CHECK: for.body11.lr.ph.modify: +; CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[FIR]], align 4 +; CHECK-NEXT: br label [[FOR_BODY11_MODIFY]] +; CHECK: for.body11.modify: +; CHECK-NEXT: [[N_060_MODIFY:%.*]] = phi i32 [ [[SPEC_STORE_SELECT]], [[FOR_BODY11_LR_PH_MODIFY]] ], [ [[INC16_MODIFY]], [[FOR_BODY11_MODIFY]] ] +; CHECK-NEXT: [[COEFF_POS_059_MODIFY:%.*]] = phi i32 [ 0, [[FOR_BODY11_LR_PH_MODIFY]] ], [ [[INC12_MODIFY]], [[FOR_BODY11_MODIFY]] ] +; CHECK-NEXT: [[ACC_058_MODIFY:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY11_LR_PH_MODIFY]] ], [ [[TMP24]], [[FOR_BODY11_MODIFY]] ] +; CHECK-NEXT: [[ACC:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY11_LR_PH_MODIFY]] ], [ [[TMP25]], [[FOR_BODY11_MODIFY]] ] +; CHECK-NEXT: [[ACC4:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY11_LR_PH_MODIFY]] ], [ [[TMP26]], [[FOR_BODY11_MODIFY]] ] +; CHECK-NEXT: [[ACC7:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY11_LR_PH_MODIFY]] ], [ [[TMP27]], [[FOR_BODY11_MODIFY]] ] +; CHECK-NEXT: [[ACC10:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY11_LR_PH_MODIFY]] ], [ [[TMP28]], [[FOR_BODY11_MODIFY]] ] +; CHECK-NEXT: [[ACC13:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY11_LR_PH_MODIFY]] ], [ [[TMP29]], [[FOR_BODY11_MODIFY]] ] +; CHECK-NEXT: [[ACC17:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY11_LR_PH_MODIFY]] ], [ [[TMP30]], [[FOR_BODY11_MODIFY]] ] +; CHECK-NEXT: [[ACC20:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY11_LR_PH_MODIFY]] ], [ [[TMP31]], [[FOR_BODY11_MODIFY]] ] +; CHECK-NEXT: [[INC12_MODIFY]] = add nuw nsw i32 [[COEFF_POS_059_MODIFY]], 8 +; 
CHECK-NEXT: [[INC16_MODIFY]] = add nsw i32 [[N_060_MODIFY]], 8 +; CHECK-NEXT: [[ADD7:%.*]] = or disjoint i32 [[COEFF_POS_059_MODIFY]], 7 +; CHECK-NEXT: [[ARRAYIDX13_MODIFY:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 [[COEFF_POS_059_MODIFY]] +; CHECK-NEXT: [[ARRAYIDX15_MODIFY:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 [[N_060_MODIFY]] +; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[ARRAYIDX13_MODIFY]], i32 1 +; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[ARRAYIDX15_MODIFY]], i32 1 +; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[ARRAYIDX13_MODIFY]], i32 2 +; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[ARRAYIDX15_MODIFY]], i32 2 +; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[ARRAYIDX13_MODIFY]], i32 3 +; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[ARRAYIDX15_MODIFY]], i32 3 +; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, ptr [[ARRAYIDX13_MODIFY]], i32 4 +; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, ptr [[ARRAYIDX15_MODIFY]], i32 4 +; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds float, ptr [[ARRAYIDX13_MODIFY]], i32 5 +; CHECK-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds float, ptr [[ARRAYIDX15_MODIFY]], i32 5 +; CHECK-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds float, ptr [[ARRAYIDX13_MODIFY]], i32 6 +; CHECK-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds float, ptr [[ARRAYIDX15_MODIFY]], i32 6 +; CHECK-NEXT: [[ARRAYIDX21:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 [[ADD7]] +; CHECK-NEXT: [[ARRAYIDX22:%.*]] = getelementptr inbounds float, ptr [[ARRAYIDX15_MODIFY]], i32 7 +; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX13_MODIFY]], align 4 +; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX15_MODIFY]], align 4 +; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +; CHECK-NEXT: [[TMP11:%.*]] = load 
float, ptr [[ARRAYIDX3]], align 4 +; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX5]], align 4 +; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX6]], align 4 +; CHECK-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX8]], align 4 +; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX9]], align 4 +; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX11]], align 4 +; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX12]], align 4 +; CHECK-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX14]], align 4 +; CHECK-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX16]], align 4 +; CHECK-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX18]], align 4 +; CHECK-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX19]], align 4 +; CHECK-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX21]], align 4 +; CHECK-NEXT: [[TMP23:%.*]] = load float, ptr [[ARRAYIDX22]], align 4 +; CHECK-NEXT: [[TMP24]] = tail call float @llvm.fmuladd.f32(float [[TMP8]], float [[TMP9]], float [[ACC_058_MODIFY]]) +; CHECK-NEXT: [[TMP25]] = tail call float @llvm.fmuladd.f32(float [[TMP10]], float [[TMP11]], float [[ACC]]) +; CHECK-NEXT: [[TMP26]] = tail call float @llvm.fmuladd.f32(float [[TMP12]], float [[TMP13]], float [[ACC4]]) +; CHECK-NEXT: [[TMP27]] = tail call float @llvm.fmuladd.f32(float [[TMP14]], float [[TMP15]], float [[ACC7]]) +; CHECK-NEXT: [[TMP28]] = tail call float @llvm.fmuladd.f32(float [[TMP16]], float [[TMP17]], float [[ACC10]]) +; CHECK-NEXT: [[TMP29]] = tail call float @llvm.fmuladd.f32(float [[TMP18]], float [[TMP19]], float [[ACC13]]) +; CHECK-NEXT: [[TMP30]] = tail call float @llvm.fmuladd.f32(float [[TMP20]], float [[TMP21]], float [[ACC17]]) +; CHECK-NEXT: [[TMP31]] = tail call float @llvm.fmuladd.f32(float [[TMP22]], float [[TMP23]], float [[ACC20]]) +; CHECK-NEXT: [[CMP11:%.*]] = icmp slt i32 [[INC16_MODIFY]], [[SUB]] +; CHECK-NEXT: br i1 [[CMP11]], label [[FOR_BODY11_MODIFY]], label [[FOR_COND18_PREHEADER_MODIFY]] +; CHECK: for.cond18.preheader.loopexit: +; 
CHECK-NEXT: [[TMP32:%.*]] = sub i32 [[TMP6]], [[N_060_MODIFY_CLONE]] +; CHECK-NEXT: br label [[FOR_COND18_PREHEADER]] ; CHECK: for.cond18.preheader: -; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY]] ], [ [[TMP10:%.*]], [[FOR_BODY11]] ] -; CHECK-NEXT: [[COEFF_POS_0_LCSSA:%.*]] = phi i32 [ 0, [[FOR_BODY]] ], [ [[TMP6]], [[FOR_BODY11]] ] -; CHECK-NEXT: [[CMP2062:%.*]] = icmp sgt i32 [[SPEC_STORE_SELECT]], 0 -; CHECK-NEXT: br i1 [[CMP2062]], label [[FOR_BODY22_LR_PH:%.*]], label [[FOR_COND_CLEANUP21]] +; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ [[ACC_058_MODIFY_CLONE]], [[FOR_COND18_PREHEADER_MODIFY]] ], [ [[TMP37:%.*]], [[FOR_COND18_PREHEADER_LOOPEXIT:%.*]] ] +; CHECK-NEXT: [[COEFF_POS_0_LCSSA:%.*]] = phi i32 [ [[COEFF_POS_059_MODIFY_CLONE]], [[FOR_COND18_PREHEADER_MODIFY]] ], [ [[TMP32]], [[FOR_COND18_PREHEADER_LOOPEXIT]] ] +; CHECK-NEXT: [[TMP33:%.*]] = add nsw i32 [[SPEC_STORE_SELECT]], -7 +; CHECK-NEXT: [[CMP2062:%.*]] = icmp sgt i32 [[SPEC_STORE_SELECT]], 7 +; CHECK-NEXT: br i1 [[CMP2062]], label [[FOR_BODY22_LR_PH_MODIFY:%.*]], label [[FOR_COND_CLEANUP21:%.*]] ; CHECK: for.body22.lr.ph: -; CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[FIR]], align 4 +; CHECK-NEXT: [[TMP34:%.*]] = load ptr, ptr [[FIR]], align 4 ; CHECK-NEXT: br label [[FOR_BODY22:%.*]] ; CHECK: for.body11: -; CHECK-NEXT: [[N_060:%.*]] = phi i32 [ [[SPEC_STORE_SELECT]], [[FOR_BODY11_LR_PH]] ], [ [[INC16:%.*]], [[FOR_BODY11]] ] -; CHECK-NEXT: [[COEFF_POS_059:%.*]] = phi i32 [ 0, [[FOR_BODY11_LR_PH]] ], [ [[INC12:%.*]], [[FOR_BODY11]] ] -; CHECK-NEXT: [[ACC_058:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY11_LR_PH]] ], [ [[TMP10]], [[FOR_BODY11]] ] +; CHECK-NEXT: [[N_060:%.*]] = phi i32 [ [[N_060_MODIFY_CLONE]], [[FOR_BODY11_LR_PH]] ], [ [[INC16:%.*]], [[FOR_BODY11]] ] +; CHECK-NEXT: [[COEFF_POS_059:%.*]] = phi i32 [ [[COEFF_POS_059_MODIFY_CLONE]], [[FOR_BODY11_LR_PH]] ], [ [[INC12:%.*]], [[FOR_BODY11]] ] +; CHECK-NEXT: [[ACC_058:%.*]] = phi float [ 
[[ACC_058_MODIFY_CLONE]], [[FOR_BODY11_LR_PH]] ], [ [[TMP37]], [[FOR_BODY11]] ] ; CHECK-NEXT: [[INC12]] = add nuw i32 [[COEFF_POS_059]], 1 ; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i32 [[COEFF_POS_059]] -; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX13]], align 4 +; CHECK-NEXT: [[TMP35:%.*]] = load float, ptr [[ARRAYIDX13]], align 4 ; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 [[N_060]] -; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX15]], align 4 -; CHECK-NEXT: [[TMP10]] = tail call float @llvm.fmuladd.f32(float [[TMP8]], float [[TMP9]], float [[ACC_058]]) +; CHECK-NEXT: [[TMP36:%.*]] = load float, ptr [[ARRAYIDX15]], align 4 +; CHECK-NEXT: [[TMP37]] = tail call float @llvm.fmuladd.f32(float [[TMP35]], float [[TMP36]], float [[ACC_058]]) ; CHECK-NEXT: [[INC16]] = add nsw i32 [[N_060]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC12]], [[TMP6]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND18_PREHEADER]], label [[FOR_BODY11]] +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC16]], [[TMP2]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND18_PREHEADER_LOOPEXIT]], label [[FOR_BODY11]] ; CHECK: for.cond.cleanup21: -; CHECK-NEXT: [[ACC_1_LCSSA:%.*]] = phi float [ [[ACC_0_LCSSA]], [[FOR_COND18_PREHEADER]] ], [ [[TMP13:%.*]], [[FOR_BODY22]] ] -; CHECK-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_068]] -; CHECK-NEXT: store float [[ACC_1_LCSSA]], ptr [[ARRAYIDX31]], align 4 -; CHECK-NEXT: [[INC33]] = add nuw nsw i32 [[I_068]], 1 -; CHECK-NEXT: [[EXITCOND71_NOT:%.*]] = icmp eq i32 [[INC33]], [[LEN]] -; CHECK-NEXT: br i1 [[EXITCOND71_NOT]], label [[IF_END]], label [[FOR_BODY]] +; CHECK-NEXT: [[N17_065_MODIFY_CLONE:%.*]] = phi i32 [ 0, [[FOR_COND18_PREHEADER]] ], [ [[TMP39:%.*]], [[FOR_BODY22_MODIFY:%.*]] ] +; CHECK-NEXT: [[COEFF_POS_164_MODIFY_CLONE:%.*]] = phi i32 [ [[COEFF_POS_0_LCSSA]], [[FOR_COND18_PREHEADER]] ], 
[ [[INC24_MODIFY:%.*]], [[FOR_BODY22_MODIFY]] ] +; CHECK-NEXT: [[ACC_163_MODIFY_CLONE:%.*]] = phi float [ [[ACC_0_LCSSA]], [[FOR_COND18_PREHEADER]] ], [ [[TMP56:%.*]], [[FOR_BODY22_MODIFY]] ] +; CHECK-NEXT: [[ACC23_CLONE:%.*]] = phi float [ [[ACC_CLONE]], [[FOR_COND18_PREHEADER]] ], [ [[TMP57:%.*]], [[FOR_BODY22_MODIFY]] ] +; CHECK-NEXT: [[ACC27_CLONE:%.*]] = phi float [ [[ACC4_CLONE]], [[FOR_COND18_PREHEADER]] ], [ [[TMP58:%.*]], [[FOR_BODY22_MODIFY]] ] +; CHECK-NEXT: [[ACC30_CLONE:%.*]] = phi float [ [[ACC7_CLONE]], [[FOR_COND18_PREHEADER]] ], [ [[TMP59:%.*]], [[FOR_BODY22_MODIFY]] ] +; CHECK-NEXT: [[ACC34_CLONE:%.*]] = phi float [ [[ACC10_CLONE]], [[FOR_COND18_PREHEADER]] ], [ [[TMP60:%.*]], [[FOR_BODY22_MODIFY]] ] +; CHECK-NEXT: [[ACC37_CLONE:%.*]] = phi float [ [[ACC13_CLONE]], [[FOR_COND18_PREHEADER]] ], [ [[TMP61:%.*]], [[FOR_BODY22_MODIFY]] ] +; CHECK-NEXT: [[ACC40_CLONE:%.*]] = phi float [ [[ACC17_CLONE]], [[FOR_COND18_PREHEADER]] ], [ [[TMP62:%.*]], [[FOR_BODY22_MODIFY]] ] +; CHECK-NEXT: [[ACC44_CLONE:%.*]] = phi float [ [[ACC20_CLONE]], [[FOR_COND18_PREHEADER]] ], [ [[TMP63:%.*]], [[FOR_BODY22_MODIFY]] ] +; CHECK-NEXT: [[CMP47:%.*]] = icmp slt i32 [[N17_065_MODIFY_CLONE]], [[SPEC_STORE_SELECT]] +; CHECK-NEXT: br i1 [[CMP47]], label [[FOR_BODY22_LR_PH:%.*]], label [[FOR_END]] +; CHECK: for.body22.lr.ph.modify: +; CHECK-NEXT: [[TMP38:%.*]] = load ptr, ptr [[FIR]], align 4 +; CHECK-NEXT: [[TMP39]] = and i32 [[SPEC_STORE_SELECT]], 2147483640 +; CHECK-NEXT: br label [[FOR_BODY22_MODIFY]] +; CHECK: for.body22.modify: +; CHECK-NEXT: [[N17_065_MODIFY:%.*]] = phi i32 [ 0, [[FOR_BODY22_LR_PH_MODIFY]] ], [ [[INC29_MODIFY:%.*]], [[FOR_BODY22_MODIFY]] ] +; CHECK-NEXT: [[COEFF_POS_164_MODIFY:%.*]] = phi i32 [ [[COEFF_POS_0_LCSSA]], [[FOR_BODY22_LR_PH_MODIFY]] ], [ [[INC24_MODIFY]], [[FOR_BODY22_MODIFY]] ] +; CHECK-NEXT: [[ACC_163_MODIFY:%.*]] = phi float [ [[ACC_0_LCSSA]], [[FOR_BODY22_LR_PH_MODIFY]] ], [ [[TMP56]], [[FOR_BODY22_MODIFY]] ] +; CHECK-NEXT: [[ACC23:%.*]] 
= phi float [ [[ACC_CLONE]], [[FOR_BODY22_LR_PH_MODIFY]] ], [ [[TMP57]], [[FOR_BODY22_MODIFY]] ] +; CHECK-NEXT: [[ACC27:%.*]] = phi float [ [[ACC4_CLONE]], [[FOR_BODY22_LR_PH_MODIFY]] ], [ [[TMP58]], [[FOR_BODY22_MODIFY]] ] +; CHECK-NEXT: [[ACC30:%.*]] = phi float [ [[ACC7_CLONE]], [[FOR_BODY22_LR_PH_MODIFY]] ], [ [[TMP59]], [[FOR_BODY22_MODIFY]] ] +; CHECK-NEXT: [[ACC34:%.*]] = phi float [ [[ACC10_CLONE]], [[FOR_BODY22_LR_PH_MODIFY]] ], [ [[TMP60]], [[FOR_BODY22_MODIFY]] ] +; CHECK-NEXT: [[ACC37:%.*]] = phi float [ [[ACC13_CLONE]], [[FOR_BODY22_LR_PH_MODIFY]] ], [ [[TMP61]], [[FOR_BODY22_MODIFY]] ] +; CHECK-NEXT: [[ACC40:%.*]] = phi float [ [[ACC17_CLONE]], [[FOR_BODY22_LR_PH_MODIFY]] ], [ [[TMP62]], [[FOR_BODY22_MODIFY]] ] +; CHECK-NEXT: [[ACC44:%.*]] = phi float [ [[ACC20_CLONE]], [[FOR_BODY22_LR_PH_MODIFY]] ], [ [[TMP63]], [[FOR_BODY22_MODIFY]] ] +; CHECK-NEXT: [[INC24_MODIFY]] = add nuw nsw i32 [[COEFF_POS_164_MODIFY]], 8 +; CHECK-NEXT: [[INC29_MODIFY]] = add nuw nsw i32 [[N17_065_MODIFY]], 8 +; CHECK-NEXT: [[ADD1:%.*]] = or disjoint i32 [[N17_065_MODIFY]], 1 +; CHECK-NEXT: [[ADD2:%.*]] = or disjoint i32 [[N17_065_MODIFY]], 2 +; CHECK-NEXT: [[ADD3:%.*]] = or disjoint i32 [[N17_065_MODIFY]], 3 +; CHECK-NEXT: [[ADD4:%.*]] = or disjoint i32 [[N17_065_MODIFY]], 4 +; CHECK-NEXT: [[ADD5:%.*]] = or disjoint i32 [[N17_065_MODIFY]], 5 +; CHECK-NEXT: [[ADD6:%.*]] = or disjoint i32 [[N17_065_MODIFY]], 6 +; CHECK-NEXT: [[ADD743:%.*]] = or disjoint i32 [[N17_065_MODIFY]], 7 +; CHECK-NEXT: [[ARRAYIDX25_MODIFY:%.*]] = getelementptr inbounds float, ptr [[TMP38]], i32 [[COEFF_POS_164_MODIFY]] +; CHECK-NEXT: [[ARRAYIDX27_MODIFY:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 [[N17_065_MODIFY]] +; CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds float, ptr [[ARRAYIDX25_MODIFY]], i32 1 +; CHECK-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 [[ADD1]] +; CHECK-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds float, ptr 
[[ARRAYIDX25_MODIFY]], i32 2 +; CHECK-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 [[ADD2]] +; CHECK-NEXT: [[ARRAYIDX32:%.*]] = getelementptr inbounds float, ptr [[ARRAYIDX25_MODIFY]], i32 3 +; CHECK-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 [[ADD3]] +; CHECK-NEXT: [[ARRAYIDX35:%.*]] = getelementptr inbounds float, ptr [[ARRAYIDX25_MODIFY]], i32 4 +; CHECK-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 [[ADD4]] +; CHECK-NEXT: [[ARRAYIDX38:%.*]] = getelementptr inbounds float, ptr [[ARRAYIDX25_MODIFY]], i32 5 +; CHECK-NEXT: [[ARRAYIDX39:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 [[ADD5]] +; CHECK-NEXT: [[ARRAYIDX41:%.*]] = getelementptr inbounds float, ptr [[ARRAYIDX25_MODIFY]], i32 6 +; CHECK-NEXT: [[ARRAYIDX42:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 [[ADD6]] +; CHECK-NEXT: [[ARRAYIDX45:%.*]] = getelementptr inbounds float, ptr [[ARRAYIDX25_MODIFY]], i32 7 +; CHECK-NEXT: [[ARRAYIDX46:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 [[ADD743]] +; CHECK-NEXT: [[TMP40:%.*]] = load float, ptr [[ARRAYIDX25_MODIFY]], align 4 +; CHECK-NEXT: [[TMP41:%.*]] = load float, ptr [[ARRAYIDX27_MODIFY]], align 4 +; CHECK-NEXT: [[TMP42:%.*]] = load float, ptr [[ARRAYIDX24]], align 4 +; CHECK-NEXT: [[TMP43:%.*]] = load float, ptr [[ARRAYIDX26]], align 4 +; CHECK-NEXT: [[TMP44:%.*]] = load float, ptr [[ARRAYIDX28]], align 4 +; CHECK-NEXT: [[TMP45:%.*]] = load float, ptr [[ARRAYIDX29]], align 4 +; CHECK-NEXT: [[TMP46:%.*]] = load float, ptr [[ARRAYIDX32]], align 4 +; CHECK-NEXT: [[TMP47:%.*]] = load float, ptr [[ARRAYIDX33]], align 4 +; CHECK-NEXT: [[TMP48:%.*]] = load float, ptr [[ARRAYIDX35]], align 4 +; CHECK-NEXT: [[TMP49:%.*]] = load float, ptr [[ARRAYIDX36]], align 4 +; CHECK-NEXT: [[TMP50:%.*]] = load float, ptr [[ARRAYIDX38]], align 4 +; CHECK-NEXT: [[TMP51:%.*]] = load float, ptr [[ARRAYIDX39]], align 4 +; CHECK-NEXT: [[TMP52:%.*]] = load float, ptr 
[[ARRAYIDX41]], align 4 +; CHECK-NEXT: [[TMP53:%.*]] = load float, ptr [[ARRAYIDX42]], align 4 +; CHECK-NEXT: [[TMP54:%.*]] = load float, ptr [[ARRAYIDX45]], align 4 +; CHECK-NEXT: [[TMP55:%.*]] = load float, ptr [[ARRAYIDX46]], align 4 +; CHECK-NEXT: [[TMP56]] = tail call float @llvm.fmuladd.f32(float [[TMP40]], float [[TMP41]], float [[ACC_163_MODIFY]]) +; CHECK-NEXT: [[TMP57]] = tail call float @llvm.fmuladd.f32(float [[TMP42]], float [[TMP43]], float [[ACC23]]) +; CHECK-NEXT: [[TMP58]] = tail call float @llvm.fmuladd.f32(float [[TMP44]], float [[TMP45]], float [[ACC27]]) +; CHECK-NEXT: [[TMP59]] = tail call float @llvm.fmuladd.f32(float [[TMP46]], float [[TMP47]], float [[ACC30]]) +; CHECK-NEXT: [[TMP60]] = tail call float @llvm.fmuladd.f32(float [[TMP48]], float [[TMP49]], float [[ACC34]]) +; CHECK-NEXT: [[TMP61]] = tail call float @llvm.fmuladd.f32(float [[TMP50]], float [[TMP51]], float [[ACC37]]) +; CHECK-NEXT: [[TMP62]] = tail call float @llvm.fmuladd.f32(float [[TMP52]], float [[TMP53]], float [[ACC40]]) +; CHECK-NEXT: [[TMP63]] = tail call float @llvm.fmuladd.f32(float [[TMP54]], float [[TMP55]], float [[ACC44]]) +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[INC29_MODIFY]], [[TMP33]] +; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY22_MODIFY]], label [[FOR_COND_CLEANUP21]] ; CHECK: for.body22: -; CHECK-NEXT: [[N17_065:%.*]] = phi i32 [ 0, [[FOR_BODY22_LR_PH]] ], [ [[INC29:%.*]], [[FOR_BODY22]] ] -; CHECK-NEXT: [[COEFF_POS_164:%.*]] = phi i32 [ [[COEFF_POS_0_LCSSA]], [[FOR_BODY22_LR_PH]] ], [ [[INC24:%.*]], [[FOR_BODY22]] ] -; CHECK-NEXT: [[ACC_163:%.*]] = phi float [ [[ACC_0_LCSSA]], [[FOR_BODY22_LR_PH]] ], [ [[TMP13]], [[FOR_BODY22]] ] +; CHECK-NEXT: [[N17_065:%.*]] = phi i32 [ [[N17_065_MODIFY_CLONE]], [[FOR_BODY22_LR_PH]] ], [ [[INC29:%.*]], [[FOR_BODY22]] ] +; CHECK-NEXT: [[COEFF_POS_164:%.*]] = phi i32 [ [[COEFF_POS_164_MODIFY_CLONE]], [[FOR_BODY22_LR_PH]] ], [ [[INC24:%.*]], [[FOR_BODY22]] ] +; CHECK-NEXT: [[ACC_163:%.*]] = phi float [ 
[[ACC_163_MODIFY_CLONE]], [[FOR_BODY22_LR_PH]] ], [ [[TMP66:%.*]], [[FOR_BODY22]] ] ; CHECK-NEXT: [[INC24]] = add nuw nsw i32 [[COEFF_POS_164]], 1 -; CHECK-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 [[COEFF_POS_164]] -; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX25]], align 4 +; CHECK-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds float, ptr [[TMP34]], i32 [[COEFF_POS_164]] +; CHECK-NEXT: [[TMP64:%.*]] = load float, ptr [[ARRAYIDX25]], align 4 ; CHECK-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 [[N17_065]] -; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX27]], align 4 -; CHECK-NEXT: [[TMP13]] = tail call float @llvm.fmuladd.f32(float [[TMP11]], float [[TMP12]], float [[ACC_163]]) +; CHECK-NEXT: [[TMP65:%.*]] = load float, ptr [[ARRAYIDX27]], align 4 +; CHECK-NEXT: [[TMP66]] = tail call float @llvm.fmuladd.f32(float [[TMP64]], float [[TMP65]], float [[ACC_163]]) ; CHECK-NEXT: [[INC29]] = add nuw nsw i32 [[N17_065]], 1 ; CHECK-NEXT: [[EXITCOND70_NOT:%.*]] = icmp eq i32 [[INC29]], [[SPEC_STORE_SELECT]] -; CHECK-NEXT: br i1 [[EXITCOND70_NOT]], label [[FOR_COND_CLEANUP21]], label [[FOR_BODY22]] +; CHECK-NEXT: br i1 [[EXITCOND70_NOT]], label [[FOR_END]], label [[FOR_BODY22]] +; CHECK: for.end: +; CHECK-NEXT: [[TMP67:%.*]] = phi float [ [[ACC_163_MODIFY_CLONE]], [[FOR_COND_CLEANUP21]] ], [ [[TMP66]], [[FOR_BODY22]] ] +; CHECK-NEXT: [[ADD139:%.*]] = fadd float [[TMP67]], [[ACC23_CLONE]] +; CHECK-NEXT: [[ADD140:%.*]] = fadd float [[ACC27_CLONE]], [[ACC30_CLONE]] +; CHECK-NEXT: [[ADD141:%.*]] = fadd float [[ACC34_CLONE]], [[ACC37_CLONE]] +; CHECK-NEXT: [[ADD142:%.*]] = fadd float [[ACC40_CLONE]], [[ACC44_CLONE]] +; CHECK-NEXT: [[ADD143:%.*]] = fadd float [[ADD139]], [[ADD140]] +; CHECK-NEXT: [[ADD144:%.*]] = fadd float [[ADD141]], [[ADD142]] +; CHECK-NEXT: [[ADD145:%.*]] = fadd float [[ADD143]], [[ADD144]] +; CHECK-NEXT: [[ARRAYIDX31_MODIFY:%.*]] = getelementptr inbounds float, ptr 
[[OUTPUT]], i32 [[I_068]] +; CHECK-NEXT: store float [[ADD145]], ptr [[ARRAYIDX31_MODIFY]], align 4 +; CHECK-NEXT: [[INC33_MODIFY]] = add nuw nsw i32 [[I_068]], 1 +; CHECK-NEXT: [[EXITCOND71_NOT_MODIFY:%.*]] = icmp eq i32 [[INC33_MODIFY]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND71_NOT_MODIFY]], label [[IF_END:%.*]], label [[FOR_BODY]] +; CHECK: for.body.lr.ph.clone.preheader: +; CHECK-NEXT: [[CMP151349:%.*]] = icmp sgt i32 [[LEN]], 0 +; CHECK-NEXT: br i1 [[CMP151349]], label [[FOR_BODY_LR_PH_CLONE:%.*]], label [[IF_END]] ; CHECK: for.body.lr.ph.clone: ; CHECK-NEXT: [[DELAY_CLONE:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S]], ptr [[FIR]], i32 0, i32 1 -; CHECK-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DELAY_CLONE]], align 4 +; CHECK-NEXT: [[TMP68:%.*]] = load ptr, ptr [[DELAY_CLONE]], align 4 ; CHECK-NEXT: [[POS_CLONE:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S]], ptr [[FIR]], i32 0, i32 3 ; CHECK-NEXT: [[N_CLONE:%.*]] = getelementptr inbounds [[STRUCT_FIR_F32_S]], ptr [[FIR]], i32 0, i32 2 -; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[N_CLONE]], align 4 +; CHECK-NEXT: [[TMP69:%.*]] = load i32, ptr [[N_CLONE]], align 4 ; CHECK-NEXT: [[DOTPRE_CLONE:%.*]] = load i32, ptr [[POS_CLONE]], align 4 ; CHECK-NEXT: br label [[FOR_BODY_CLONE:%.*]] ; CHECK: for.body.clone: -; CHECK-NEXT: [[TMP16:%.*]] = phi i32 [ [[DOTPRE_CLONE]], [[FOR_BODY_LR_PH_CLONE]] ], [ [[SPEC_STORE_SELECT_CLONE:%.*]], [[FOR_COND_CLEANUP21_CLONE:%.*]] ] +; CHECK-NEXT: [[TMP70:%.*]] = phi i32 [ [[DOTPRE_CLONE]], [[FOR_BODY_LR_PH_CLONE]] ], [ [[SPEC_STORE_SELECT_CLONE:%.*]], [[FOR_COND_CLEANUP21_CLONE:%.*]] ] ; CHECK-NEXT: [[I_068_CLONE:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH_CLONE]] ], [ [[INC33_CLONE:%.*]], [[FOR_COND_CLEANUP21_CLONE]] ] ; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_068_CLONE]] -; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 -; CHECK-NEXT: [[ARRAYIDX1_CLONE:%.*]] = getelementptr inbounds float, ptr [[TMP14]], 
i32 [[TMP16]] -; CHECK-NEXT: store float [[TMP17]], ptr [[ARRAYIDX1_CLONE]], align 4 -; CHECK-NEXT: [[INC_CLONE:%.*]] = add nsw i32 [[TMP16]], 1 -; CHECK-NEXT: [[CMP4_NOT_CLONE:%.*]] = icmp slt i32 [[INC_CLONE]], [[TMP15]] +; CHECK-NEXT: [[TMP71:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[ARRAYIDX1_CLONE:%.*]] = getelementptr inbounds float, ptr [[TMP68]], i32 [[TMP70]] +; CHECK-NEXT: store float [[TMP71]], ptr [[ARRAYIDX1_CLONE]], align 4 +; CHECK-NEXT: [[INC_CLONE:%.*]] = add nsw i32 [[TMP70]], 1 +; CHECK-NEXT: [[CMP4_NOT_CLONE:%.*]] = icmp slt i32 [[INC_CLONE]], [[TMP69]] ; CHECK-NEXT: [[SPEC_STORE_SELECT_CLONE]] = select i1 [[CMP4_NOT_CLONE]], i32 [[INC_CLONE]], i32 0 ; CHECK-NEXT: store i32 [[SPEC_STORE_SELECT_CLONE]], ptr [[POS_CLONE]], align 4 -; CHECK-NEXT: [[CMP957_CLONE:%.*]] = icmp slt i32 [[SPEC_STORE_SELECT_CLONE]], [[TMP15]] +; CHECK-NEXT: [[CMP957_CLONE:%.*]] = icmp slt i32 [[SPEC_STORE_SELECT_CLONE]], [[TMP69]] ; CHECK-NEXT: br i1 [[CMP957_CLONE]], label [[FOR_BODY11_LR_PH_CLONE:%.*]], label [[FOR_COND18_PREHEADER_CLONE:%.*]] ; CHECK: for.body11.lr.ph.clone: -; CHECK-NEXT: [[TMP18:%.*]] = load ptr, ptr [[FIR]], align 4 -; CHECK-NEXT: [[TMP19:%.*]] = sub i32 [[TMP15]], [[SPEC_STORE_SELECT_CLONE]] +; CHECK-NEXT: [[TMP72:%.*]] = load ptr, ptr [[FIR]], align 4 +; CHECK-NEXT: [[TMP73:%.*]] = sub i32 [[TMP69]], [[SPEC_STORE_SELECT_CLONE]] ; CHECK-NEXT: br label [[FOR_BODY11_CLONE:%.*]] ; CHECK: for.body11.clone: ; CHECK-NEXT: [[N_060_CLONE:%.*]] = phi i32 [ [[SPEC_STORE_SELECT_CLONE]], [[FOR_BODY11_LR_PH_CLONE]] ], [ [[INC16_CLONE:%.*]], [[FOR_BODY11_CLONE]] ] ; CHECK-NEXT: [[COEFF_POS_059_CLONE:%.*]] = phi i32 [ 0, [[FOR_BODY11_LR_PH_CLONE]] ], [ [[INC12_CLONE:%.*]], [[FOR_BODY11_CLONE]] ] -; CHECK-NEXT: [[ACC_058_CLONE:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY11_LR_PH_CLONE]] ], [ [[TMP22:%.*]], [[FOR_BODY11_CLONE]] ] +; CHECK-NEXT: [[ACC_058_CLONE:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY11_LR_PH_CLONE]] ], [ 
[[TMP76:%.*]], [[FOR_BODY11_CLONE]] ] ; CHECK-NEXT: [[INC12_CLONE]] = add nuw i32 [[COEFF_POS_059_CLONE]], 1 -; CHECK-NEXT: [[ARRAYIDX13_CLONE:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i32 [[COEFF_POS_059_CLONE]] -; CHECK-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX13_CLONE]], align 4 -; CHECK-NEXT: [[ARRAYIDX15_CLONE:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[N_060_CLONE]] -; CHECK-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX15_CLONE]], align 4 -; CHECK-NEXT: [[TMP22]] = tail call float @llvm.fmuladd.f32(float [[TMP20]], float [[TMP21]], float [[ACC_058_CLONE]]) +; CHECK-NEXT: [[ARRAYIDX13_CLONE:%.*]] = getelementptr inbounds float, ptr [[TMP72]], i32 [[COEFF_POS_059_CLONE]] +; CHECK-NEXT: [[TMP74:%.*]] = load float, ptr [[ARRAYIDX13_CLONE]], align 4 +; CHECK-NEXT: [[ARRAYIDX15_CLONE:%.*]] = getelementptr inbounds float, ptr [[TMP68]], i32 [[N_060_CLONE]] +; CHECK-NEXT: [[TMP75:%.*]] = load float, ptr [[ARRAYIDX15_CLONE]], align 4 +; CHECK-NEXT: [[TMP76]] = tail call float @llvm.fmuladd.f32(float [[TMP74]], float [[TMP75]], float [[ACC_058_CLONE]]) ; CHECK-NEXT: [[INC16_CLONE]] = add nsw i32 [[N_060_CLONE]], 1 -; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC12_CLONE]], [[TMP19]] +; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC12_CLONE]], [[TMP73]] ; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[FOR_COND18_PREHEADER_CLONE]], label [[FOR_BODY11_CLONE]] ; CHECK: for.cond18.preheader.clone: -; CHECK-NEXT: [[ACC_0_LCSSA_CLONE:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY_CLONE]] ], [ [[TMP22]], [[FOR_BODY11_CLONE]] ] -; CHECK-NEXT: [[COEFF_POS_0_LCSSA_CLONE:%.*]] = phi i32 [ 0, [[FOR_BODY_CLONE]] ], [ [[TMP19]], [[FOR_BODY11_CLONE]] ] +; CHECK-NEXT: [[ACC_0_LCSSA_CLONE:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY_CLONE]] ], [ [[TMP76]], [[FOR_BODY11_CLONE]] ] +; CHECK-NEXT: [[COEFF_POS_0_LCSSA_CLONE:%.*]] = phi i32 [ 0, [[FOR_BODY_CLONE]] ], [ [[TMP73]], [[FOR_BODY11_CLONE]] ] ; CHECK-NEXT: 
[[CMP2062_CLONE:%.*]] = icmp sgt i32 [[SPEC_STORE_SELECT_CLONE]], 0 ; CHECK-NEXT: br i1 [[CMP2062_CLONE]], label [[FOR_BODY22_LR_PH_CLONE:%.*]], label [[FOR_COND_CLEANUP21_CLONE]] ; CHECK: for.body22.lr.ph.clone: -; CHECK-NEXT: [[TMP23:%.*]] = load ptr, ptr [[FIR]], align 4 +; CHECK-NEXT: [[TMP77:%.*]] = load ptr, ptr [[FIR]], align 4 ; CHECK-NEXT: br label [[FOR_BODY22_CLONE:%.*]] ; CHECK: for.body22.clone: ; CHECK-NEXT: [[N17_065_CLONE:%.*]] = phi i32 [ 0, [[FOR_BODY22_LR_PH_CLONE]] ], [ [[INC29_CLONE:%.*]], [[FOR_BODY22_CLONE]] ] ; CHECK-NEXT: [[COEFF_POS_164_CLONE:%.*]] = phi i32 [ [[COEFF_POS_0_LCSSA_CLONE]], [[FOR_BODY22_LR_PH_CLONE]] ], [ [[INC24_CLONE:%.*]], [[FOR_BODY22_CLONE]] ] -; CHECK-NEXT: [[ACC_163_CLONE:%.*]] = phi float [ [[ACC_0_LCSSA_CLONE]], [[FOR_BODY22_LR_PH_CLONE]] ], [ [[TMP26:%.*]], [[FOR_BODY22_CLONE]] ] +; CHECK-NEXT: [[ACC_163_CLONE:%.*]] = phi float [ [[ACC_0_LCSSA_CLONE]], [[FOR_BODY22_LR_PH_CLONE]] ], [ [[TMP80:%.*]], [[FOR_BODY22_CLONE]] ] ; CHECK-NEXT: [[INC24_CLONE]] = add nuw nsw i32 [[COEFF_POS_164_CLONE]], 1 -; CHECK-NEXT: [[ARRAYIDX25_CLONE:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i32 [[COEFF_POS_164_CLONE]] -; CHECK-NEXT: [[TMP24:%.*]] = load float, ptr [[ARRAYIDX25_CLONE]], align 4 -; CHECK-NEXT: [[ARRAYIDX27_CLONE:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[N17_065_CLONE]] -; CHECK-NEXT: [[TMP25:%.*]] = load float, ptr [[ARRAYIDX27_CLONE]], align 4 -; CHECK-NEXT: [[TMP26]] = tail call float @llvm.fmuladd.f32(float [[TMP24]], float [[TMP25]], float [[ACC_163_CLONE]]) +; CHECK-NEXT: [[ARRAYIDX25_CLONE:%.*]] = getelementptr inbounds float, ptr [[TMP77]], i32 [[COEFF_POS_164_CLONE]] +; CHECK-NEXT: [[TMP78:%.*]] = load float, ptr [[ARRAYIDX25_CLONE]], align 4 +; CHECK-NEXT: [[ARRAYIDX27_CLONE:%.*]] = getelementptr inbounds float, ptr [[TMP68]], i32 [[N17_065_CLONE]] +; CHECK-NEXT: [[TMP79:%.*]] = load float, ptr [[ARRAYIDX27_CLONE]], align 4 +; CHECK-NEXT: [[TMP80]] = tail call float 
@llvm.fmuladd.f32(float [[TMP78]], float [[TMP79]], float [[ACC_163_CLONE]]) ; CHECK-NEXT: [[INC29_CLONE]] = add nuw nsw i32 [[N17_065_CLONE]], 1 ; CHECK-NEXT: [[EXITCOND70_NOT_CLONE:%.*]] = icmp eq i32 [[INC29_CLONE]], [[SPEC_STORE_SELECT_CLONE]] ; CHECK-NEXT: br i1 [[EXITCOND70_NOT_CLONE]], label [[FOR_COND_CLEANUP21_CLONE]], label [[FOR_BODY22_CLONE]] ; CHECK: for.cond.cleanup21.clone: -; CHECK-NEXT: [[ACC_1_LCSSA_CLONE:%.*]] = phi float [ [[ACC_0_LCSSA_CLONE]], [[FOR_COND18_PREHEADER_CLONE]] ], [ [[TMP26]], [[FOR_BODY22_CLONE]] ] +; CHECK-NEXT: [[ACC_1_LCSSA_CLONE:%.*]] = phi float [ [[ACC_0_LCSSA_CLONE]], [[FOR_COND18_PREHEADER_CLONE]] ], [ [[TMP80]], [[FOR_BODY22_CLONE]] ] ; CHECK-NEXT: [[ARRAYIDX31_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_068_CLONE]] ; CHECK-NEXT: store float [[ACC_1_LCSSA_CLONE]], ptr [[ARRAYIDX31_CLONE]], align 4 ; CHECK-NEXT: [[INC33_CLONE]] = add nuw nsw i32 [[I_068_CLONE]], 1 ; CHECK-NEXT: [[EXITCOND71_NOT_CLONE:%.*]] = icmp eq i32 [[INC33_CLONE]], [[LEN]] ; CHECK-NEXT: br i1 [[EXITCOND71_NOT_CLONE]], label [[IF_END]], label [[FOR_BODY_CLONE]] +; CHECK: if.end: +; CHECK-NEXT: ret i32 0 ; entry: %0 = icmp sgt i32 %len, 2 diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fird.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fird.ll index 875710cf61b86..e7a15e8558512 100644 --- a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fird.ll +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fird.ll @@ -1,9 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=true < %s | FileCheck %s %struct.fir_f32_s = type { ptr, ptr, i32, i32, i32, i16 } define dso_local noundef i32 
@dsps_fird_f32_ansi(ptr nocapture noundef %fir, ptr nocapture noundef readonly %input, ptr nocapture noundef writeonly %output, i32 noundef %len) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_fird_f32_ansi( -; CHECK-SAME: ptr nocapture noundef [[FIR:%.*]], ptr nocapture noundef readonly [[INPUT:%.*]], ptr nocapture noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]]) local_unnamed_addr { +; CHECK-SAME: ptr noalias nocapture noundef [[FIR:%.*]], ptr noalias nocapture noundef readonly [[INPUT:%.*]], ptr noalias nocapture noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]]) local_unnamed_addr { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CMP77:%.*]] = icmp sgt i32 [[LEN]], 0 ; CHECK-NEXT: br i1 [[CMP77]], label [[FOR_COND1_PREHEADER_LR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]] @@ -18,32 +18,57 @@ define dso_local noundef i32 @dsps_fird_f32_ansi(ptr nocapture noundef %fir, ptr ; CHECK-NEXT: [[POS9_PROMOTED:%.*]] = load i32, ptr [[POS]], align 4 ; CHECK-NEXT: br label [[FOR_COND1_PREHEADER:%.*]] ; CHECK: for.cond1.preheader: -; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[POS9_PROMOTED]], [[FOR_COND1_PREHEADER_LR_PH]] ], [ [[TMP4:%.*]], [[FOR_COND_CLEANUP26:%.*]] ] -; CHECK-NEXT: [[I_080:%.*]] = phi i32 [ 0, [[FOR_COND1_PREHEADER_LR_PH]] ], [ [[INC39:%.*]], [[FOR_COND_CLEANUP26]] ] -; CHECK-NEXT: [[INPUT_ADDR_078:%.*]] = phi ptr [ [[INPUT]], [[FOR_COND1_PREHEADER_LR_PH]] ], [ [[INPUT_ADDR_1_LCSSA:%.*]], [[FOR_COND_CLEANUP26]] ] +; CHECK-NEXT: [[TMP2:%.*]] = phi i32 [ [[POS9_PROMOTED]], [[FOR_COND1_PREHEADER_LR_PH]] ], [ [[TMP4:%.*]], [[FOR_END141:%.*]] ] +; CHECK-NEXT: [[I_080:%.*]] = phi i32 [ 0, [[FOR_COND1_PREHEADER_LR_PH]] ], [ [[INC152:%.*]], [[FOR_END141]] ] +; CHECK-NEXT: [[INPUT_ADDR_078:%.*]] = phi ptr [ [[INPUT]], [[FOR_COND1_PREHEADER_LR_PH]] ], [ [[INPUT_ADDR_1_LCSSA:%.*]], [[FOR_END141]] ] ; CHECK-NEXT: br i1 [[CMP263]], label [[FOR_BODY4_LR_PH:%.*]], label [[FOR_COND_CLEANUP3:%.*]] ; CHECK: for.body4.lr.ph: ; CHECK-NEXT: [[TMP3:%.*]] 
= load ptr, ptr [[DELAY]], align 4 ; CHECK-NEXT: br label [[FOR_BODY4:%.*]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[LEN]], [[FOR_COND_CLEANUP26]] ] +; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[LEN]], [[FOR_END141]] ] ; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]] ; CHECK: for.cond1.for.cond.cleanup3_crit_edge: -; CHECK-NEXT: store i32 [[SPEC_SELECT:%.*]], ptr [[POS]], align 4 +; CHECK-NEXT: [[INCDEC_PTR_LCSSA:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY4]] ] +; CHECK-NEXT: [[SPEC_SELECT_LCSSA:%.*]] = phi i32 [ [[SPEC_SELECT:%.*]], [[FOR_BODY4]] ] +; CHECK-NEXT: store i32 [[SPEC_SELECT_LCSSA]], ptr [[POS]], align 4 ; CHECK-NEXT: br label [[FOR_COND_CLEANUP3]] ; CHECK: for.cond.cleanup3: -; CHECK-NEXT: [[TMP4]] = phi i32 [ [[SPEC_SELECT]], [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE:%.*]] ], [ [[TMP2]], [[FOR_COND1_PREHEADER]] ] -; CHECK-NEXT: [[INPUT_ADDR_1_LCSSA]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE]] ], [ [[INPUT_ADDR_078]], [[FOR_COND1_PREHEADER]] ] -; CHECK-NEXT: [[CMP1266:%.*]] = icmp slt i32 [[TMP4]], [[TMP1]] -; CHECK-NEXT: br i1 [[CMP1266]], label [[FOR_BODY14_LR_PH:%.*]], label [[FOR_COND23_PREHEADER:%.*]] +; CHECK-NEXT: [[TMP4]] = phi i32 [ [[SPEC_SELECT_LCSSA]], [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE:%.*]] ], [ [[TMP2]], [[FOR_COND1_PREHEADER]] ] +; CHECK-NEXT: [[INPUT_ADDR_1_LCSSA]] = phi ptr [ [[INCDEC_PTR_LCSSA]], [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE]] ], [ [[INPUT_ADDR_078]], [[FOR_COND1_PREHEADER]] ] +; CHECK-NEXT: [[ADD269:%.*]] = add nsw i32 [[TMP4]], 8 +; CHECK-NEXT: [[CMP1266:%.*]] = icmp sgt i32 [[ADD269]], [[TMP1]] +; CHECK-NEXT: br i1 [[CMP1266]], label [[FOR_COND63_PREHEADER:%.*]], label [[FOR_BODY14_LR_PH:%.*]] ; CHECK: for.body14.lr.ph: ; CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[FIR]], align 4 ; CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DELAY]], align 4 -; CHECK-NEXT: [[TMP7:%.*]] = sub i32 [[TMP1]], [[TMP4]] -; 
CHECK-NEXT: br label [[FOR_BODY14:%.*]] +; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[TMP6]], i32 28 +; CHECK-NEXT: [[TMP7:%.*]] = shl i32 [[TMP4]], 2 +; CHECK-NEXT: [[SCEVGEP101:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i32 [[TMP7]] +; CHECK-NEXT: [[SCEVGEP105:%.*]] = getelementptr i8, ptr [[TMP6]], i32 24 +; CHECK-NEXT: [[SCEVGEP106:%.*]] = getelementptr i8, ptr [[SCEVGEP105]], i32 [[TMP7]] +; CHECK-NEXT: [[SCEVGEP108:%.*]] = getelementptr i8, ptr [[TMP6]], i32 20 +; CHECK-NEXT: [[SCEVGEP109:%.*]] = getelementptr i8, ptr [[SCEVGEP108]], i32 [[TMP7]] +; CHECK-NEXT: [[SCEVGEP111:%.*]] = getelementptr i8, ptr [[TMP6]], i32 16 +; CHECK-NEXT: [[SCEVGEP112:%.*]] = getelementptr i8, ptr [[SCEVGEP111]], i32 [[TMP7]] +; CHECK-NEXT: [[SCEVGEP114:%.*]] = getelementptr i8, ptr [[TMP6]], i32 12 +; CHECK-NEXT: [[SCEVGEP115:%.*]] = getelementptr i8, ptr [[SCEVGEP114]], i32 [[TMP7]] +; CHECK-NEXT: [[SCEVGEP117:%.*]] = getelementptr i8, ptr [[TMP6]], i32 8 +; CHECK-NEXT: [[SCEVGEP118:%.*]] = getelementptr i8, ptr [[SCEVGEP117]], i32 [[TMP7]] +; CHECK-NEXT: [[SCEVGEP120:%.*]] = getelementptr i8, ptr [[TMP6]], i32 4 +; CHECK-NEXT: [[SCEVGEP121:%.*]] = getelementptr i8, ptr [[SCEVGEP120]], i32 [[TMP7]] +; CHECK-NEXT: [[SCEVGEP123:%.*]] = getelementptr i8, ptr [[TMP6]], i32 [[TMP7]] +; CHECK-NEXT: [[SCEVGEP127:%.*]] = getelementptr i8, ptr [[TMP5]], i32 28 +; CHECK-NEXT: [[SCEVGEP129:%.*]] = getelementptr i8, ptr [[TMP5]], i32 24 +; CHECK-NEXT: [[SCEVGEP131:%.*]] = getelementptr i8, ptr [[TMP5]], i32 20 +; CHECK-NEXT: [[SCEVGEP133:%.*]] = getelementptr i8, ptr [[TMP5]], i32 16 +; CHECK-NEXT: [[SCEVGEP135:%.*]] = getelementptr i8, ptr [[TMP5]], i32 12 +; CHECK-NEXT: [[SCEVGEP137:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8 +; CHECK-NEXT: [[SCEVGEP139:%.*]] = getelementptr i8, ptr [[TMP5]], i32 4 +; CHECK-NEXT: br label [[FOR_BODY14_7:%.*]] ; CHECK: for.body4: +; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32 [ [[LSR_IV_NEXT:%.*]], [[FOR_BODY4]] ], [ [[TMP0]], [[FOR_BODY4_LR_PH]] ] 
; CHECK-NEXT: [[TMP8:%.*]] = phi i32 [ [[TMP2]], [[FOR_BODY4_LR_PH]] ], [ [[SPEC_SELECT]], [[FOR_BODY4]] ] -; CHECK-NEXT: [[K_065:%.*]] = phi i32 [ 0, [[FOR_BODY4_LR_PH]] ], [ [[INC8:%.*]], [[FOR_BODY4]] ] ; CHECK-NEXT: [[INPUT_ADDR_164:%.*]] = phi ptr [ [[INPUT_ADDR_078]], [[FOR_BODY4_LR_PH]] ], [ [[INCDEC_PTR]], [[FOR_BODY4]] ] ; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds float, ptr [[INPUT_ADDR_164]], i32 1 ; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[INPUT_ADDR_164]], align 4 @@ -52,51 +77,247 @@ define dso_local noundef i32 @dsps_fird_f32_ansi(ptr nocapture noundef %fir, ptr ; CHECK-NEXT: store float [[TMP9]], ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[CMP6_NOT:%.*]] = icmp slt i32 [[INC]], [[TMP1]] ; CHECK-NEXT: [[SPEC_SELECT]] = select i1 [[CMP6_NOT]], i32 [[INC]], i32 0 -; CHECK-NEXT: [[INC8]] = add nuw nsw i32 [[K_065]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC8]], [[TMP0]] +; CHECK-NEXT: [[LSR_IV_NEXT]] = add i32 [[LSR_IV]], -1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[LSR_IV_NEXT]], 0 ; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE]], label [[FOR_BODY4]] -; CHECK: for.cond23.preheader: -; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_CLEANUP3]] ], [ [[TMP14:%.*]], [[FOR_BODY14]] ] -; CHECK-NEXT: [[COEFF_POS_0_LCSSA:%.*]] = phi i32 [ 0, [[FOR_COND_CLEANUP3]] ], [ [[TMP7]], [[FOR_BODY14]] ] -; CHECK-NEXT: [[CMP2572:%.*]] = icmp sgt i32 [[TMP4]], 0 -; CHECK-NEXT: br i1 [[CMP2572]], label [[FOR_BODY27_LR_PH:%.*]], label [[FOR_COND_CLEANUP26]] +; CHECK: for.cond63.preheader: +; CHECK-NEXT: [[ACC_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_CLEANUP3]] ], [ [[TMP18:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[ACC_1_LCSSA2:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_CLEANUP3]] ], [ [[TMP21:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[ACC_2_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_CLEANUP3]] ], [ [[TMP24:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: 
[[ACC_3_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_CLEANUP3]] ], [ [[TMP27:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[ACC_4_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_CLEANUP3]] ], [ [[TMP30:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[ACC_5_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_CLEANUP3]] ], [ [[TMP33:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[ACC_6_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_CLEANUP3]] ], [ [[TMP36:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[ACC_7_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_CLEANUP3]] ], [ [[TMP39:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[COEFF_POS_0_LCSSA:%.*]] = phi i32 [ 0, [[FOR_COND_CLEANUP3]] ], [ [[LSR_IV_NEXT126:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[N_0_LCSSA:%.*]] = phi i32 [ [[TMP4]], [[FOR_COND_CLEANUP3]] ], [ [[LSR_IV_NEXT100:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[CMP2572:%.*]] = icmp slt i32 [[N_0_LCSSA]], [[TMP1]] +; CHECK-NEXT: br i1 [[CMP2572]], label [[FOR_BODY27_LR_PH:%.*]], label [[FOR_COND_CLEANUP26:%.*]] ; CHECK: for.body27.lr.ph: ; CHECK-NEXT: [[TMP10:%.*]] = load ptr, ptr [[FIR]], align 4 ; CHECK-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DELAY]], align 4 -; CHECK-NEXT: br label [[FOR_BODY27:%.*]] -; CHECK: for.body14: -; CHECK-NEXT: [[N_069:%.*]] = phi i32 [ [[TMP4]], [[FOR_BODY14_LR_PH]] ], [ [[INC20:%.*]], [[FOR_BODY14]] ] -; CHECK-NEXT: [[COEFF_POS_068:%.*]] = phi i32 [ 0, [[FOR_BODY14_LR_PH]] ], [ [[INC15:%.*]], [[FOR_BODY14]] ] -; CHECK-NEXT: [[ACC_067:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY14_LR_PH]] ], [ [[TMP14]], [[FOR_BODY14]] ] -; CHECK-NEXT: [[INC15]] = add nuw i32 [[COEFF_POS_068]], 1 -; CHECK-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i32 [[COEFF_POS_068]] -; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX16]], align 4 -; CHECK-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i32 [[N_069]] -; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX18]], align 4 -; CHECK-NEXT: [[TMP14]] = 
tail call float @llvm.fmuladd.f32(float [[TMP12]], float [[TMP13]], float [[ACC_067]]) -; CHECK-NEXT: [[INC20]] = add nsw i32 [[N_069]], 1 -; CHECK-NEXT: [[EXITCOND83_NOT:%.*]] = icmp eq i32 [[INC15]], [[TMP7]] -; CHECK-NEXT: br i1 [[EXITCOND83_NOT]], label [[FOR_COND23_PREHEADER]], label [[FOR_BODY14]] +; CHECK-NEXT: [[TMP12:%.*]] = add i32 [[COEFF_POS_0_LCSSA]], [[TMP1]] +; CHECK-NEXT: [[TMP13:%.*]] = sub i32 [[TMP1]], [[N_0_LCSSA]] +; CHECK-NEXT: [[TMP14:%.*]] = shl i32 [[N_0_LCSSA]], 2 +; CHECK-NEXT: [[SCEVGEP144:%.*]] = getelementptr i8, ptr [[TMP11]], i32 [[TMP14]] +; CHECK-NEXT: [[TMP15:%.*]] = shl i32 [[COEFF_POS_0_LCSSA]], 2 +; CHECK-NEXT: [[SCEVGEP147:%.*]] = getelementptr i8, ptr [[TMP10]], i32 [[TMP15]] +; CHECK-NEXT: br label [[FOR_BODY14_CLONE:%.*]] +; CHECK: for.body14.7: +; CHECK-NEXT: [[LSR_IV125:%.*]] = phi i32 [ 0, [[FOR_BODY14_LR_PH]] ], [ [[LSR_IV_NEXT126]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[LSR_IV102:%.*]] = phi i32 [ 0, [[FOR_BODY14_LR_PH]] ], [ [[LSR_IV_NEXT103:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[LSR_IV99:%.*]] = phi i32 [ [[TMP4]], [[FOR_BODY14_LR_PH]] ], [ [[LSR_IV_NEXT100]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[ACC:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY14_LR_PH]] ], [ [[TMP18]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[ACC3:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY14_LR_PH]] ], [ [[TMP21]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[ACC4:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY14_LR_PH]] ], [ [[TMP24]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[ACC5:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY14_LR_PH]] ], [ [[TMP27]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[ACC6:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY14_LR_PH]] ], [ [[TMP30]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[ACC7:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY14_LR_PH]] ], [ [[TMP33]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[ACC8:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY14_LR_PH]] ], [ [[TMP36]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[ACC9:%.*]] = phi float [ 0.000000e+00, 
[[FOR_BODY14_LR_PH]] ], [ [[TMP39]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[SCEVGEP141:%.*]] = getelementptr i8, ptr [[TMP5]], i32 [[LSR_IV102]] +; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[SCEVGEP141]], align 4 +; CHECK-NEXT: [[SCEVGEP124:%.*]] = getelementptr i8, ptr [[SCEVGEP123]], i32 [[LSR_IV102]] +; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[SCEVGEP124]], align 4 +; CHECK-NEXT: [[TMP18]] = tail call float @llvm.fmuladd.f32(float [[TMP16]], float [[TMP17]], float [[ACC]]) +; CHECK-NEXT: [[SCEVGEP140:%.*]] = getelementptr i8, ptr [[SCEVGEP139]], i32 [[LSR_IV102]] +; CHECK-NEXT: [[TMP19:%.*]] = load float, ptr [[SCEVGEP140]], align 4 +; CHECK-NEXT: [[SCEVGEP122:%.*]] = getelementptr i8, ptr [[SCEVGEP121]], i32 [[LSR_IV102]] +; CHECK-NEXT: [[TMP20:%.*]] = load float, ptr [[SCEVGEP122]], align 4 +; CHECK-NEXT: [[TMP21]] = tail call float @llvm.fmuladd.f32(float [[TMP19]], float [[TMP20]], float [[ACC3]]) +; CHECK-NEXT: [[SCEVGEP138:%.*]] = getelementptr i8, ptr [[SCEVGEP137]], i32 [[LSR_IV102]] +; CHECK-NEXT: [[TMP22:%.*]] = load float, ptr [[SCEVGEP138]], align 4 +; CHECK-NEXT: [[SCEVGEP119:%.*]] = getelementptr i8, ptr [[SCEVGEP118]], i32 [[LSR_IV102]] +; CHECK-NEXT: [[TMP23:%.*]] = load float, ptr [[SCEVGEP119]], align 4 +; CHECK-NEXT: [[TMP24]] = tail call float @llvm.fmuladd.f32(float [[TMP22]], float [[TMP23]], float [[ACC4]]) +; CHECK-NEXT: [[SCEVGEP136:%.*]] = getelementptr i8, ptr [[SCEVGEP135]], i32 [[LSR_IV102]] +; CHECK-NEXT: [[TMP25:%.*]] = load float, ptr [[SCEVGEP136]], align 4 +; CHECK-NEXT: [[SCEVGEP116:%.*]] = getelementptr i8, ptr [[SCEVGEP115]], i32 [[LSR_IV102]] +; CHECK-NEXT: [[TMP26:%.*]] = load float, ptr [[SCEVGEP116]], align 4 +; CHECK-NEXT: [[TMP27]] = tail call float @llvm.fmuladd.f32(float [[TMP25]], float [[TMP26]], float [[ACC5]]) +; CHECK-NEXT: [[SCEVGEP134:%.*]] = getelementptr i8, ptr [[SCEVGEP133]], i32 [[LSR_IV102]] +; CHECK-NEXT: [[TMP28:%.*]] = load float, ptr [[SCEVGEP134]], align 4 +; CHECK-NEXT: [[SCEVGEP113:%.*]] = 
getelementptr i8, ptr [[SCEVGEP112]], i32 [[LSR_IV102]] +; CHECK-NEXT: [[TMP29:%.*]] = load float, ptr [[SCEVGEP113]], align 4 +; CHECK-NEXT: [[TMP30]] = tail call float @llvm.fmuladd.f32(float [[TMP28]], float [[TMP29]], float [[ACC6]]) +; CHECK-NEXT: [[SCEVGEP132:%.*]] = getelementptr i8, ptr [[SCEVGEP131]], i32 [[LSR_IV102]] +; CHECK-NEXT: [[TMP31:%.*]] = load float, ptr [[SCEVGEP132]], align 4 +; CHECK-NEXT: [[SCEVGEP110:%.*]] = getelementptr i8, ptr [[SCEVGEP109]], i32 [[LSR_IV102]] +; CHECK-NEXT: [[TMP32:%.*]] = load float, ptr [[SCEVGEP110]], align 4 +; CHECK-NEXT: [[TMP33]] = tail call float @llvm.fmuladd.f32(float [[TMP31]], float [[TMP32]], float [[ACC7]]) +; CHECK-NEXT: [[SCEVGEP130:%.*]] = getelementptr i8, ptr [[SCEVGEP129]], i32 [[LSR_IV102]] +; CHECK-NEXT: [[TMP34:%.*]] = load float, ptr [[SCEVGEP130]], align 4 +; CHECK-NEXT: [[SCEVGEP107:%.*]] = getelementptr i8, ptr [[SCEVGEP106]], i32 [[LSR_IV102]] +; CHECK-NEXT: [[TMP35:%.*]] = load float, ptr [[SCEVGEP107]], align 4 +; CHECK-NEXT: [[TMP36]] = tail call float @llvm.fmuladd.f32(float [[TMP34]], float [[TMP35]], float [[ACC8]]) +; CHECK-NEXT: [[SCEVGEP128:%.*]] = getelementptr i8, ptr [[SCEVGEP127]], i32 [[LSR_IV102]] +; CHECK-NEXT: [[TMP37:%.*]] = load float, ptr [[SCEVGEP128]], align 4 +; CHECK-NEXT: [[SCEVGEP104:%.*]] = getelementptr i8, ptr [[SCEVGEP101]], i32 [[LSR_IV102]] +; CHECK-NEXT: [[TMP38:%.*]] = load float, ptr [[SCEVGEP104]], align 4 +; CHECK-NEXT: [[TMP39]] = tail call float @llvm.fmuladd.f32(float [[TMP37]], float [[TMP38]], float [[ACC9]]) +; CHECK-NEXT: [[LSR_IV_NEXT100]] = add nsw i32 [[LSR_IV99]], 8 +; CHECK-NEXT: [[TMP40:%.*]] = add i32 [[LSR_IV_NEXT100]], 8 +; CHECK-NEXT: [[LSR_IV_NEXT103]] = add nuw i32 [[LSR_IV102]], 32 +; CHECK-NEXT: [[LSR_IV_NEXT126]] = add nuw i32 [[LSR_IV125]], 8 +; CHECK-NEXT: [[EXITCOND83_NOT_7:%.*]] = icmp sgt i32 [[TMP40]], [[TMP1]] +; CHECK-NEXT: br i1 [[EXITCOND83_NOT_7]], label [[FOR_COND63_PREHEADER]], label [[FOR_BODY14_7]] +; CHECK: 
for.body79.lr.ph: +; CHECK-NEXT: [[TMP41:%.*]] = load ptr, ptr [[FIR]], align 4 +; CHECK-NEXT: [[TMP42:%.*]] = load ptr, ptr [[DELAY]], align 4 +; CHECK-NEXT: [[TMP43:%.*]] = and i32 [[TMP4]], 2147483640 +; CHECK-NEXT: [[SCEVGEP150:%.*]] = getelementptr i8, ptr [[TMP42]], i32 28 +; CHECK-NEXT: [[SCEVGEP154:%.*]] = getelementptr i8, ptr [[TMP42]], i32 24 +; CHECK-NEXT: [[SCEVGEP156:%.*]] = getelementptr i8, ptr [[TMP42]], i32 20 +; CHECK-NEXT: [[SCEVGEP158:%.*]] = getelementptr i8, ptr [[TMP42]], i32 16 +; CHECK-NEXT: [[SCEVGEP160:%.*]] = getelementptr i8, ptr [[TMP42]], i32 12 +; CHECK-NEXT: [[SCEVGEP162:%.*]] = getelementptr i8, ptr [[TMP42]], i32 8 +; CHECK-NEXT: [[SCEVGEP164:%.*]] = getelementptr i8, ptr [[TMP42]], i32 4 +; CHECK-NEXT: [[SCEVGEP169:%.*]] = getelementptr i8, ptr [[TMP41]], i32 28 +; CHECK-NEXT: [[TMP44:%.*]] = shl i32 [[COEFF_POS_1_LCSSA:%.*]], 2 +; CHECK-NEXT: [[SCEVGEP170:%.*]] = getelementptr i8, ptr [[SCEVGEP169]], i32 [[TMP44]] +; CHECK-NEXT: [[SCEVGEP172:%.*]] = getelementptr i8, ptr [[TMP41]], i32 24 +; CHECK-NEXT: [[SCEVGEP173:%.*]] = getelementptr i8, ptr [[SCEVGEP172]], i32 [[TMP44]] +; CHECK-NEXT: [[SCEVGEP175:%.*]] = getelementptr i8, ptr [[TMP41]], i32 20 +; CHECK-NEXT: [[SCEVGEP176:%.*]] = getelementptr i8, ptr [[SCEVGEP175]], i32 [[TMP44]] +; CHECK-NEXT: [[SCEVGEP178:%.*]] = getelementptr i8, ptr [[TMP41]], i32 16 +; CHECK-NEXT: [[SCEVGEP179:%.*]] = getelementptr i8, ptr [[SCEVGEP178]], i32 [[TMP44]] +; CHECK-NEXT: [[SCEVGEP181:%.*]] = getelementptr i8, ptr [[TMP41]], i32 12 +; CHECK-NEXT: [[SCEVGEP182:%.*]] = getelementptr i8, ptr [[SCEVGEP181]], i32 [[TMP44]] +; CHECK-NEXT: [[SCEVGEP184:%.*]] = getelementptr i8, ptr [[TMP41]], i32 8 +; CHECK-NEXT: [[SCEVGEP185:%.*]] = getelementptr i8, ptr [[SCEVGEP184]], i32 [[TMP44]] +; CHECK-NEXT: [[SCEVGEP187:%.*]] = getelementptr i8, ptr [[TMP41]], i32 4 +; CHECK-NEXT: [[SCEVGEP188:%.*]] = getelementptr i8, ptr [[SCEVGEP187]], i32 [[TMP44]] +; CHECK-NEXT: [[SCEVGEP190:%.*]] = getelementptr 
i8, ptr [[TMP41]], i32 [[TMP44]] +; CHECK-NEXT: br label [[FOR_BODY27_7:%.*]] +; CHECK: for.body14.clone: +; CHECK-NEXT: [[LSR_IV148:%.*]] = phi ptr [ [[SCEVGEP149:%.*]], [[FOR_BODY14_CLONE]] ], [ [[SCEVGEP147]], [[FOR_BODY27_LR_PH]] ] +; CHECK-NEXT: [[LSR_IV145:%.*]] = phi ptr [ [[SCEVGEP146:%.*]], [[FOR_BODY14_CLONE]] ], [ [[SCEVGEP144]], [[FOR_BODY27_LR_PH]] ] +; CHECK-NEXT: [[LSR_IV142:%.*]] = phi i32 [ [[LSR_IV_NEXT143:%.*]], [[FOR_BODY14_CLONE]] ], [ [[TMP13]], [[FOR_BODY27_LR_PH]] ] +; CHECK-NEXT: [[ACC_067_CLONE:%.*]] = phi float [ [[ACC_0_LCSSA]], [[FOR_BODY27_LR_PH]] ], [ [[TMP47:%.*]], [[FOR_BODY14_CLONE]] ] +; CHECK-NEXT: [[TMP45:%.*]] = load float, ptr [[LSR_IV148]], align 4 +; CHECK-NEXT: [[TMP46:%.*]] = load float, ptr [[LSR_IV145]], align 4 +; CHECK-NEXT: [[TMP47]] = tail call float @llvm.fmuladd.f32(float [[TMP45]], float [[TMP46]], float [[ACC_067_CLONE]]) +; CHECK-NEXT: [[LSR_IV_NEXT143]] = add i32 [[LSR_IV142]], -1 +; CHECK-NEXT: [[SCEVGEP146]] = getelementptr i8, ptr [[LSR_IV145]], i32 4 +; CHECK-NEXT: [[SCEVGEP149]] = getelementptr i8, ptr [[LSR_IV148]], i32 4 +; CHECK-NEXT: [[EXITCOND83_NOT_CLONE:%.*]] = icmp eq i32 [[LSR_IV_NEXT143]], 0 +; CHECK-NEXT: br i1 [[EXITCOND83_NOT_CLONE]], label [[FOR_COND_CLEANUP26_LOOPEXIT:%.*]], label [[FOR_BODY14_CLONE]] +; CHECK: for.cond130.preheader: +; CHECK-NEXT: [[ACC_0_LCSSA_CLONE:%.*]] = phi float [ [[ACC_1_LCSSA:%.*]], [[FOR_COND_CLEANUP26]] ], [ [[TMP51:%.*]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC_1_LCSSA2_CLONE:%.*]] = phi float [ [[ACC_1_LCSSA2]], [[FOR_COND_CLEANUP26]] ], [ [[TMP54:%.*]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC_2_LCSSA_CLONE:%.*]] = phi float [ [[ACC_2_LCSSA]], [[FOR_COND_CLEANUP26]] ], [ [[TMP57:%.*]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC_3_LCSSA_CLONE:%.*]] = phi float [ [[ACC_3_LCSSA]], [[FOR_COND_CLEANUP26]] ], [ [[TMP60:%.*]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC_4_LCSSA_CLONE:%.*]] = phi float [ [[ACC_4_LCSSA]], [[FOR_COND_CLEANUP26]] ], [ [[TMP63:%.*]], 
[[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC_5_LCSSA_CLONE:%.*]] = phi float [ [[ACC_5_LCSSA]], [[FOR_COND_CLEANUP26]] ], [ [[TMP66:%.*]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC_6_LCSSA_CLONE:%.*]] = phi float [ [[ACC_6_LCSSA]], [[FOR_COND_CLEANUP26]] ], [ [[TMP69:%.*]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC_7_LCSSA_CLONE:%.*]] = phi float [ [[ACC_7_LCSSA]], [[FOR_COND_CLEANUP26]] ], [ [[TMP72:%.*]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[COEFF_POS_0_LCSSA_CLONE:%.*]] = phi i32 [ [[COEFF_POS_1_LCSSA]], [[FOR_COND_CLEANUP26]] ], [ [[LSR_IV_NEXT168:%.*]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[N_0_LCSSA_CLONE:%.*]] = phi i32 [ 0, [[FOR_COND_CLEANUP26]] ], [ [[TMP43]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[CMP2572_CLONE:%.*]] = icmp slt i32 [[N_0_LCSSA_CLONE]], [[TMP4]] +; CHECK-NEXT: br i1 [[CMP2572_CLONE]], label [[FOR_BODY133_LR_PH:%.*]], label [[FOR_END141]] +; CHECK: for.cond.cleanup26.loopexit: +; CHECK-NEXT: [[DOTLCSSA207:%.*]] = phi float [ [[TMP47]], [[FOR_BODY14_CLONE]] ] +; CHECK-NEXT: [[N_0_LCSSA_NEG:%.*]] = sub i32 0, [[N_0_LCSSA]] +; CHECK-NEXT: [[TMP48:%.*]] = add i32 [[TMP12]], [[N_0_LCSSA_NEG]] +; CHECK-NEXT: br label [[FOR_COND_CLEANUP26]] ; CHECK: for.cond.cleanup26: -; CHECK-NEXT: [[ACC_1_LCSSA:%.*]] = phi float [ [[ACC_0_LCSSA]], [[FOR_COND23_PREHEADER]] ], [ [[TMP17:%.*]], [[FOR_BODY27]] ] -; CHECK-NEXT: [[INC39]] = add nuw nsw i32 [[I_080]], 1 +; CHECK-NEXT: [[COEFF_POS_1_LCSSA]] = phi i32 [ [[COEFF_POS_0_LCSSA]], [[FOR_COND63_PREHEADER]] ], [ [[TMP48]], [[FOR_COND_CLEANUP26_LOOPEXIT]] ] +; CHECK-NEXT: [[ACC_1_LCSSA]] = phi float [ [[ACC_0_LCSSA]], [[FOR_COND63_PREHEADER]] ], [ [[DOTLCSSA207]], [[FOR_COND_CLEANUP26_LOOPEXIT]] ] +; CHECK-NEXT: [[EXITCOND85_NOT:%.*]] = icmp slt i32 [[TMP4]], 8 +; CHECK-NEXT: br i1 [[EXITCOND85_NOT]], label [[FOR_COND130_PREHEADER:%.*]], label [[FOR_BODY79_LR_PH:%.*]] +; CHECK: for.body27.7: +; CHECK-NEXT: [[LSR_IV167:%.*]] = phi i32 [ [[COEFF_POS_1_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[LSR_IV_NEXT168]], [[FOR_BODY27_7]] ] +; 
CHECK-NEXT: [[LSR_IV151:%.*]] = phi i32 [ 0, [[FOR_BODY79_LR_PH]] ], [ [[LSR_IV_NEXT152:%.*]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ADD76310:%.*]] = phi i32 [ 8, [[FOR_BODY79_LR_PH]] ], [ [[ADD76:%.*]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC38:%.*]] = phi float [ [[ACC_1_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[TMP51]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC39:%.*]] = phi float [ [[ACC_1_LCSSA2]], [[FOR_BODY79_LR_PH]] ], [ [[TMP54]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC40:%.*]] = phi float [ [[ACC_2_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[TMP57]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC41:%.*]] = phi float [ [[ACC_3_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[TMP60]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC42:%.*]] = phi float [ [[ACC_4_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[TMP63]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC43:%.*]] = phi float [ [[ACC_5_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[TMP66]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC44:%.*]] = phi float [ [[ACC_6_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[TMP69]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC45:%.*]] = phi float [ [[ACC_7_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[TMP72]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[SCEVGEP191:%.*]] = getelementptr i8, ptr [[SCEVGEP190]], i32 [[LSR_IV151]] +; CHECK-NEXT: [[TMP49:%.*]] = load float, ptr [[SCEVGEP191]], align 4 +; CHECK-NEXT: [[SCEVGEP166:%.*]] = getelementptr i8, ptr [[TMP42]], i32 [[LSR_IV151]] +; CHECK-NEXT: [[TMP50:%.*]] = load float, ptr [[SCEVGEP166]], align 4 +; CHECK-NEXT: [[TMP51]] = tail call float @llvm.fmuladd.f32(float [[TMP49]], float [[TMP50]], float [[ACC38]]) +; CHECK-NEXT: [[SCEVGEP189:%.*]] = getelementptr i8, ptr [[SCEVGEP188]], i32 [[LSR_IV151]] +; CHECK-NEXT: [[TMP52:%.*]] = load float, ptr [[SCEVGEP189]], align 4 +; CHECK-NEXT: [[SCEVGEP165:%.*]] = getelementptr i8, ptr [[SCEVGEP164]], i32 [[LSR_IV151]] +; CHECK-NEXT: [[TMP53:%.*]] = load float, ptr [[SCEVGEP165]], align 4 +; CHECK-NEXT: [[TMP54]] = tail call float @llvm.fmuladd.f32(float [[TMP52]], float 
[[TMP53]], float [[ACC39]]) +; CHECK-NEXT: [[SCEVGEP186:%.*]] = getelementptr i8, ptr [[SCEVGEP185]], i32 [[LSR_IV151]] +; CHECK-NEXT: [[TMP55:%.*]] = load float, ptr [[SCEVGEP186]], align 4 +; CHECK-NEXT: [[SCEVGEP163:%.*]] = getelementptr i8, ptr [[SCEVGEP162]], i32 [[LSR_IV151]] +; CHECK-NEXT: [[TMP56:%.*]] = load float, ptr [[SCEVGEP163]], align 4 +; CHECK-NEXT: [[TMP57]] = tail call float @llvm.fmuladd.f32(float [[TMP55]], float [[TMP56]], float [[ACC40]]) +; CHECK-NEXT: [[SCEVGEP183:%.*]] = getelementptr i8, ptr [[SCEVGEP182]], i32 [[LSR_IV151]] +; CHECK-NEXT: [[TMP58:%.*]] = load float, ptr [[SCEVGEP183]], align 4 +; CHECK-NEXT: [[SCEVGEP161:%.*]] = getelementptr i8, ptr [[SCEVGEP160]], i32 [[LSR_IV151]] +; CHECK-NEXT: [[TMP59:%.*]] = load float, ptr [[SCEVGEP161]], align 4 +; CHECK-NEXT: [[TMP60]] = tail call float @llvm.fmuladd.f32(float [[TMP58]], float [[TMP59]], float [[ACC41]]) +; CHECK-NEXT: [[SCEVGEP180:%.*]] = getelementptr i8, ptr [[SCEVGEP179]], i32 [[LSR_IV151]] +; CHECK-NEXT: [[TMP61:%.*]] = load float, ptr [[SCEVGEP180]], align 4 +; CHECK-NEXT: [[SCEVGEP159:%.*]] = getelementptr i8, ptr [[SCEVGEP158]], i32 [[LSR_IV151]] +; CHECK-NEXT: [[TMP62:%.*]] = load float, ptr [[SCEVGEP159]], align 4 +; CHECK-NEXT: [[TMP63]] = tail call float @llvm.fmuladd.f32(float [[TMP61]], float [[TMP62]], float [[ACC42]]) +; CHECK-NEXT: [[SCEVGEP177:%.*]] = getelementptr i8, ptr [[SCEVGEP176]], i32 [[LSR_IV151]] +; CHECK-NEXT: [[TMP64:%.*]] = load float, ptr [[SCEVGEP177]], align 4 +; CHECK-NEXT: [[SCEVGEP157:%.*]] = getelementptr i8, ptr [[SCEVGEP156]], i32 [[LSR_IV151]] +; CHECK-NEXT: [[TMP65:%.*]] = load float, ptr [[SCEVGEP157]], align 4 +; CHECK-NEXT: [[TMP66]] = tail call float @llvm.fmuladd.f32(float [[TMP64]], float [[TMP65]], float [[ACC43]]) +; CHECK-NEXT: [[SCEVGEP174:%.*]] = getelementptr i8, ptr [[SCEVGEP173]], i32 [[LSR_IV151]] +; CHECK-NEXT: [[TMP67:%.*]] = load float, ptr [[SCEVGEP174]], align 4 +; CHECK-NEXT: [[SCEVGEP155:%.*]] = getelementptr i8, 
ptr [[SCEVGEP154]], i32 [[LSR_IV151]] +; CHECK-NEXT: [[TMP68:%.*]] = load float, ptr [[SCEVGEP155]], align 4 +; CHECK-NEXT: [[TMP69]] = tail call float @llvm.fmuladd.f32(float [[TMP67]], float [[TMP68]], float [[ACC44]]) +; CHECK-NEXT: [[SCEVGEP171:%.*]] = getelementptr i8, ptr [[SCEVGEP170]], i32 [[LSR_IV151]] +; CHECK-NEXT: [[TMP70:%.*]] = load float, ptr [[SCEVGEP171]], align 4 +; CHECK-NEXT: [[SCEVGEP153:%.*]] = getelementptr i8, ptr [[SCEVGEP150]], i32 [[LSR_IV151]] +; CHECK-NEXT: [[TMP71:%.*]] = load float, ptr [[SCEVGEP153]], align 4 +; CHECK-NEXT: [[TMP72]] = tail call float @llvm.fmuladd.f32(float [[TMP70]], float [[TMP71]], float [[ACC45]]) +; CHECK-NEXT: [[ADD76]] = add nuw nsw i32 [[ADD76310]], 8 +; CHECK-NEXT: [[LSR_IV_NEXT152]] = add nuw i32 [[LSR_IV151]], 32 +; CHECK-NEXT: [[LSR_IV_NEXT168]] = add i32 [[LSR_IV167]], 8 +; CHECK-NEXT: [[EXITCOND84_NOT_7:%.*]] = icmp sgt i32 [[ADD76]], [[TMP4]] +; CHECK-NEXT: br i1 [[EXITCOND84_NOT_7]], label [[FOR_COND130_PREHEADER]], label [[FOR_BODY27_7]] +; CHECK: for.body133.lr.ph: +; CHECK-NEXT: [[TMP73:%.*]] = load ptr, ptr [[FIR]], align 4 +; CHECK-NEXT: [[TMP74:%.*]] = load ptr, ptr [[DELAY]], align 4 +; CHECK-NEXT: [[TMP75:%.*]] = sub i32 [[TMP4]], [[N_0_LCSSA_CLONE]] +; CHECK-NEXT: [[TMP76:%.*]] = shl i32 [[N_0_LCSSA_CLONE]], 2 +; CHECK-NEXT: [[SCEVGEP194:%.*]] = getelementptr i8, ptr [[TMP74]], i32 [[TMP76]] +; CHECK-NEXT: [[TMP77:%.*]] = shl i32 [[COEFF_POS_0_LCSSA_CLONE]], 2 +; CHECK-NEXT: [[SCEVGEP197:%.*]] = getelementptr i8, ptr [[TMP73]], i32 [[TMP77]] +; CHECK-NEXT: br label [[FOR_BODY27_CLONE:%.*]] +; CHECK: for.body27.clone: +; CHECK-NEXT: [[LSR_IV198:%.*]] = phi ptr [ [[SCEVGEP199:%.*]], [[FOR_BODY27_CLONE]] ], [ [[SCEVGEP197]], [[FOR_BODY133_LR_PH]] ] +; CHECK-NEXT: [[LSR_IV195:%.*]] = phi ptr [ [[SCEVGEP196:%.*]], [[FOR_BODY27_CLONE]] ], [ [[SCEVGEP194]], [[FOR_BODY133_LR_PH]] ] +; CHECK-NEXT: [[LSR_IV192:%.*]] = phi i32 [ [[LSR_IV_NEXT193:%.*]], [[FOR_BODY27_CLONE]] ], [ [[TMP75]], 
[[FOR_BODY133_LR_PH]] ] +; CHECK-NEXT: [[ACC_173_CLONE:%.*]] = phi float [ [[ACC_0_LCSSA_CLONE]], [[FOR_BODY133_LR_PH]] ], [ [[TMP80:%.*]], [[FOR_BODY27_CLONE]] ] +; CHECK-NEXT: [[TMP78:%.*]] = load float, ptr [[LSR_IV198]], align 4 +; CHECK-NEXT: [[TMP79:%.*]] = load float, ptr [[LSR_IV195]], align 4 +; CHECK-NEXT: [[TMP80]] = tail call float @llvm.fmuladd.f32(float [[TMP78]], float [[TMP79]], float [[ACC_173_CLONE]]) +; CHECK-NEXT: [[LSR_IV_NEXT193]] = add i32 [[LSR_IV192]], -1 +; CHECK-NEXT: [[SCEVGEP196]] = getelementptr i8, ptr [[LSR_IV195]], i32 4 +; CHECK-NEXT: [[SCEVGEP199]] = getelementptr i8, ptr [[LSR_IV198]], i32 4 +; CHECK-NEXT: [[EXITCOND84_NOT_CLONE:%.*]] = icmp eq i32 [[LSR_IV_NEXT193]], 0 +; CHECK-NEXT: br i1 [[EXITCOND84_NOT_CLONE]], label [[FOR_END141]], label [[FOR_BODY27_CLONE]] +; CHECK: for.end141: +; CHECK-NEXT: [[ACC0_3_LCSSA:%.*]] = phi float [ [[ACC_0_LCSSA_CLONE]], [[FOR_COND130_PREHEADER]] ], [ [[TMP80]], [[FOR_BODY27_CLONE]] ] +; CHECK-NEXT: [[ADD60:%.*]] = fadd float [[ACC_1_LCSSA2_CLONE]], [[ACC0_3_LCSSA]] +; CHECK-NEXT: [[ADD6179:%.*]] = fadd float [[ACC_2_LCSSA_CLONE]], [[ACC_3_LCSSA_CLONE]] +; CHECK-NEXT: [[ADD62:%.*]] = fadd float [[ACC_4_LCSSA_CLONE]], [[ACC_5_LCSSA_CLONE]] +; CHECK-NEXT: [[ADD6380:%.*]] = fadd float [[ACC_6_LCSSA_CLONE]], [[ACC_7_LCSSA_CLONE]] +; CHECK-NEXT: [[ADD64:%.*]] = fadd float [[ADD6179]], [[ADD60]] +; CHECK-NEXT: [[ADD6581:%.*]] = fadd float [[ADD62]], [[ADD6380]] +; CHECK-NEXT: [[ADD66:%.*]] = fadd float [[ADD6581]], [[ADD64]] ; CHECK-NEXT: [[ARRAYIDX37:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_080]] -; CHECK-NEXT: store float [[ACC_1_LCSSA]], ptr [[ARRAYIDX37]], align 4 -; CHECK-NEXT: [[EXITCOND85_NOT:%.*]] = icmp eq i32 [[INC39]], [[LEN]] -; CHECK-NEXT: br i1 [[EXITCOND85_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_COND1_PREHEADER]] -; CHECK: for.body27: -; CHECK-NEXT: [[N22_075:%.*]] = phi i32 [ 0, [[FOR_BODY27_LR_PH]] ], [ [[INC34:%.*]], [[FOR_BODY27]] ] -; CHECK-NEXT: 
[[COEFF_POS_174:%.*]] = phi i32 [ [[COEFF_POS_0_LCSSA]], [[FOR_BODY27_LR_PH]] ], [ [[INC29:%.*]], [[FOR_BODY27]] ] -; CHECK-NEXT: [[ACC_173:%.*]] = phi float [ [[ACC_0_LCSSA]], [[FOR_BODY27_LR_PH]] ], [ [[TMP17]], [[FOR_BODY27]] ] -; CHECK-NEXT: [[INC29]] = add nuw nsw i32 [[COEFF_POS_174]], 1 -; CHECK-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i32 [[COEFF_POS_174]] -; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX30]], align 4 -; CHECK-NEXT: [[ARRAYIDX32:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i32 [[N22_075]] -; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX32]], align 4 -; CHECK-NEXT: [[TMP17]] = tail call float @llvm.fmuladd.f32(float [[TMP15]], float [[TMP16]], float [[ACC_173]]) -; CHECK-NEXT: [[INC34]] = add nuw nsw i32 [[N22_075]], 1 -; CHECK-NEXT: [[EXITCOND84_NOT:%.*]] = icmp eq i32 [[INC34]], [[TMP4]] -; CHECK-NEXT: br i1 [[EXITCOND84_NOT]], label [[FOR_COND_CLEANUP26]], label [[FOR_BODY27]] +; CHECK-NEXT: store float [[ADD66]], ptr [[ARRAYIDX37]], align 4 +; CHECK-NEXT: [[INC152]] = add nuw nsw i32 [[I_080]], 1 +; CHECK-NEXT: [[EXITCOND350_NOT:%.*]] = icmp eq i32 [[INC152]], [[LEN]] +; CHECK-NEXT: br i1 [[EXITCOND350_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_COND1_PREHEADER]] ; entry: %cmp77 = icmp sgt i32 %len, 0 diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/loopsecvconstant.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/loopsecvconstant.ll index a4fb7808a4f8e..aa9f66e46f4e8 100644 --- a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/loopsecvconstant.ll +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/loopsecvconstant.ll @@ -1,23 +1,79 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf 
-passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=true < %s | FileCheck %s define dso_local float @test_loop(ptr nocapture noundef readonly %data1, ptr nocapture noundef readonly %data2) local_unnamed_addr { ; CHECK-LABEL: define dso_local float @test_loop( -; CHECK-SAME: ptr nocapture noundef readonly [[DATA1:%.*]], ptr nocapture noundef readonly [[DATA2:%.*]]) local_unnamed_addr { +; CHECK-SAME: ptr noalias nocapture noundef readonly [[DATA1:%.*]], ptr noalias nocapture noundef readonly [[DATA2:%.*]]) local_unnamed_addr { ; CHECK-NEXT: entry: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.cond.cleanup: -; CHECK-NEXT: ret float [[TMP2:%.*]] +; CHECK: for.end: +; CHECK-NEXT: [[ADD37:%.*]] = fadd float [[TMP16:%.*]], [[TMP17:%.*]] +; CHECK-NEXT: [[ADD38:%.*]] = fadd float [[TMP18:%.*]], [[TMP19:%.*]] +; CHECK-NEXT: [[ADD39:%.*]] = fadd float [[TMP20:%.*]], [[TMP21:%.*]] +; CHECK-NEXT: [[ADD40:%.*]] = fadd float [[TMP22:%.*]], [[TMP23:%.*]] +; CHECK-NEXT: [[ADD41:%.*]] = fadd float [[ADD37]], [[ADD38]] +; CHECK-NEXT: [[ADD42:%.*]] = fadd float [[ADD39]], [[ADD40]] +; CHECK-NEXT: [[ADD43:%.*]] = fadd float [[ADD41]], [[ADD42]] +; CHECK-NEXT: ret float [[ADD43]] ; CHECK: for.body: -; CHECK-NEXT: [[I_07:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[RESULT_06:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[TMP2]], [[FOR_BODY]] ] +; CHECK-NEXT: [[I_07:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC_7:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[DOTPHI:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[TMP16]], [[FOR_BODY]] ] +; CHECK-NEXT: [[DOTPHI1:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[TMP17]], [[FOR_BODY]] ] +; CHECK-NEXT: [[DOTPHI2:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[TMP18]], [[FOR_BODY]] ] +; CHECK-NEXT: [[DOTPHI3:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[TMP19]], [[FOR_BODY]] ] +; CHECK-NEXT: [[DOTPHI4:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[TMP20]], 
[[FOR_BODY]] ] +; CHECK-NEXT: [[DOTPHI5:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[TMP21]], [[FOR_BODY]] ] +; CHECK-NEXT: [[DOTPHI6:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[TMP22]], [[FOR_BODY]] ] +; CHECK-NEXT: [[DOTPHI7:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[TMP23]], [[FOR_BODY]] ] +; CHECK-NEXT: [[INC_7]] = add nuw nsw i32 [[I_07]], 8 +; CHECK-NEXT: [[ADD:%.*]] = or disjoint i32 [[I_07]], 1 +; CHECK-NEXT: [[ADD9:%.*]] = or disjoint i32 [[I_07]], 2 +; CHECK-NEXT: [[ADD11:%.*]] = or disjoint i32 [[I_07]], 3 +; CHECK-NEXT: [[ADD13:%.*]] = or disjoint i32 [[I_07]], 4 +; CHECK-NEXT: [[ADD15:%.*]] = or disjoint i32 [[I_07]], 5 +; CHECK-NEXT: [[ADD17:%.*]] = or disjoint i32 [[I_07]], 6 +; CHECK-NEXT: [[ADD19:%.*]] = or disjoint i32 [[I_07]], 7 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[DATA1]], i32 [[I_07]] -; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[DATA2]], i32 [[I_07]] +; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[DATA1]], i32 [[ADD]] +; CHECK-NEXT: [[ARRAYIDX1_1:%.*]] = getelementptr inbounds float, ptr [[DATA2]], i32 [[ADD]] +; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[DATA1]], i32 [[ADD9]] +; CHECK-NEXT: [[ARRAYIDX1_2:%.*]] = getelementptr inbounds float, ptr [[DATA2]], i32 [[ADD9]] +; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[DATA1]], i32 [[ADD11]] +; CHECK-NEXT: [[ARRAYIDX1_3:%.*]] = getelementptr inbounds float, ptr [[DATA2]], i32 [[ADD11]] +; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, ptr [[DATA1]], i32 [[ADD13]] +; CHECK-NEXT: [[ARRAYIDX1_4:%.*]] = getelementptr inbounds float, ptr [[DATA2]], i32 [[ADD13]] +; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, ptr [[DATA1]], i32 [[ADD15]] +; CHECK-NEXT: [[ARRAYIDX1_5:%.*]] = getelementptr inbounds float, ptr [[DATA2]], i32 [[ADD15]] +; CHECK-NEXT: 
[[ARRAYIDX_6:%.*]] = getelementptr inbounds float, ptr [[DATA1]], i32 [[ADD17]] +; CHECK-NEXT: [[ARRAYIDX1_6:%.*]] = getelementptr inbounds float, ptr [[DATA2]], i32 [[ADD17]] +; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, ptr [[DATA1]], i32 [[ADD19]] +; CHECK-NEXT: [[ARRAYIDX1_7:%.*]] = getelementptr inbounds float, ptr [[DATA2]], i32 [[ADD19]] +; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 -; CHECK-NEXT: [[TMP2]] = tail call float @llvm.fmuladd.f32(float [[TMP0]], float [[TMP1]], float [[RESULT_06]]) -; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_07]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4 +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX1_1]], align 4 +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4 +; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX1_2]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4 +; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX1_3]], align 4 +; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX_4]], align 4 +; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX1_4]], align 4 +; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX_5]], align 4 +; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX1_5]], align 4 +; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX_6]], align 4 +; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX1_6]], align 4 +; CHECK-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX_7]], align 4 +; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX1_7]], align 4 +; CHECK-NEXT: [[TMP16]] = tail call float @llvm.fmuladd.f32(float [[TMP0]], float [[TMP1]], float [[DOTPHI]]) +; CHECK-NEXT: [[TMP17]] = tail call float @llvm.fmuladd.f32(float 
[[TMP2]], float [[TMP3]], float [[DOTPHI1]]) +; CHECK-NEXT: [[TMP18]] = tail call float @llvm.fmuladd.f32(float [[TMP4]], float [[TMP5]], float [[DOTPHI2]]) +; CHECK-NEXT: [[TMP19]] = tail call float @llvm.fmuladd.f32(float [[TMP6]], float [[TMP7]], float [[DOTPHI3]]) +; CHECK-NEXT: [[TMP20]] = tail call float @llvm.fmuladd.f32(float [[TMP8]], float [[TMP9]], float [[DOTPHI4]]) +; CHECK-NEXT: [[TMP21]] = tail call float @llvm.fmuladd.f32(float [[TMP10]], float [[TMP11]], float [[DOTPHI5]]) +; CHECK-NEXT: [[TMP22]] = tail call float @llvm.fmuladd.f32(float [[TMP12]], float [[TMP13]], float [[DOTPHI6]]) +; CHECK-NEXT: [[TMP23]] = tail call float @llvm.fmuladd.f32(float [[TMP14]], float [[TMP15]], float [[DOTPHI7]]) +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC_7]], 1009 +; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]] ; entry: br label %for.body diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/mul.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/mul.ll index bcf9852fd491e..1a6c4fda2b512 100644 --- a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/mul.ll +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/mul.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=true < %s | FileCheck %s define dso_local noundef i32 @dsps_mul_f32_ansi(ptr noundef readonly %input1, ptr noundef readonly %input2, ptr noundef writeonly %output, i32 noundef %len, i32 noundef %step1, i32 noundef %step2, i32 noundef %step_out) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_mul_f32_ansi( -; CHECK-SAME: ptr noundef readonly [[INPUT1:%.*]], ptr noundef readonly [[INPUT2:%.*]], ptr noundef 
writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[STEP2:%.*]], i32 noundef [[STEP_OUT:%.*]]) local_unnamed_addr { +; CHECK-SAME: ptr noalias noundef readonly [[INPUT1:%.*]], ptr noalias noundef readonly [[INPUT2:%.*]], ptr noalias noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[STEP2:%.*]], i32 noundef [[STEP_OUT:%.*]]) local_unnamed_addr { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[INPUT1]], null ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[INPUT2]], null @@ -12,19 +12,159 @@ define dso_local noundef i32 @dsps_mul_f32_ansi(ptr noundef readonly %input1, pt ; CHECK-NEXT: br i1 [[OR_COND20]], label [[RETURN:%.*]], label [[IF_END:%.*]] ; CHECK: if.end: ; CHECK-NEXT: [[CMP41:%.*]] = icmp sgt i32 [[LEN]], 2 -; CHECK-NEXT: br i1 [[CMP41]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[CMP41]], label [[FOR_COND_PREHEADER_NEW:%.*]], label [[FOR_COND_PREHEADER:%.*]] ; CHECK: for.cond.preheader: ; CHECK-NEXT: [[CMP721:%.*]] = icmp sgt i32 [[LEN]], 0 ; CHECK-NEXT: br i1 [[CMP721]], label [[FOR_BODY_CLONE:%.*]], label [[RETURN]] +; CHECK: for.cond.preheader.new: +; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[LEN]], -16 +; CHECK-NEXT: [[CMP6_NOT207:%.*]] = icmp ult i32 [[LEN]], 16 +; CHECK-NEXT: br i1 [[CMP6_NOT207]], label [[FOR_COND_PREHEADER_NEW2:%.*]], label [[FOR_BODY_MODIFY:%.*]] +; CHECK: for.cond.preheader.new2: +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[TMP1:%.*]], [[FOR_BODY_MODIFY]] ], [ 0, [[FOR_COND_PREHEADER_NEW]] ] +; CHECK-NEXT: [[CMP85209:%.*]] = icmp slt i32 [[TMP0]], [[LEN]] +; CHECK-NEXT: br i1 [[CMP85209]], label [[FOR_BODY:%.*]], label [[RETURN]] +; CHECK: for.body.modify: +; CHECK-NEXT: [[I_022_MODIFY:%.*]] = phi i32 [ [[TMP1]], [[FOR_BODY_MODIFY]] ], [ 0, [[FOR_COND_PREHEADER_NEW]] ] +; CHECK-NEXT: [[TMP1]] = add nuw i32 [[I_022_MODIFY]], 16 +; CHECK-NEXT: [[ADD:%.*]] = or disjoint i32 [[I_022_MODIFY]], 1 +; 
CHECK-NEXT: [[ADD4:%.*]] = or disjoint i32 [[I_022_MODIFY]], 2 +; CHECK-NEXT: [[ADD8:%.*]] = or disjoint i32 [[I_022_MODIFY]], 3 +; CHECK-NEXT: [[ADD14:%.*]] = or disjoint i32 [[I_022_MODIFY]], 4 +; CHECK-NEXT: [[ADD18:%.*]] = or disjoint i32 [[I_022_MODIFY]], 5 +; CHECK-NEXT: [[ADD22:%.*]] = or disjoint i32 [[I_022_MODIFY]], 6 +; CHECK-NEXT: [[ADD26:%.*]] = or disjoint i32 [[I_022_MODIFY]], 7 +; CHECK-NEXT: [[ADD30:%.*]] = or disjoint i32 [[I_022_MODIFY]], 8 +; CHECK-NEXT: [[ADD34:%.*]] = or disjoint i32 [[I_022_MODIFY]], 9 +; CHECK-NEXT: [[ADD38:%.*]] = or disjoint i32 [[I_022_MODIFY]], 10 +; CHECK-NEXT: [[ADD42:%.*]] = or disjoint i32 [[I_022_MODIFY]], 11 +; CHECK-NEXT: [[ADD46:%.*]] = or disjoint i32 [[I_022_MODIFY]], 12 +; CHECK-NEXT: [[ADD50:%.*]] = or disjoint i32 [[I_022_MODIFY]], 13 +; CHECK-NEXT: [[ADD54:%.*]] = or disjoint i32 [[I_022_MODIFY]], 14 +; CHECK-NEXT: [[ADD58:%.*]] = or disjoint i32 [[I_022_MODIFY]], 15 +; CHECK-NEXT: [[ARRAYIDX_MODIFY:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[I_022_MODIFY]] +; CHECK-NEXT: [[ARRAYIDX9_MODIFY:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[I_022_MODIFY]] +; CHECK-NEXT: [[ARRAYIDX12_MODIFY:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_022_MODIFY]] +; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD]] +; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD]] +; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD]] +; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD4]] +; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD4]] +; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD4]] +; CHECK-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD8]] +; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, ptr 
[[INPUT2]], i32 [[ADD8]] +; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD8]] +; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD14]] +; CHECK-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD14]] +; CHECK-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD14]] +; CHECK-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD18]] +; CHECK-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD18]] +; CHECK-NEXT: [[ARRAYIDX21:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD18]] +; CHECK-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD22]] +; CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD22]] +; CHECK-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD22]] +; CHECK-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD26]] +; CHECK-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD26]] +; CHECK-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD26]] +; CHECK-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD30]] +; CHECK-NEXT: [[ARRAYIDX32:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD30]] +; CHECK-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD30]] +; CHECK-NEXT: [[ARRAYIDX35:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD34]] +; CHECK-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD34]] +; CHECK-NEXT: [[ARRAYIDX37:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD34]] +; CHECK-NEXT: [[ARRAYIDX39:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD38]] +; CHECK-NEXT: [[ARRAYIDX40:%.*]] = getelementptr 
inbounds float, ptr [[INPUT2]], i32 [[ADD38]] +; CHECK-NEXT: [[ARRAYIDX41:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD38]] +; CHECK-NEXT: [[ARRAYIDX43:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD42]] +; CHECK-NEXT: [[ARRAYIDX44:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD42]] +; CHECK-NEXT: [[ARRAYIDX45:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD42]] +; CHECK-NEXT: [[ARRAYIDX47:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD46]] +; CHECK-NEXT: [[ARRAYIDX48:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD46]] +; CHECK-NEXT: [[ARRAYIDX49:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD46]] +; CHECK-NEXT: [[ARRAYIDX51:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD50]] +; CHECK-NEXT: [[ARRAYIDX52:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD50]] +; CHECK-NEXT: [[ARRAYIDX53:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD50]] +; CHECK-NEXT: [[ARRAYIDX55:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD54]] +; CHECK-NEXT: [[ARRAYIDX56:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD54]] +; CHECK-NEXT: [[ARRAYIDX57:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD54]] +; CHECK-NEXT: [[ARRAYIDX59:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD58]] +; CHECK-NEXT: [[ARRAYIDX60:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD58]] +; CHECK-NEXT: [[ARRAYIDX61:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD58]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX_MODIFY]], align 4 +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX9_MODIFY]], align 4 +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 +; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX5]], align 4 +; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr 
[[ARRAYIDX6]], align 4 +; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX10]], align 4 +; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX11]], align 4 +; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX15]], align 4 +; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX16]], align 4 +; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX19]], align 4 +; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX20]], align 4 +; CHECK-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX23]], align 4 +; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX24]], align 4 +; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX27]], align 4 +; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX28]], align 4 +; CHECK-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX31]], align 4 +; CHECK-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX32]], align 4 +; CHECK-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX35]], align 4 +; CHECK-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX36]], align 4 +; CHECK-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX39]], align 4 +; CHECK-NEXT: [[TMP23:%.*]] = load float, ptr [[ARRAYIDX40]], align 4 +; CHECK-NEXT: [[TMP24:%.*]] = load float, ptr [[ARRAYIDX43]], align 4 +; CHECK-NEXT: [[TMP25:%.*]] = load float, ptr [[ARRAYIDX44]], align 4 +; CHECK-NEXT: [[TMP26:%.*]] = load float, ptr [[ARRAYIDX47]], align 4 +; CHECK-NEXT: [[TMP27:%.*]] = load float, ptr [[ARRAYIDX48]], align 4 +; CHECK-NEXT: [[TMP28:%.*]] = load float, ptr [[ARRAYIDX51]], align 4 +; CHECK-NEXT: [[TMP29:%.*]] = load float, ptr [[ARRAYIDX52]], align 4 +; CHECK-NEXT: [[TMP30:%.*]] = load float, ptr [[ARRAYIDX55]], align 4 +; CHECK-NEXT: [[TMP31:%.*]] = load float, ptr [[ARRAYIDX56]], align 4 +; CHECK-NEXT: [[TMP32:%.*]] = load float, ptr [[ARRAYIDX59]], align 4 +; CHECK-NEXT: [[TMP33:%.*]] = load float, ptr [[ARRAYIDX60]], align 4 +; CHECK-NEXT: [[MUL10_MODIFY:%.*]] = fmul float [[TMP2]], [[TMP3]] +; CHECK-NEXT: [[TMP34:%.*]] = fmul float [[TMP4]], [[TMP5]] +; 
CHECK-NEXT: [[TMP35:%.*]] = fmul float [[TMP6]], [[TMP7]] +; CHECK-NEXT: [[TMP36:%.*]] = fmul float [[TMP8]], [[TMP9]] +; CHECK-NEXT: [[TMP37:%.*]] = fmul float [[TMP10]], [[TMP11]] +; CHECK-NEXT: [[TMP38:%.*]] = fmul float [[TMP12]], [[TMP13]] +; CHECK-NEXT: [[TMP39:%.*]] = fmul float [[TMP14]], [[TMP15]] +; CHECK-NEXT: [[TMP40:%.*]] = fmul float [[TMP16]], [[TMP17]] +; CHECK-NEXT: [[TMP41:%.*]] = fmul float [[TMP18]], [[TMP19]] +; CHECK-NEXT: [[TMP42:%.*]] = fmul float [[TMP20]], [[TMP21]] +; CHECK-NEXT: [[TMP43:%.*]] = fmul float [[TMP22]], [[TMP23]] +; CHECK-NEXT: [[TMP44:%.*]] = fmul float [[TMP24]], [[TMP25]] +; CHECK-NEXT: [[TMP45:%.*]] = fmul float [[TMP26]], [[TMP27]] +; CHECK-NEXT: [[TMP46:%.*]] = fmul float [[TMP28]], [[TMP29]] +; CHECK-NEXT: [[TMP47:%.*]] = fmul float [[TMP30]], [[TMP31]] +; CHECK-NEXT: [[TMP48:%.*]] = fmul float [[TMP32]], [[TMP33]] +; CHECK-NEXT: store float [[MUL10_MODIFY]], ptr [[ARRAYIDX12_MODIFY]], align 4 +; CHECK-NEXT: store float [[TMP34]], ptr [[ARRAYIDX3]], align 4 +; CHECK-NEXT: store float [[TMP35]], ptr [[ARRAYIDX7]], align 4 +; CHECK-NEXT: store float [[TMP36]], ptr [[ARRAYIDX13]], align 4 +; CHECK-NEXT: store float [[TMP37]], ptr [[ARRAYIDX17]], align 4 +; CHECK-NEXT: store float [[TMP38]], ptr [[ARRAYIDX21]], align 4 +; CHECK-NEXT: store float [[TMP39]], ptr [[ARRAYIDX25]], align 4 +; CHECK-NEXT: store float [[TMP40]], ptr [[ARRAYIDX29]], align 4 +; CHECK-NEXT: store float [[TMP41]], ptr [[ARRAYIDX33]], align 4 +; CHECK-NEXT: store float [[TMP42]], ptr [[ARRAYIDX37]], align 4 +; CHECK-NEXT: store float [[TMP43]], ptr [[ARRAYIDX41]], align 4 +; CHECK-NEXT: store float [[TMP44]], ptr [[ARRAYIDX45]], align 4 +; CHECK-NEXT: store float [[TMP45]], ptr [[ARRAYIDX49]], align 4 +; CHECK-NEXT: store float [[TMP46]], ptr [[ARRAYIDX53]], align 4 +; CHECK-NEXT: store float [[TMP47]], ptr [[ARRAYIDX57]], align 4 +; CHECK-NEXT: store float [[TMP48]], ptr [[ARRAYIDX61]], align 4 +; CHECK-NEXT: [[EXITCOND_NOT_MODIFY:%.*]] = icmp sgt 
i32 [[TMP1]], [[SUB]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_MODIFY]], label [[FOR_COND_PREHEADER_NEW2]], label [[FOR_BODY_MODIFY]] ; CHECK: for.body: -; CHECK-NEXT: [[I_022:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[IF_END]] ] +; CHECK-NEXT: [[I_022:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[TMP0]], [[FOR_COND_PREHEADER_NEW2]] ] ; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_022]], [[STEP1]] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL]] -; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[TMP49:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[MUL8:%.*]] = mul nsw i32 [[I_022]], [[STEP2]] ; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[MUL8]] -; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX9]], align 4 -; CHECK-NEXT: [[MUL10:%.*]] = fmul float [[TMP0]], [[TMP1]] +; CHECK-NEXT: [[TMP50:%.*]] = load float, ptr [[ARRAYIDX9]], align 4 +; CHECK-NEXT: [[MUL10:%.*]] = fmul float [[TMP49]], [[TMP50]] ; CHECK-NEXT: [[MUL11:%.*]] = mul nsw i32 [[I_022]], [[STEP_OUT]] ; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL11]] ; CHECK-NEXT: store float [[MUL10]], ptr [[ARRAYIDX12]], align 4 @@ -35,11 +175,11 @@ define dso_local noundef i32 @dsps_mul_f32_ansi(ptr noundef readonly %input1, pt ; CHECK-NEXT: [[I_022_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] ; CHECK-NEXT: [[MUL_CLONE:%.*]] = mul nsw i32 [[I_022_CLONE]], [[STEP1]] ; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL_CLONE]] -; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[TMP51:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 ; CHECK-NEXT: [[MUL8_CLONE:%.*]] = mul nsw i32 [[I_022_CLONE]], [[STEP2]] ; CHECK-NEXT: [[ARRAYIDX9_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 
[[MUL8_CLONE]] -; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX9_CLONE]], align 4 -; CHECK-NEXT: [[MUL10_CLONE:%.*]] = fmul float [[TMP2]], [[TMP3]] +; CHECK-NEXT: [[TMP52:%.*]] = load float, ptr [[ARRAYIDX9_CLONE]], align 4 +; CHECK-NEXT: [[MUL10_CLONE:%.*]] = fmul float [[TMP51]], [[TMP52]] ; CHECK-NEXT: [[MUL11_CLONE:%.*]] = mul nsw i32 [[I_022_CLONE]], [[STEP_OUT]] ; CHECK-NEXT: [[ARRAYIDX12_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL11_CLONE]] ; CHECK-NEXT: store float [[MUL10_CLONE]], ptr [[ARRAYIDX12_CLONE]], align 4 @@ -47,7 +187,7 @@ define dso_local noundef i32 @dsps_mul_f32_ansi(ptr noundef readonly %input1, pt ; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] ; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[RETURN]], label [[FOR_BODY_CLONE]] ; CHECK: return: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER_NEW2]] ] ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/mulc.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/mulc.ll index 2c81f5bfd4b6f..bf4e757def137 100644 --- a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/mulc.ll +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/mulc.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=true < %s | FileCheck %s define dso_local noundef i32 @dsps_mulc_f32_ansi(ptr noalias 
noundef readonly %input, ptr noalias noundef writeonly %output, i32 noundef %len, float noundef %C, i32 noundef %step_in, i32 noundef %step_out) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_mulc_f32_ansi( ; CHECK-SAME: ptr noalias noundef readonly [[INPUT:%.*]], ptr noalias noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], float noundef [[C:%.*]], i32 noundef [[STEP_IN:%.*]], i32 noundef [[STEP_OUT:%.*]]) local_unnamed_addr { @@ -10,16 +10,124 @@ define dso_local noundef i32 @dsps_mulc_f32_ansi(ptr noalias noundef readonly %i ; CHECK-NEXT: br i1 [[OR_COND]], label [[RETURN:%.*]], label [[IF_END:%.*]] ; CHECK: if.end: ; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[LEN]], 2 -; CHECK-NEXT: br i1 [[CMP4]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[CMP4]], label [[FOR_COND_PREHEADER_NEW:%.*]], label [[FOR_COND_PREHEADER:%.*]] ; CHECK: for.cond.preheader: ; CHECK-NEXT: [[CMP413:%.*]] = icmp sgt i32 [[LEN]], 0 ; CHECK-NEXT: br i1 [[CMP413]], label [[FOR_BODY_CLONE:%.*]], label [[RETURN]] +; CHECK: for.cond.preheader.new: +; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[LEN]], -16 +; CHECK-NEXT: [[CMP6_NOT207:%.*]] = icmp ult i32 [[LEN]], 16 +; CHECK-NEXT: br i1 [[CMP6_NOT207]], label [[FOR_COND_PREHEADER_NEW2:%.*]], label [[FOR_BODY_MODIFY:%.*]] +; CHECK: for.cond.preheader.new2: +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[TMP1:%.*]], [[FOR_BODY_MODIFY]] ], [ 0, [[FOR_COND_PREHEADER_NEW]] ] +; CHECK-NEXT: [[CMP85209:%.*]] = icmp slt i32 [[TMP0]], [[LEN]] +; CHECK-NEXT: br i1 [[CMP85209]], label [[FOR_BODY:%.*]], label [[RETURN]] +; CHECK: for.body.modify: +; CHECK-NEXT: [[I_014_MODIFY:%.*]] = phi i32 [ [[TMP1]], [[FOR_BODY_MODIFY]] ], [ 0, [[FOR_COND_PREHEADER_NEW]] ] +; CHECK-NEXT: [[TMP1]] = add nuw i32 [[I_014_MODIFY]], 16 +; CHECK-NEXT: [[ADD:%.*]] = or disjoint i32 [[I_014_MODIFY]], 1 +; CHECK-NEXT: [[ADD3:%.*]] = or disjoint i32 [[I_014_MODIFY]], 2 +; CHECK-NEXT: [[ADD6:%.*]] = or disjoint i32 
[[I_014_MODIFY]], 3 +; CHECK-NEXT: [[ADD10:%.*]] = or disjoint i32 [[I_014_MODIFY]], 4 +; CHECK-NEXT: [[ADD13:%.*]] = or disjoint i32 [[I_014_MODIFY]], 5 +; CHECK-NEXT: [[ADD16:%.*]] = or disjoint i32 [[I_014_MODIFY]], 6 +; CHECK-NEXT: [[ADD19:%.*]] = or disjoint i32 [[I_014_MODIFY]], 7 +; CHECK-NEXT: [[ADD22:%.*]] = or disjoint i32 [[I_014_MODIFY]], 8 +; CHECK-NEXT: [[ADD25:%.*]] = or disjoint i32 [[I_014_MODIFY]], 9 +; CHECK-NEXT: [[ADD28:%.*]] = or disjoint i32 [[I_014_MODIFY]], 10 +; CHECK-NEXT: [[ADD31:%.*]] = or disjoint i32 [[I_014_MODIFY]], 11 +; CHECK-NEXT: [[ADD34:%.*]] = or disjoint i32 [[I_014_MODIFY]], 12 +; CHECK-NEXT: [[ADD37:%.*]] = or disjoint i32 [[I_014_MODIFY]], 13 +; CHECK-NEXT: [[ADD40:%.*]] = or disjoint i32 [[I_014_MODIFY]], 14 +; CHECK-NEXT: [[ADD43:%.*]] = or disjoint i32 [[I_014_MODIFY]], 15 +; CHECK-NEXT: [[ARRAYIDX_MODIFY:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_014_MODIFY]] +; CHECK-NEXT: [[ARRAYIDX7_MODIFY:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_014_MODIFY]] +; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD]] +; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD]] +; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD3]] +; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD3]] +; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD6]] +; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD6]] +; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD10]] +; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD10]] +; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD13]] +; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD13]] +; 
CHECK-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD16]] +; CHECK-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD16]] +; CHECK-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD19]] +; CHECK-NEXT: [[ARRAYIDX21:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD19]] +; CHECK-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD22]] +; CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD22]] +; CHECK-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD25]] +; CHECK-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD25]] +; CHECK-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD28]] +; CHECK-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD28]] +; CHECK-NEXT: [[ARRAYIDX32:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD31]] +; CHECK-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD31]] +; CHECK-NEXT: [[ARRAYIDX35:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD34]] +; CHECK-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD34]] +; CHECK-NEXT: [[ARRAYIDX38:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD37]] +; CHECK-NEXT: [[ARRAYIDX39:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD37]] +; CHECK-NEXT: [[ARRAYIDX41:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD40]] +; CHECK-NEXT: [[ARRAYIDX42:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD40]] +; CHECK-NEXT: [[ARRAYIDX44:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD43]] +; CHECK-NEXT: [[ARRAYIDX45:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD43]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX_MODIFY]], align 4 +; CHECK-NEXT: 
[[TMP3:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 +; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX8]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX11]], align 4 +; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX14]], align 4 +; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX17]], align 4 +; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX20]], align 4 +; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX23]], align 4 +; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX26]], align 4 +; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX29]], align 4 +; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX32]], align 4 +; CHECK-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX35]], align 4 +; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX38]], align 4 +; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX41]], align 4 +; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX44]], align 4 +; CHECK-NEXT: [[MUL5_MODIFY:%.*]] = fmul float [[C]], [[TMP2]] +; CHECK-NEXT: [[TMP18:%.*]] = fmul float [[C]], [[TMP3]] +; CHECK-NEXT: [[TMP19:%.*]] = fmul float [[C]], [[TMP4]] +; CHECK-NEXT: [[TMP20:%.*]] = fmul float [[C]], [[TMP5]] +; CHECK-NEXT: [[TMP21:%.*]] = fmul float [[C]], [[TMP6]] +; CHECK-NEXT: [[TMP22:%.*]] = fmul float [[C]], [[TMP7]] +; CHECK-NEXT: [[TMP23:%.*]] = fmul float [[C]], [[TMP8]] +; CHECK-NEXT: [[TMP24:%.*]] = fmul float [[C]], [[TMP9]] +; CHECK-NEXT: [[TMP25:%.*]] = fmul float [[C]], [[TMP10]] +; CHECK-NEXT: [[TMP26:%.*]] = fmul float [[C]], [[TMP11]] +; CHECK-NEXT: [[TMP27:%.*]] = fmul float [[C]], [[TMP12]] +; CHECK-NEXT: [[TMP28:%.*]] = fmul float [[C]], [[TMP13]] +; CHECK-NEXT: [[TMP29:%.*]] = fmul float [[C]], [[TMP14]] +; CHECK-NEXT: [[TMP30:%.*]] = fmul float [[C]], [[TMP15]] +; CHECK-NEXT: [[TMP31:%.*]] = fmul float [[C]], [[TMP16]] +; CHECK-NEXT: [[TMP32:%.*]] = fmul float [[C]], [[TMP17]] +; CHECK-NEXT: 
store float [[MUL5_MODIFY]], ptr [[ARRAYIDX7_MODIFY]], align 4 +; CHECK-NEXT: store float [[TMP18]], ptr [[ARRAYIDX2]], align 4 +; CHECK-NEXT: store float [[TMP19]], ptr [[ARRAYIDX5]], align 4 +; CHECK-NEXT: store float [[TMP20]], ptr [[ARRAYIDX9]], align 4 +; CHECK-NEXT: store float [[TMP21]], ptr [[ARRAYIDX12]], align 4 +; CHECK-NEXT: store float [[TMP22]], ptr [[ARRAYIDX15]], align 4 +; CHECK-NEXT: store float [[TMP23]], ptr [[ARRAYIDX18]], align 4 +; CHECK-NEXT: store float [[TMP24]], ptr [[ARRAYIDX21]], align 4 +; CHECK-NEXT: store float [[TMP25]], ptr [[ARRAYIDX24]], align 4 +; CHECK-NEXT: store float [[TMP26]], ptr [[ARRAYIDX27]], align 4 +; CHECK-NEXT: store float [[TMP27]], ptr [[ARRAYIDX30]], align 4 +; CHECK-NEXT: store float [[TMP28]], ptr [[ARRAYIDX33]], align 4 +; CHECK-NEXT: store float [[TMP29]], ptr [[ARRAYIDX36]], align 4 +; CHECK-NEXT: store float [[TMP30]], ptr [[ARRAYIDX39]], align 4 +; CHECK-NEXT: store float [[TMP31]], ptr [[ARRAYIDX42]], align 4 +; CHECK-NEXT: store float [[TMP32]], ptr [[ARRAYIDX45]], align 4 +; CHECK-NEXT: [[EXITCOND_NOT_MODIFY:%.*]] = icmp sgt i32 [[TMP1]], [[SUB]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_MODIFY]], label [[FOR_COND_PREHEADER_NEW2]], label [[FOR_BODY_MODIFY]] ; CHECK: for.body: -; CHECK-NEXT: [[I_014:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[IF_END]] ] +; CHECK-NEXT: [[I_014:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[TMP0]], [[FOR_COND_PREHEADER_NEW2]] ] ; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_014]], [[STEP_IN]] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[MUL]] -; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[MUL5:%.*]] = fmul float [[TMP0]], [[C]] +; CHECK-NEXT: [[TMP33:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[MUL5:%.*]] = fmul float [[C]], [[TMP33]] ; CHECK-NEXT: [[MUL6:%.*]] = mul nsw i32 [[I_014]], [[STEP_OUT]] ; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr 
[[OUTPUT]], i32 [[MUL6]] ; CHECK-NEXT: store float [[MUL5]], ptr [[ARRAYIDX7]], align 4 @@ -30,8 +138,8 @@ define dso_local noundef i32 @dsps_mulc_f32_ansi(ptr noalias noundef readonly %i ; CHECK-NEXT: [[I_014_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] ; CHECK-NEXT: [[MUL_CLONE:%.*]] = mul nsw i32 [[I_014_CLONE]], [[STEP_IN]] ; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[MUL_CLONE]] -; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 -; CHECK-NEXT: [[MUL5_CLONE:%.*]] = fmul float [[TMP1]], [[C]] +; CHECK-NEXT: [[TMP34:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[MUL5_CLONE:%.*]] = fmul float [[C]], [[TMP34]] ; CHECK-NEXT: [[MUL6_CLONE:%.*]] = mul nsw i32 [[I_014_CLONE]], [[STEP_OUT]] ; CHECK-NEXT: [[ARRAYIDX7_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL6_CLONE]] ; CHECK-NEXT: store float [[MUL5_CLONE]], ptr [[ARRAYIDX7_CLONE]], align 4 @@ -39,7 +147,7 @@ define dso_local noundef i32 @dsps_mulc_f32_ansi(ptr noalias noundef readonly %i ; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] ; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[RETURN]], label [[FOR_BODY_CLONE]] ; CHECK: return: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER_NEW2]] ] ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sqrt.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sqrt.ll index 99ac2877f76c6..89c891af40669 100644 --- a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sqrt.ll +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sqrt.ll @@ -1,8 +1,8 @@ ; 
NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=true < %s | FileCheck %s define dso_local noundef i32 @dsps_sqrt_f32_ansi(ptr noundef readonly %input, ptr noundef writeonly %output, i32 noundef %len) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 @dsps_sqrt_f32_ansi( -; CHECK-SAME: ptr noundef readonly [[INPUT:%.*]], ptr noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]]) local_unnamed_addr { +; CHECK-SAME: ptr noalias noundef readonly [[INPUT:%.*]], ptr noalias noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]]) local_unnamed_addr { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[INPUT]], null ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[OUTPUT]], null @@ -10,15 +10,139 @@ define dso_local noundef i32 @dsps_sqrt_f32_ansi(ptr noundef readonly %input, pt ; CHECK-NEXT: br i1 [[OR_COND]], label [[RETURN:%.*]], label [[IF_END:%.*]] ; CHECK: if.end: ; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[LEN]], 2 -; CHECK-NEXT: br i1 [[CMP4]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[CMP4]], label [[FOR_COND_PREHEADER_NEW:%.*]], label [[FOR_COND_PREHEADER:%.*]] ; CHECK: for.cond.preheader: ; CHECK-NEXT: [[CMP411:%.*]] = icmp sgt i32 [[LEN]], 0 ; CHECK-NEXT: br i1 [[CMP411]], label [[FOR_BODY_CLONE:%.*]], label [[RETURN]] +; CHECK: for.cond.preheader.new: +; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[LEN]], -16 +; CHECK-NEXT: [[CMP6_NOT207:%.*]] = icmp ult i32 [[LEN]], 16 +; CHECK-NEXT: br i1 [[CMP6_NOT207]], label [[FOR_COND_PREHEADER_NEW2:%.*]], label [[FOR_BODY_MODIFY:%.*]] +; CHECK: for.cond.preheader.new2: +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[TMP32:%.*]], [[FOR_BODY_MODIFY]] ], [ 0, 
[[FOR_COND_PREHEADER_NEW]] ] +; CHECK-NEXT: [[CMP85209:%.*]] = icmp slt i32 [[TMP0]], [[LEN]] +; CHECK-NEXT: br i1 [[CMP85209]], label [[FOR_BODY:%.*]], label [[RETURN]] +; CHECK: for.body.modify: +; CHECK-NEXT: [[I_012_MODIFY:%.*]] = phi i32 [ [[TMP32]], [[FOR_BODY_MODIFY]] ], [ 0, [[FOR_COND_PREHEADER_NEW]] ] +; CHECK-NEXT: [[ARRAYIDX_MODIFY:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_012_MODIFY]] +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX_MODIFY]], align 4 +; CHECK-NEXT: [[SHR_I_MODIFY:%.*]] = ashr i32 [[TMP1]], 1 +; CHECK-NEXT: [[ADD48:%.*]] = or disjoint i32 [[SHR_I_MODIFY]], 532365312 +; CHECK-NEXT: [[ARRAYIDX5_MODIFY:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_012_MODIFY]] +; CHECK-NEXT: store i32 [[ADD48]], ptr [[ARRAYIDX5_MODIFY]], align 4 +; CHECK-NEXT: [[ADD:%.*]] = or disjoint i32 [[I_012_MODIFY]], 1 +; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD]] +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4 +; CHECK-NEXT: [[TMP3:%.*]] = ashr i32 [[TMP2]], 1 +; CHECK-NEXT: [[ADD50:%.*]] = or disjoint i32 [[TMP3]], 532365312 +; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD]] +; CHECK-NEXT: store i32 [[ADD50]], ptr [[ARRAYIDX2]], align 4 +; CHECK-NEXT: [[ADD3:%.*]] = or disjoint i32 [[I_012_MODIFY]], 2 +; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD3]] +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4 +; CHECK-NEXT: [[TMP5:%.*]] = ashr i32 [[TMP4]], 1 +; CHECK-NEXT: [[ADD52:%.*]] = or disjoint i32 [[TMP5]], 532365312 +; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD3]] +; CHECK-NEXT: store i32 [[ADD52]], ptr [[ARRAYIDX6]], align 4 +; CHECK-NEXT: [[ADD7:%.*]] = or disjoint i32 [[I_012_MODIFY]], 3 +; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD7]] +; CHECK-NEXT: [[TMP6:%.*]] = load 
i32, ptr [[ARRAYIDX8]], align 4 +; CHECK-NEXT: [[TMP7:%.*]] = ashr i32 [[TMP6]], 1 +; CHECK-NEXT: [[ADD54:%.*]] = or disjoint i32 [[TMP7]], 532365312 +; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD7]] +; CHECK-NEXT: store i32 [[ADD54]], ptr [[ARRAYIDX9]], align 4 +; CHECK-NEXT: [[ADD10:%.*]] = or disjoint i32 [[I_012_MODIFY]], 4 +; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD10]] +; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX11]], align 4 +; CHECK-NEXT: [[TMP9:%.*]] = ashr i32 [[TMP8]], 1 +; CHECK-NEXT: [[ADD56:%.*]] = or disjoint i32 [[TMP9]], 532365312 +; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD10]] +; CHECK-NEXT: store i32 [[ADD56]], ptr [[ARRAYIDX12]], align 4 +; CHECK-NEXT: [[ADD13:%.*]] = or disjoint i32 [[I_012_MODIFY]], 5 +; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD13]] +; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX14]], align 4 +; CHECK-NEXT: [[TMP11:%.*]] = ashr i32 [[TMP10]], 1 +; CHECK-NEXT: [[ADD58:%.*]] = or disjoint i32 [[TMP11]], 532365312 +; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD13]] +; CHECK-NEXT: store i32 [[ADD58]], ptr [[ARRAYIDX15]], align 4 +; CHECK-NEXT: [[ADD16:%.*]] = or disjoint i32 [[I_012_MODIFY]], 6 +; CHECK-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD16]] +; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX17]], align 4 +; CHECK-NEXT: [[TMP13:%.*]] = ashr i32 [[TMP12]], 1 +; CHECK-NEXT: [[ADD60:%.*]] = or disjoint i32 [[TMP13]], 532365312 +; CHECK-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD16]] +; CHECK-NEXT: store i32 [[ADD60]], ptr [[ARRAYIDX18]], align 4 +; CHECK-NEXT: [[ADD19:%.*]] = or disjoint i32 [[I_012_MODIFY]], 7 +; CHECK-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 
[[ADD19]] +; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX20]], align 4 +; CHECK-NEXT: [[TMP15:%.*]] = ashr i32 [[TMP14]], 1 +; CHECK-NEXT: [[ADD62:%.*]] = or disjoint i32 [[TMP15]], 532365312 +; CHECK-NEXT: [[ARRAYIDX21:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD19]] +; CHECK-NEXT: store i32 [[ADD62]], ptr [[ARRAYIDX21]], align 4 +; CHECK-NEXT: [[ADD22:%.*]] = or disjoint i32 [[I_012_MODIFY]], 8 +; CHECK-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD22]] +; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX23]], align 4 +; CHECK-NEXT: [[TMP17:%.*]] = ashr i32 [[TMP16]], 1 +; CHECK-NEXT: [[ADD64:%.*]] = or disjoint i32 [[TMP17]], 532365312 +; CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD22]] +; CHECK-NEXT: store i32 [[ADD64]], ptr [[ARRAYIDX24]], align 4 +; CHECK-NEXT: [[ADD25:%.*]] = or disjoint i32 [[I_012_MODIFY]], 9 +; CHECK-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD25]] +; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX26]], align 4 +; CHECK-NEXT: [[TMP19:%.*]] = ashr i32 [[TMP18]], 1 +; CHECK-NEXT: [[ADD66:%.*]] = or disjoint i32 [[TMP19]], 532365312 +; CHECK-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD25]] +; CHECK-NEXT: store i32 [[ADD66]], ptr [[ARRAYIDX27]], align 4 +; CHECK-NEXT: [[ADD28:%.*]] = or disjoint i32 [[I_012_MODIFY]], 10 +; CHECK-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD28]] +; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARRAYIDX29]], align 4 +; CHECK-NEXT: [[TMP21:%.*]] = ashr i32 [[TMP20]], 1 +; CHECK-NEXT: [[ADD68:%.*]] = or disjoint i32 [[TMP21]], 532365312 +; CHECK-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD28]] +; CHECK-NEXT: store i32 [[ADD68]], ptr [[ARRAYIDX30]], align 4 +; CHECK-NEXT: [[ADD31:%.*]] = or disjoint i32 [[I_012_MODIFY]], 11 +; CHECK-NEXT: [[ARRAYIDX32:%.*]] 
= getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD31]] +; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX32]], align 4 +; CHECK-NEXT: [[TMP23:%.*]] = ashr i32 [[TMP22]], 1 +; CHECK-NEXT: [[ADD70:%.*]] = or disjoint i32 [[TMP23]], 532365312 +; CHECK-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD31]] +; CHECK-NEXT: store i32 [[ADD70]], ptr [[ARRAYIDX33]], align 4 +; CHECK-NEXT: [[ADD34:%.*]] = or disjoint i32 [[I_012_MODIFY]], 12 +; CHECK-NEXT: [[ARRAYIDX35:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD34]] +; CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[ARRAYIDX35]], align 4 +; CHECK-NEXT: [[TMP25:%.*]] = ashr i32 [[TMP24]], 1 +; CHECK-NEXT: [[ADD72:%.*]] = or disjoint i32 [[TMP25]], 532365312 +; CHECK-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD34]] +; CHECK-NEXT: store i32 [[ADD72]], ptr [[ARRAYIDX36]], align 4 +; CHECK-NEXT: [[ADD37:%.*]] = or disjoint i32 [[I_012_MODIFY]], 13 +; CHECK-NEXT: [[ARRAYIDX38:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD37]] +; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[ARRAYIDX38]], align 4 +; CHECK-NEXT: [[TMP27:%.*]] = ashr i32 [[TMP26]], 1 +; CHECK-NEXT: [[ADD74:%.*]] = or disjoint i32 [[TMP27]], 532365312 +; CHECK-NEXT: [[ARRAYIDX39:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD37]] +; CHECK-NEXT: store i32 [[ADD74]], ptr [[ARRAYIDX39]], align 4 +; CHECK-NEXT: [[ADD40:%.*]] = or disjoint i32 [[I_012_MODIFY]], 14 +; CHECK-NEXT: [[ARRAYIDX41:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD40]] +; CHECK-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARRAYIDX41]], align 4 +; CHECK-NEXT: [[TMP29:%.*]] = ashr i32 [[TMP28]], 1 +; CHECK-NEXT: [[ADD76:%.*]] = or disjoint i32 [[TMP29]], 532365312 +; CHECK-NEXT: [[ARRAYIDX42:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD40]] +; CHECK-NEXT: store i32 [[ADD76]], ptr [[ARRAYIDX42]], align 4 +; CHECK-NEXT: [[ADD43:%.*]] = or disjoint i32 
[[I_012_MODIFY]], 15 +; CHECK-NEXT: [[ARRAYIDX44:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD43]] +; CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[ARRAYIDX44]], align 4 +; CHECK-NEXT: [[TMP31:%.*]] = ashr i32 [[TMP30]], 1 +; CHECK-NEXT: [[ADD78:%.*]] = or disjoint i32 [[TMP31]], 532365312 +; CHECK-NEXT: [[ARRAYIDX45:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD43]] +; CHECK-NEXT: store i32 [[ADD78]], ptr [[ARRAYIDX45]], align 4 +; CHECK-NEXT: [[TMP32]] = add nuw i32 [[I_012_MODIFY]], 16 +; CHECK-NEXT: [[EXITCOND_NOT_MODIFY:%.*]] = icmp sgt i32 [[TMP32]], [[SUB]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_MODIFY]], label [[FOR_COND_PREHEADER_NEW2]], label [[FOR_BODY_MODIFY]] ; CHECK: for.body: -; CHECK-NEXT: [[I_012:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[IF_END]] ] +; CHECK-NEXT: [[I_012:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[TMP0]], [[FOR_COND_PREHEADER_NEW2]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_012]] -; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[SHR_I:%.*]] = ashr i32 [[TMP0]], 1 +; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[SHR_I:%.*]] = ashr i32 [[TMP33]], 1 ; CHECK-NEXT: [[ADD_I:%.*]] = add nsw i32 [[SHR_I]], 532365312 ; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_012]] ; CHECK-NEXT: store i32 [[ADD_I]], ptr [[ARRAYIDX5]], align 4 @@ -28,8 +152,8 @@ define dso_local noundef i32 @dsps_sqrt_f32_ansi(ptr noundef readonly %input, pt ; CHECK: for.body.clone: ; CHECK-NEXT: [[I_012_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] ; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_012_CLONE]] -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX_CLONE]], align 4 -; CHECK-NEXT: [[SHR_I_CLONE:%.*]] = ashr i32 [[TMP1]], 1 +; CHECK-NEXT: [[TMP34:%.*]] = load i32, ptr 
[[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[SHR_I_CLONE:%.*]] = ashr i32 [[TMP34]], 1 ; CHECK-NEXT: [[ADD_I_CLONE:%.*]] = add nsw i32 [[SHR_I_CLONE]], 532365312 ; CHECK-NEXT: [[ARRAYIDX5_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_012_CLONE]] ; CHECK-NEXT: store i32 [[ADD_I_CLONE]], ptr [[ARRAYIDX5_CLONE]], align 4 @@ -37,7 +161,7 @@ define dso_local noundef i32 @dsps_sqrt_f32_ansi(ptr noundef readonly %input, pt ; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] ; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[RETURN]], label [[FOR_BODY_CLONE]] ; CHECK: return: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER_NEW2]] ] ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sub.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sub.ll index 9468a11ba6232..19bca2d13e120 100644 --- a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sub.ll +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sub.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 -; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=false < %s | FileCheck %s +; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=true < %s | FileCheck %s define dso_local noundef i32 @dsps_sub_f32_ansi(ptr noundef readonly %input1, ptr noundef readonly %input2, ptr noundef writeonly %output, i32 noundef %len, i32 noundef %step1, i32 noundef %step2, i32 noundef %step_out) local_unnamed_addr { ; CHECK-LABEL: define dso_local noundef i32 
@dsps_sub_f32_ansi( -; CHECK-SAME: ptr noundef readonly [[INPUT1:%.*]], ptr noundef readonly [[INPUT2:%.*]], ptr noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[STEP2:%.*]], i32 noundef [[STEP_OUT:%.*]]) local_unnamed_addr { +; CHECK-SAME: ptr noalias noundef readonly [[INPUT1:%.*]], ptr noalias noundef readonly [[INPUT2:%.*]], ptr noalias noundef writeonly [[OUTPUT:%.*]], i32 noundef [[LEN:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[STEP2:%.*]], i32 noundef [[STEP_OUT:%.*]]) local_unnamed_addr { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[INPUT1]], null ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq ptr [[INPUT2]], null @@ -12,19 +12,159 @@ define dso_local noundef i32 @dsps_sub_f32_ansi(ptr noundef readonly %input1, pt ; CHECK-NEXT: br i1 [[OR_COND19]], label [[RETURN:%.*]], label [[IF_END:%.*]] ; CHECK: if.end: ; CHECK-NEXT: [[CMP41:%.*]] = icmp sgt i32 [[LEN]], 2 -; CHECK-NEXT: br i1 [[CMP41]], label [[FOR_BODY:%.*]], label [[FOR_COND_PREHEADER:%.*]] +; CHECK-NEXT: br i1 [[CMP41]], label [[FOR_COND_PREHEADER_NEW:%.*]], label [[FOR_COND_PREHEADER:%.*]] ; CHECK: for.cond.preheader: ; CHECK-NEXT: [[CMP720:%.*]] = icmp sgt i32 [[LEN]], 0 ; CHECK-NEXT: br i1 [[CMP720]], label [[FOR_BODY_CLONE:%.*]], label [[RETURN]] +; CHECK: for.cond.preheader.new: +; CHECK-NEXT: [[SUB63:%.*]] = add nsw i32 [[LEN]], -16 +; CHECK-NEXT: [[CMP6_NOT207:%.*]] = icmp ult i32 [[LEN]], 16 +; CHECK-NEXT: br i1 [[CMP6_NOT207]], label [[FOR_COND_PREHEADER_NEW2:%.*]], label [[FOR_BODY_MODIFY:%.*]] +; CHECK: for.cond.preheader.new2: +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[TMP1:%.*]], [[FOR_BODY_MODIFY]] ], [ 0, [[FOR_COND_PREHEADER_NEW]] ] +; CHECK-NEXT: [[CMP85209:%.*]] = icmp slt i32 [[TMP0]], [[LEN]] +; CHECK-NEXT: br i1 [[CMP85209]], label [[FOR_BODY:%.*]], label [[RETURN]] +; CHECK: for.body.modify: +; CHECK-NEXT: [[I_021_MODIFY:%.*]] = phi i32 [ [[TMP1]], [[FOR_BODY_MODIFY]] ], [ 0, [[FOR_COND_PREHEADER_NEW]] ] +; 
CHECK-NEXT: [[TMP1]] = add nuw i32 [[I_021_MODIFY]], 16 +; CHECK-NEXT: [[ADD:%.*]] = or disjoint i32 [[I_021_MODIFY]], 1 +; CHECK-NEXT: [[ADD4:%.*]] = or disjoint i32 [[I_021_MODIFY]], 2 +; CHECK-NEXT: [[ADD8:%.*]] = or disjoint i32 [[I_021_MODIFY]], 3 +; CHECK-NEXT: [[ADD14:%.*]] = or disjoint i32 [[I_021_MODIFY]], 4 +; CHECK-NEXT: [[ADD18:%.*]] = or disjoint i32 [[I_021_MODIFY]], 5 +; CHECK-NEXT: [[ADD22:%.*]] = or disjoint i32 [[I_021_MODIFY]], 6 +; CHECK-NEXT: [[ADD26:%.*]] = or disjoint i32 [[I_021_MODIFY]], 7 +; CHECK-NEXT: [[ADD30:%.*]] = or disjoint i32 [[I_021_MODIFY]], 8 +; CHECK-NEXT: [[ADD34:%.*]] = or disjoint i32 [[I_021_MODIFY]], 9 +; CHECK-NEXT: [[ADD38:%.*]] = or disjoint i32 [[I_021_MODIFY]], 10 +; CHECK-NEXT: [[ADD42:%.*]] = or disjoint i32 [[I_021_MODIFY]], 11 +; CHECK-NEXT: [[ADD46:%.*]] = or disjoint i32 [[I_021_MODIFY]], 12 +; CHECK-NEXT: [[ADD50:%.*]] = or disjoint i32 [[I_021_MODIFY]], 13 +; CHECK-NEXT: [[ADD54:%.*]] = or disjoint i32 [[I_021_MODIFY]], 14 +; CHECK-NEXT: [[ADD58:%.*]] = or disjoint i32 [[I_021_MODIFY]], 15 +; CHECK-NEXT: [[ARRAYIDX_MODIFY:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[I_021_MODIFY]] +; CHECK-NEXT: [[ARRAYIDX9_MODIFY:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[I_021_MODIFY]] +; CHECK-NEXT: [[ARRAYIDX11_MODIFY:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_021_MODIFY]] +; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD]] +; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD]] +; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD]] +; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD4]] +; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD4]] +; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD4]] +; CHECK-NEXT: [[ARRAYIDX10:%.*]] = 
getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD8]] +; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD8]] +; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD8]] +; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD14]] +; CHECK-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD14]] +; CHECK-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD14]] +; CHECK-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD18]] +; CHECK-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD18]] +; CHECK-NEXT: [[ARRAYIDX21:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD18]] +; CHECK-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD22]] +; CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD22]] +; CHECK-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD22]] +; CHECK-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD26]] +; CHECK-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD26]] +; CHECK-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD26]] +; CHECK-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD30]] +; CHECK-NEXT: [[ARRAYIDX32:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD30]] +; CHECK-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD30]] +; CHECK-NEXT: [[ARRAYIDX35:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD34]] +; CHECK-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD34]] +; CHECK-NEXT: [[ARRAYIDX37:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD34]] +; CHECK-NEXT: 
[[ARRAYIDX39:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD38]] +; CHECK-NEXT: [[ARRAYIDX40:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD38]] +; CHECK-NEXT: [[ARRAYIDX41:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD38]] +; CHECK-NEXT: [[ARRAYIDX43:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD42]] +; CHECK-NEXT: [[ARRAYIDX44:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD42]] +; CHECK-NEXT: [[ARRAYIDX45:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD42]] +; CHECK-NEXT: [[ARRAYIDX47:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD46]] +; CHECK-NEXT: [[ARRAYIDX48:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD46]] +; CHECK-NEXT: [[ARRAYIDX49:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD46]] +; CHECK-NEXT: [[ARRAYIDX51:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD50]] +; CHECK-NEXT: [[ARRAYIDX52:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD50]] +; CHECK-NEXT: [[ARRAYIDX53:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD50]] +; CHECK-NEXT: [[ARRAYIDX55:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD54]] +; CHECK-NEXT: [[ARRAYIDX56:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD54]] +; CHECK-NEXT: [[ARRAYIDX57:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD54]] +; CHECK-NEXT: [[ARRAYIDX59:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[ADD58]] +; CHECK-NEXT: [[ARRAYIDX60:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[ADD58]] +; CHECK-NEXT: [[ARRAYIDX61:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD58]] +; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX_MODIFY]], align 4 +; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX9_MODIFY]], align 4 +; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 +; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX2]], align 
4 +; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX5]], align 4 +; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX6]], align 4 +; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX10]], align 4 +; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX12]], align 4 +; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX15]], align 4 +; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX16]], align 4 +; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX19]], align 4 +; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX20]], align 4 +; CHECK-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX23]], align 4 +; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX24]], align 4 +; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX27]], align 4 +; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX28]], align 4 +; CHECK-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX31]], align 4 +; CHECK-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX32]], align 4 +; CHECK-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX35]], align 4 +; CHECK-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX36]], align 4 +; CHECK-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX39]], align 4 +; CHECK-NEXT: [[TMP23:%.*]] = load float, ptr [[ARRAYIDX40]], align 4 +; CHECK-NEXT: [[TMP24:%.*]] = load float, ptr [[ARRAYIDX43]], align 4 +; CHECK-NEXT: [[TMP25:%.*]] = load float, ptr [[ARRAYIDX44]], align 4 +; CHECK-NEXT: [[TMP26:%.*]] = load float, ptr [[ARRAYIDX47]], align 4 +; CHECK-NEXT: [[TMP27:%.*]] = load float, ptr [[ARRAYIDX48]], align 4 +; CHECK-NEXT: [[TMP28:%.*]] = load float, ptr [[ARRAYIDX51]], align 4 +; CHECK-NEXT: [[TMP29:%.*]] = load float, ptr [[ARRAYIDX52]], align 4 +; CHECK-NEXT: [[TMP30:%.*]] = load float, ptr [[ARRAYIDX55]], align 4 +; CHECK-NEXT: [[TMP31:%.*]] = load float, ptr [[ARRAYIDX56]], align 4 +; CHECK-NEXT: [[TMP32:%.*]] = load float, ptr [[ARRAYIDX59]], align 4 +; CHECK-NEXT: [[TMP33:%.*]] = load float, ptr [[ARRAYIDX60]], align 4 +; CHECK-NEXT: 
[[SUB_MODIFY:%.*]] = fsub float [[TMP2]], [[TMP3]] +; CHECK-NEXT: [[TMP34:%.*]] = fsub float [[TMP4]], [[TMP5]] +; CHECK-NEXT: [[TMP35:%.*]] = fsub float [[TMP6]], [[TMP7]] +; CHECK-NEXT: [[TMP36:%.*]] = fsub float [[TMP8]], [[TMP9]] +; CHECK-NEXT: [[TMP37:%.*]] = fsub float [[TMP10]], [[TMP11]] +; CHECK-NEXT: [[TMP38:%.*]] = fsub float [[TMP12]], [[TMP13]] +; CHECK-NEXT: [[TMP39:%.*]] = fsub float [[TMP14]], [[TMP15]] +; CHECK-NEXT: [[TMP40:%.*]] = fsub float [[TMP16]], [[TMP17]] +; CHECK-NEXT: [[TMP41:%.*]] = fsub float [[TMP18]], [[TMP19]] +; CHECK-NEXT: [[TMP42:%.*]] = fsub float [[TMP20]], [[TMP21]] +; CHECK-NEXT: [[TMP43:%.*]] = fsub float [[TMP22]], [[TMP23]] +; CHECK-NEXT: [[TMP44:%.*]] = fsub float [[TMP24]], [[TMP25]] +; CHECK-NEXT: [[TMP45:%.*]] = fsub float [[TMP26]], [[TMP27]] +; CHECK-NEXT: [[TMP46:%.*]] = fsub float [[TMP28]], [[TMP29]] +; CHECK-NEXT: [[TMP47:%.*]] = fsub float [[TMP30]], [[TMP31]] +; CHECK-NEXT: [[TMP48:%.*]] = fsub float [[TMP32]], [[TMP33]] +; CHECK-NEXT: store float [[SUB_MODIFY]], ptr [[ARRAYIDX11_MODIFY]], align 4 +; CHECK-NEXT: store float [[TMP34]], ptr [[ARRAYIDX3]], align 4 +; CHECK-NEXT: store float [[TMP35]], ptr [[ARRAYIDX7]], align 4 +; CHECK-NEXT: store float [[TMP36]], ptr [[ARRAYIDX13]], align 4 +; CHECK-NEXT: store float [[TMP37]], ptr [[ARRAYIDX17]], align 4 +; CHECK-NEXT: store float [[TMP38]], ptr [[ARRAYIDX21]], align 4 +; CHECK-NEXT: store float [[TMP39]], ptr [[ARRAYIDX25]], align 4 +; CHECK-NEXT: store float [[TMP40]], ptr [[ARRAYIDX29]], align 4 +; CHECK-NEXT: store float [[TMP41]], ptr [[ARRAYIDX33]], align 4 +; CHECK-NEXT: store float [[TMP42]], ptr [[ARRAYIDX37]], align 4 +; CHECK-NEXT: store float [[TMP43]], ptr [[ARRAYIDX41]], align 4 +; CHECK-NEXT: store float [[TMP44]], ptr [[ARRAYIDX45]], align 4 +; CHECK-NEXT: store float [[TMP45]], ptr [[ARRAYIDX49]], align 4 +; CHECK-NEXT: store float [[TMP46]], ptr [[ARRAYIDX53]], align 4 +; CHECK-NEXT: store float [[TMP47]], ptr [[ARRAYIDX57]], align 4 +; 
CHECK-NEXT: store float [[TMP48]], ptr [[ARRAYIDX61]], align 4 +; CHECK-NEXT: [[EXITCOND_NOT_MODIFY:%.*]] = icmp sgt i32 [[TMP1]], [[SUB63]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT_MODIFY]], label [[FOR_COND_PREHEADER_NEW2]], label [[FOR_BODY_MODIFY]] ; CHECK: for.body: -; CHECK-NEXT: [[I_021:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[IF_END]] ] +; CHECK-NEXT: [[I_021:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[TMP0]], [[FOR_COND_PREHEADER_NEW2]] ] ; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_021]], [[STEP1]] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL]] -; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[TMP49:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[MUL8:%.*]] = mul nsw i32 [[I_021]], [[STEP2]] ; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[MUL8]] -; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX9]], align 4 -; CHECK-NEXT: [[SUB:%.*]] = fsub float [[TMP0]], [[TMP1]] +; CHECK-NEXT: [[TMP50:%.*]] = load float, ptr [[ARRAYIDX9]], align 4 +; CHECK-NEXT: [[SUB:%.*]] = fsub float [[TMP49]], [[TMP50]] ; CHECK-NEXT: [[MUL10:%.*]] = mul nsw i32 [[I_021]], [[STEP_OUT]] ; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL10]] ; CHECK-NEXT: store float [[SUB]], ptr [[ARRAYIDX11]], align 4 @@ -35,11 +175,11 @@ define dso_local noundef i32 @dsps_sub_f32_ansi(ptr noundef readonly %input1, pt ; CHECK-NEXT: [[I_021_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] ; CHECK-NEXT: [[MUL_CLONE:%.*]] = mul nsw i32 [[I_021_CLONE]], [[STEP1]] ; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT1]], i32 [[MUL_CLONE]] -; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[TMP51:%.*]] = load float, ptr [[ARRAYIDX_CLONE]], align 4 ; CHECK-NEXT: [[MUL8_CLONE:%.*]] = mul nsw i32 
[[I_021_CLONE]], [[STEP2]] ; CHECK-NEXT: [[ARRAYIDX9_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT2]], i32 [[MUL8_CLONE]] -; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX9_CLONE]], align 4 -; CHECK-NEXT: [[SUB_CLONE:%.*]] = fsub float [[TMP2]], [[TMP3]] +; CHECK-NEXT: [[TMP52:%.*]] = load float, ptr [[ARRAYIDX9_CLONE]], align 4 +; CHECK-NEXT: [[SUB_CLONE:%.*]] = fsub float [[TMP51]], [[TMP52]] ; CHECK-NEXT: [[MUL10_CLONE:%.*]] = mul nsw i32 [[I_021_CLONE]], [[STEP_OUT]] ; CHECK-NEXT: [[ARRAYIDX11_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[MUL10_CLONE]] ; CHECK-NEXT: store float [[SUB_CLONE]], ptr [[ARRAYIDX11_CLONE]], align 4 @@ -47,7 +187,7 @@ define dso_local noundef i32 @dsps_sub_f32_ansi(ptr noundef readonly %input1, pt ; CHECK-NEXT: [[EXITCOND_NOT_CLONE:%.*]] = icmp eq i32 [[INC_CLONE]], [[LEN]] ; CHECK-NEXT: br i1 [[EXITCOND_NOT_CLONE]], label [[RETURN]], label [[FOR_BODY_CLONE]] ; CHECK: return: -; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ] +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 458755, [[ENTRY:%.*]] ], [ 0, [[FOR_COND_PREHEADER]] ], [ 0, [[FOR_BODY]] ], [ 0, [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER_NEW2]] ] ; CHECK-NEXT: ret i32 [[RETVAL_0]] ; entry: From 9d9606951a01d01ed399d828b9fac139f5ca6e71 Mon Sep 17 00:00:00 2001 From: "chen.qian" Date: Fri, 6 Dec 2024 18:39:26 +0800 Subject: [PATCH 270/289] [Test] rename testcases and change sqrt/fird --- .../{add.ll => dsps_add_f32_ansi.ll} | 0 .../{addc.ll => dsps_addc_f32_ansi.ll} | 0 .../{ccorr.ll => dsps_ccorr_f32_ansi.ll} | 0 ...l => dsps_complicated_dotprod_f32_ansi.ll} | 4 +- .../{conv.ll => dsps_conv_f32_ansi.ll} | 0 .../{corr.ll => dsps_corr_f32_ansi.ll} | 0 .../{dotprod.ll => dsps_dotprod_f32_ansi.ll} | 0 ...{dotprode.ll => dsps_dotprode_f32_ansi.ll} | 0 .../{fir.ll => dsps_fir_f32_ansi.ll} | 0 .../{fird.ll => dsps_fird_f32_ansi.ll} | 332 
+++++++++--------- .../{mul.ll => dsps_mul_f32_ansi.ll} | 0 .../{mulc.ll => dsps_mulc_f32_ansi.ll} | 0 ...ant.ll => dsps_simple_dotprod_f32_ansi.ll} | 4 +- .../{sqrt.ll => dsps_sqrt_f32_ansi.ll} | 136 +++---- .../{sub.ll => dsps_sub_f32_ansi.ll} | 0 15 files changed, 238 insertions(+), 238 deletions(-) rename llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/{add.ll => dsps_add_f32_ansi.ll} (100%) rename llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/{addc.ll => dsps_addc_f32_ansi.ll} (100%) rename llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/{ccorr.ll => dsps_ccorr_f32_ansi.ll} (100%) rename llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/{dotprod_template_complex.ll => dsps_complicated_dotprod_f32_ansi.ll} (96%) rename llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/{conv.ll => dsps_conv_f32_ansi.ll} (100%) rename llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/{corr.ll => dsps_corr_f32_ansi.ll} (100%) rename llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/{dotprod.ll => dsps_dotprod_f32_ansi.ll} (100%) rename llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/{dotprode.ll => dsps_dotprode_f32_ansi.ll} (100%) rename llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/{fir.ll => dsps_fir_f32_ansi.ll} (100%) rename llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/{fird.ll => dsps_fird_f32_ansi.ll} (66%) rename llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/{mul.ll => dsps_mul_f32_ansi.ll} (100%) rename llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/{mulc.ll => dsps_mulc_f32_ansi.ll} (100%) rename llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/{loopsecvconstant.ll => dsps_simple_dotprod_f32_ansi.ll} (96%) rename llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/{sqrt.ll => dsps_sqrt_f32_ansi.ll} (71%) rename llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/{sub.ll => dsps_sub_f32_ansi.ll} (100%) diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/add.ll 
b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_add_f32_ansi.ll similarity index 100% rename from llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/add.ll rename to llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_add_f32_ansi.ll diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/addc.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_addc_f32_ansi.ll similarity index 100% rename from llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/addc.ll rename to llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_addc_f32_ansi.ll diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/ccorr.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_ccorr_f32_ansi.ll similarity index 100% rename from llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/ccorr.ll rename to llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_ccorr_f32_ansi.ll diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod_template_complex.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_complicated_dotprod_f32_ansi.ll similarity index 96% rename from llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod_template_complex.ll rename to llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_complicated_dotprod_f32_ansi.ll index 60c76b1ad159d..5604e46356331 100644 --- a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod_template_complex.ll +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_complicated_dotprod_f32_ansi.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 ; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=true < %s | FileCheck %s -define dso_local float @test_loop(ptr nocapture noundef readonly %data1, ptr nocapture noundef readonly %data2, i32 noundef %start_index, i32 noundef %end_index, i32 noundef %update1, i32 noundef %update2, float 
noundef %offset) local_unnamed_addr { -; CHECK-LABEL: define dso_local float @test_loop( +define dso_local float @dsps_complicated_dotprod_f32_ansi(ptr nocapture noundef readonly %data1, ptr nocapture noundef readonly %data2, i32 noundef %start_index, i32 noundef %end_index, i32 noundef %update1, i32 noundef %update2, float noundef %offset) local_unnamed_addr { +; CHECK-LABEL: define dso_local float @dsps_complicated_dotprod_f32_ansi( ; CHECK-SAME: ptr noalias nocapture noundef readonly [[DATA1:%.*]], ptr noalias nocapture noundef readonly [[DATA2:%.*]], i32 noundef [[START_INDEX:%.*]], i32 noundef [[END_INDEX:%.*]], i32 noundef [[UPDATE1:%.*]], i32 noundef [[UPDATE2:%.*]], float noundef [[OFFSET:%.*]]) local_unnamed_addr { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr float, ptr [[DATA1]], i32 [[UPDATE1]] diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/conv.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_conv_f32_ansi.ll similarity index 100% rename from llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/conv.ll rename to llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_conv_f32_ansi.ll diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/corr.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_corr_f32_ansi.ll similarity index 100% rename from llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/corr.ll rename to llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_corr_f32_ansi.ll diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_dotprod_f32_ansi.ll similarity index 100% rename from llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprod.ll rename to llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_dotprod_f32_ansi.ll diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprode.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_dotprode_f32_ansi.ll 
similarity index 100% rename from llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dotprode.ll rename to llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_dotprode_f32_ansi.ll diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fir.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_fir_f32_ansi.ll similarity index 100% rename from llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fir.ll rename to llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_fir_f32_ansi.ll diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fird.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_fird_f32_ansi.ll similarity index 66% rename from llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fird.ll rename to llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_fird_f32_ansi.ll index e7a15e8558512..2cb5b837f9eb2 100644 --- a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/fird.ll +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_fird_f32_ansi.ll @@ -44,27 +44,27 @@ define dso_local noundef i32 @dsps_fird_f32_ansi(ptr nocapture noundef %fir, ptr ; CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DELAY]], align 4 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[TMP6]], i32 28 ; CHECK-NEXT: [[TMP7:%.*]] = shl i32 [[TMP4]], 2 -; CHECK-NEXT: [[SCEVGEP101:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i32 [[TMP7]] -; CHECK-NEXT: [[SCEVGEP105:%.*]] = getelementptr i8, ptr [[TMP6]], i32 24 -; CHECK-NEXT: [[SCEVGEP106:%.*]] = getelementptr i8, ptr [[SCEVGEP105]], i32 [[TMP7]] -; CHECK-NEXT: [[SCEVGEP108:%.*]] = getelementptr i8, ptr [[TMP6]], i32 20 -; CHECK-NEXT: [[SCEVGEP109:%.*]] = getelementptr i8, ptr [[SCEVGEP108]], i32 [[TMP7]] -; CHECK-NEXT: [[SCEVGEP111:%.*]] = getelementptr i8, ptr [[TMP6]], i32 16 -; CHECK-NEXT: [[SCEVGEP112:%.*]] = getelementptr i8, ptr [[SCEVGEP111]], i32 [[TMP7]] -; CHECK-NEXT: [[SCEVGEP114:%.*]] = getelementptr i8, ptr [[TMP6]], i32 12 -; CHECK-NEXT: [[SCEVGEP115:%.*]] = 
getelementptr i8, ptr [[SCEVGEP114]], i32 [[TMP7]] -; CHECK-NEXT: [[SCEVGEP117:%.*]] = getelementptr i8, ptr [[TMP6]], i32 8 -; CHECK-NEXT: [[SCEVGEP118:%.*]] = getelementptr i8, ptr [[SCEVGEP117]], i32 [[TMP7]] -; CHECK-NEXT: [[SCEVGEP120:%.*]] = getelementptr i8, ptr [[TMP6]], i32 4 -; CHECK-NEXT: [[SCEVGEP121:%.*]] = getelementptr i8, ptr [[SCEVGEP120]], i32 [[TMP7]] -; CHECK-NEXT: [[SCEVGEP123:%.*]] = getelementptr i8, ptr [[TMP6]], i32 [[TMP7]] -; CHECK-NEXT: [[SCEVGEP127:%.*]] = getelementptr i8, ptr [[TMP5]], i32 28 -; CHECK-NEXT: [[SCEVGEP129:%.*]] = getelementptr i8, ptr [[TMP5]], i32 24 -; CHECK-NEXT: [[SCEVGEP131:%.*]] = getelementptr i8, ptr [[TMP5]], i32 20 -; CHECK-NEXT: [[SCEVGEP133:%.*]] = getelementptr i8, ptr [[TMP5]], i32 16 -; CHECK-NEXT: [[SCEVGEP135:%.*]] = getelementptr i8, ptr [[TMP5]], i32 12 -; CHECK-NEXT: [[SCEVGEP137:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8 -; CHECK-NEXT: [[SCEVGEP139:%.*]] = getelementptr i8, ptr [[TMP5]], i32 4 +; CHECK-NEXT: [[SCEVGEP79:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i32 [[TMP7]] +; CHECK-NEXT: [[SCEVGEP83:%.*]] = getelementptr i8, ptr [[TMP6]], i32 24 +; CHECK-NEXT: [[SCEVGEP84:%.*]] = getelementptr i8, ptr [[SCEVGEP83]], i32 [[TMP7]] +; CHECK-NEXT: [[SCEVGEP86:%.*]] = getelementptr i8, ptr [[TMP6]], i32 20 +; CHECK-NEXT: [[SCEVGEP87:%.*]] = getelementptr i8, ptr [[SCEVGEP86]], i32 [[TMP7]] +; CHECK-NEXT: [[SCEVGEP89:%.*]] = getelementptr i8, ptr [[TMP6]], i32 16 +; CHECK-NEXT: [[SCEVGEP90:%.*]] = getelementptr i8, ptr [[SCEVGEP89]], i32 [[TMP7]] +; CHECK-NEXT: [[SCEVGEP92:%.*]] = getelementptr i8, ptr [[TMP6]], i32 12 +; CHECK-NEXT: [[SCEVGEP93:%.*]] = getelementptr i8, ptr [[SCEVGEP92]], i32 [[TMP7]] +; CHECK-NEXT: [[SCEVGEP95:%.*]] = getelementptr i8, ptr [[TMP6]], i32 8 +; CHECK-NEXT: [[SCEVGEP96:%.*]] = getelementptr i8, ptr [[SCEVGEP95]], i32 [[TMP7]] +; CHECK-NEXT: [[SCEVGEP98:%.*]] = getelementptr i8, ptr [[TMP6]], i32 4 +; CHECK-NEXT: [[SCEVGEP99:%.*]] = getelementptr i8, ptr [[SCEVGEP98]], 
i32 [[TMP7]] +; CHECK-NEXT: [[SCEVGEP101:%.*]] = getelementptr i8, ptr [[TMP6]], i32 [[TMP7]] +; CHECK-NEXT: [[SCEVGEP103:%.*]] = getelementptr i8, ptr [[TMP5]], i32 24 +; CHECK-NEXT: [[SCEVGEP105:%.*]] = getelementptr i8, ptr [[TMP5]], i32 20 +; CHECK-NEXT: [[SCEVGEP107:%.*]] = getelementptr i8, ptr [[TMP5]], i32 16 +; CHECK-NEXT: [[SCEVGEP109:%.*]] = getelementptr i8, ptr [[TMP5]], i32 12 +; CHECK-NEXT: [[SCEVGEP111:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8 +; CHECK-NEXT: [[SCEVGEP113:%.*]] = getelementptr i8, ptr [[TMP5]], i32 4 +; CHECK-NEXT: [[SCEVGEP115:%.*]] = getelementptr i8, ptr [[TMP5]], i32 28 ; CHECK-NEXT: br label [[FOR_BODY14_7:%.*]] ; CHECK: for.body4: ; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32 [ [[LSR_IV_NEXT:%.*]], [[FOR_BODY4]] ], [ [[TMP0]], [[FOR_BODY4_LR_PH]] ] @@ -89,8 +89,8 @@ define dso_local noundef i32 @dsps_fird_f32_ansi(ptr nocapture noundef %fir, ptr ; CHECK-NEXT: [[ACC_5_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_CLEANUP3]] ], [ [[TMP33:%.*]], [[FOR_BODY14_7]] ] ; CHECK-NEXT: [[ACC_6_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_CLEANUP3]] ], [ [[TMP36:%.*]], [[FOR_BODY14_7]] ] ; CHECK-NEXT: [[ACC_7_LCSSA:%.*]] = phi float [ 0.000000e+00, [[FOR_COND_CLEANUP3]] ], [ [[TMP39:%.*]], [[FOR_BODY14_7]] ] -; CHECK-NEXT: [[COEFF_POS_0_LCSSA:%.*]] = phi i32 [ 0, [[FOR_COND_CLEANUP3]] ], [ [[LSR_IV_NEXT126:%.*]], [[FOR_BODY14_7]] ] -; CHECK-NEXT: [[N_0_LCSSA:%.*]] = phi i32 [ [[TMP4]], [[FOR_COND_CLEANUP3]] ], [ [[LSR_IV_NEXT100:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[COEFF_POS_0_LCSSA:%.*]] = phi i32 [ 0, [[FOR_COND_CLEANUP3]] ], [ [[INC15_7:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[N_0_LCSSA:%.*]] = phi i32 [ [[TMP4]], [[FOR_COND_CLEANUP3]] ], [ [[LSR_IV_NEXT78:%.*]], [[FOR_BODY14_7]] ] ; CHECK-NEXT: [[CMP2572:%.*]] = icmp slt i32 [[N_0_LCSSA]], [[TMP1]] ; CHECK-NEXT: br i1 [[CMP2572]], label [[FOR_BODY27_LR_PH:%.*]], label [[FOR_COND_CLEANUP26:%.*]] ; CHECK: for.body27.lr.ph: @@ -99,14 +99,14 @@ define dso_local noundef i32 
@dsps_fird_f32_ansi(ptr nocapture noundef %fir, ptr ; CHECK-NEXT: [[TMP12:%.*]] = add i32 [[COEFF_POS_0_LCSSA]], [[TMP1]] ; CHECK-NEXT: [[TMP13:%.*]] = sub i32 [[TMP1]], [[N_0_LCSSA]] ; CHECK-NEXT: [[TMP14:%.*]] = shl i32 [[N_0_LCSSA]], 2 -; CHECK-NEXT: [[SCEVGEP144:%.*]] = getelementptr i8, ptr [[TMP11]], i32 [[TMP14]] +; CHECK-NEXT: [[SCEVGEP120:%.*]] = getelementptr i8, ptr [[TMP11]], i32 [[TMP14]] ; CHECK-NEXT: [[TMP15:%.*]] = shl i32 [[COEFF_POS_0_LCSSA]], 2 -; CHECK-NEXT: [[SCEVGEP147:%.*]] = getelementptr i8, ptr [[TMP10]], i32 [[TMP15]] +; CHECK-NEXT: [[SCEVGEP123:%.*]] = getelementptr i8, ptr [[TMP10]], i32 [[TMP15]] ; CHECK-NEXT: br label [[FOR_BODY14_CLONE:%.*]] ; CHECK: for.body14.7: -; CHECK-NEXT: [[LSR_IV125:%.*]] = phi i32 [ 0, [[FOR_BODY14_LR_PH]] ], [ [[LSR_IV_NEXT126]], [[FOR_BODY14_7]] ] -; CHECK-NEXT: [[LSR_IV102:%.*]] = phi i32 [ 0, [[FOR_BODY14_LR_PH]] ], [ [[LSR_IV_NEXT103:%.*]], [[FOR_BODY14_7]] ] -; CHECK-NEXT: [[LSR_IV99:%.*]] = phi i32 [ [[TMP4]], [[FOR_BODY14_LR_PH]] ], [ [[LSR_IV_NEXT100]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[LSR_IV80:%.*]] = phi i32 [ 0, [[FOR_BODY14_LR_PH]] ], [ [[LSR_IV_NEXT81:%.*]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[LSR_IV77:%.*]] = phi i32 [ [[TMP4]], [[FOR_BODY14_LR_PH]] ], [ [[LSR_IV_NEXT78]], [[FOR_BODY14_7]] ] +; CHECK-NEXT: [[COEFF_POS_068:%.*]] = phi i32 [ 0, [[FOR_BODY14_LR_PH]] ], [ [[INC15_7]], [[FOR_BODY14_7]] ] ; CHECK-NEXT: [[ACC:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY14_LR_PH]] ], [ [[TMP18]], [[FOR_BODY14_7]] ] ; CHECK-NEXT: [[ACC3:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY14_LR_PH]] ], [ [[TMP21]], [[FOR_BODY14_7]] ] ; CHECK-NEXT: [[ACC4:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY14_LR_PH]] ], [ [[TMP24]], [[FOR_BODY14_7]] ] @@ -115,92 +115,92 @@ define dso_local noundef i32 @dsps_fird_f32_ansi(ptr nocapture noundef %fir, ptr ; CHECK-NEXT: [[ACC7:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY14_LR_PH]] ], [ [[TMP33]], [[FOR_BODY14_7]] ] ; CHECK-NEXT: [[ACC8:%.*]] = phi float [ 0.000000e+00, 
[[FOR_BODY14_LR_PH]] ], [ [[TMP36]], [[FOR_BODY14_7]] ] ; CHECK-NEXT: [[ACC9:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY14_LR_PH]] ], [ [[TMP39]], [[FOR_BODY14_7]] ] -; CHECK-NEXT: [[SCEVGEP141:%.*]] = getelementptr i8, ptr [[TMP5]], i32 [[LSR_IV102]] -; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[SCEVGEP141]], align 4 -; CHECK-NEXT: [[SCEVGEP124:%.*]] = getelementptr i8, ptr [[SCEVGEP123]], i32 [[LSR_IV102]] -; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[SCEVGEP124]], align 4 +; CHECK-NEXT: [[SCEVGEP117:%.*]] = getelementptr i8, ptr [[TMP5]], i32 [[LSR_IV80]] +; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[SCEVGEP117]], align 4 +; CHECK-NEXT: [[SCEVGEP102:%.*]] = getelementptr i8, ptr [[SCEVGEP101]], i32 [[LSR_IV80]] +; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[SCEVGEP102]], align 4 ; CHECK-NEXT: [[TMP18]] = tail call float @llvm.fmuladd.f32(float [[TMP16]], float [[TMP17]], float [[ACC]]) -; CHECK-NEXT: [[SCEVGEP140:%.*]] = getelementptr i8, ptr [[SCEVGEP139]], i32 [[LSR_IV102]] -; CHECK-NEXT: [[TMP19:%.*]] = load float, ptr [[SCEVGEP140]], align 4 -; CHECK-NEXT: [[SCEVGEP122:%.*]] = getelementptr i8, ptr [[SCEVGEP121]], i32 [[LSR_IV102]] -; CHECK-NEXT: [[TMP20:%.*]] = load float, ptr [[SCEVGEP122]], align 4 +; CHECK-NEXT: [[SCEVGEP114:%.*]] = getelementptr i8, ptr [[SCEVGEP113]], i32 [[LSR_IV80]] +; CHECK-NEXT: [[TMP19:%.*]] = load float, ptr [[SCEVGEP114]], align 4 +; CHECK-NEXT: [[SCEVGEP100:%.*]] = getelementptr i8, ptr [[SCEVGEP99]], i32 [[LSR_IV80]] +; CHECK-NEXT: [[TMP20:%.*]] = load float, ptr [[SCEVGEP100]], align 4 ; CHECK-NEXT: [[TMP21]] = tail call float @llvm.fmuladd.f32(float [[TMP19]], float [[TMP20]], float [[ACC3]]) -; CHECK-NEXT: [[SCEVGEP138:%.*]] = getelementptr i8, ptr [[SCEVGEP137]], i32 [[LSR_IV102]] -; CHECK-NEXT: [[TMP22:%.*]] = load float, ptr [[SCEVGEP138]], align 4 -; CHECK-NEXT: [[SCEVGEP119:%.*]] = getelementptr i8, ptr [[SCEVGEP118]], i32 [[LSR_IV102]] -; CHECK-NEXT: [[TMP23:%.*]] = load float, ptr [[SCEVGEP119]], align 4 
+; CHECK-NEXT: [[SCEVGEP112:%.*]] = getelementptr i8, ptr [[SCEVGEP111]], i32 [[LSR_IV80]] +; CHECK-NEXT: [[TMP22:%.*]] = load float, ptr [[SCEVGEP112]], align 4 +; CHECK-NEXT: [[SCEVGEP97:%.*]] = getelementptr i8, ptr [[SCEVGEP96]], i32 [[LSR_IV80]] +; CHECK-NEXT: [[TMP23:%.*]] = load float, ptr [[SCEVGEP97]], align 4 ; CHECK-NEXT: [[TMP24]] = tail call float @llvm.fmuladd.f32(float [[TMP22]], float [[TMP23]], float [[ACC4]]) -; CHECK-NEXT: [[SCEVGEP136:%.*]] = getelementptr i8, ptr [[SCEVGEP135]], i32 [[LSR_IV102]] -; CHECK-NEXT: [[TMP25:%.*]] = load float, ptr [[SCEVGEP136]], align 4 -; CHECK-NEXT: [[SCEVGEP116:%.*]] = getelementptr i8, ptr [[SCEVGEP115]], i32 [[LSR_IV102]] -; CHECK-NEXT: [[TMP26:%.*]] = load float, ptr [[SCEVGEP116]], align 4 +; CHECK-NEXT: [[SCEVGEP110:%.*]] = getelementptr i8, ptr [[SCEVGEP109]], i32 [[LSR_IV80]] +; CHECK-NEXT: [[TMP25:%.*]] = load float, ptr [[SCEVGEP110]], align 4 +; CHECK-NEXT: [[SCEVGEP94:%.*]] = getelementptr i8, ptr [[SCEVGEP93]], i32 [[LSR_IV80]] +; CHECK-NEXT: [[TMP26:%.*]] = load float, ptr [[SCEVGEP94]], align 4 ; CHECK-NEXT: [[TMP27]] = tail call float @llvm.fmuladd.f32(float [[TMP25]], float [[TMP26]], float [[ACC5]]) -; CHECK-NEXT: [[SCEVGEP134:%.*]] = getelementptr i8, ptr [[SCEVGEP133]], i32 [[LSR_IV102]] -; CHECK-NEXT: [[TMP28:%.*]] = load float, ptr [[SCEVGEP134]], align 4 -; CHECK-NEXT: [[SCEVGEP113:%.*]] = getelementptr i8, ptr [[SCEVGEP112]], i32 [[LSR_IV102]] -; CHECK-NEXT: [[TMP29:%.*]] = load float, ptr [[SCEVGEP113]], align 4 +; CHECK-NEXT: [[SCEVGEP108:%.*]] = getelementptr i8, ptr [[SCEVGEP107]], i32 [[LSR_IV80]] +; CHECK-NEXT: [[TMP28:%.*]] = load float, ptr [[SCEVGEP108]], align 4 +; CHECK-NEXT: [[SCEVGEP91:%.*]] = getelementptr i8, ptr [[SCEVGEP90]], i32 [[LSR_IV80]] +; CHECK-NEXT: [[TMP29:%.*]] = load float, ptr [[SCEVGEP91]], align 4 ; CHECK-NEXT: [[TMP30]] = tail call float @llvm.fmuladd.f32(float [[TMP28]], float [[TMP29]], float [[ACC6]]) -; CHECK-NEXT: [[SCEVGEP132:%.*]] = getelementptr i8, 
ptr [[SCEVGEP131]], i32 [[LSR_IV102]] -; CHECK-NEXT: [[TMP31:%.*]] = load float, ptr [[SCEVGEP132]], align 4 -; CHECK-NEXT: [[SCEVGEP110:%.*]] = getelementptr i8, ptr [[SCEVGEP109]], i32 [[LSR_IV102]] -; CHECK-NEXT: [[TMP32:%.*]] = load float, ptr [[SCEVGEP110]], align 4 +; CHECK-NEXT: [[SCEVGEP106:%.*]] = getelementptr i8, ptr [[SCEVGEP105]], i32 [[LSR_IV80]] +; CHECK-NEXT: [[TMP31:%.*]] = load float, ptr [[SCEVGEP106]], align 4 +; CHECK-NEXT: [[SCEVGEP88:%.*]] = getelementptr i8, ptr [[SCEVGEP87]], i32 [[LSR_IV80]] +; CHECK-NEXT: [[TMP32:%.*]] = load float, ptr [[SCEVGEP88]], align 4 ; CHECK-NEXT: [[TMP33]] = tail call float @llvm.fmuladd.f32(float [[TMP31]], float [[TMP32]], float [[ACC7]]) -; CHECK-NEXT: [[SCEVGEP130:%.*]] = getelementptr i8, ptr [[SCEVGEP129]], i32 [[LSR_IV102]] -; CHECK-NEXT: [[TMP34:%.*]] = load float, ptr [[SCEVGEP130]], align 4 -; CHECK-NEXT: [[SCEVGEP107:%.*]] = getelementptr i8, ptr [[SCEVGEP106]], i32 [[LSR_IV102]] -; CHECK-NEXT: [[TMP35:%.*]] = load float, ptr [[SCEVGEP107]], align 4 +; CHECK-NEXT: [[SCEVGEP104:%.*]] = getelementptr i8, ptr [[SCEVGEP103]], i32 [[LSR_IV80]] +; CHECK-NEXT: [[TMP34:%.*]] = load float, ptr [[SCEVGEP104]], align 4 +; CHECK-NEXT: [[SCEVGEP85:%.*]] = getelementptr i8, ptr [[SCEVGEP84]], i32 [[LSR_IV80]] +; CHECK-NEXT: [[TMP35:%.*]] = load float, ptr [[SCEVGEP85]], align 4 ; CHECK-NEXT: [[TMP36]] = tail call float @llvm.fmuladd.f32(float [[TMP34]], float [[TMP35]], float [[ACC8]]) -; CHECK-NEXT: [[SCEVGEP128:%.*]] = getelementptr i8, ptr [[SCEVGEP127]], i32 [[LSR_IV102]] -; CHECK-NEXT: [[TMP37:%.*]] = load float, ptr [[SCEVGEP128]], align 4 -; CHECK-NEXT: [[SCEVGEP104:%.*]] = getelementptr i8, ptr [[SCEVGEP101]], i32 [[LSR_IV102]] -; CHECK-NEXT: [[TMP38:%.*]] = load float, ptr [[SCEVGEP104]], align 4 +; CHECK-NEXT: [[INC15_7]] = add i32 [[COEFF_POS_068]], 8 +; CHECK-NEXT: [[SCEVGEP116:%.*]] = getelementptr i8, ptr [[SCEVGEP115]], i32 [[LSR_IV80]] +; CHECK-NEXT: [[TMP37:%.*]] = load float, ptr [[SCEVGEP116]], 
align 4 +; CHECK-NEXT: [[SCEVGEP82:%.*]] = getelementptr i8, ptr [[SCEVGEP79]], i32 [[LSR_IV80]] +; CHECK-NEXT: [[TMP38:%.*]] = load float, ptr [[SCEVGEP82]], align 4 ; CHECK-NEXT: [[TMP39]] = tail call float @llvm.fmuladd.f32(float [[TMP37]], float [[TMP38]], float [[ACC9]]) -; CHECK-NEXT: [[LSR_IV_NEXT100]] = add nsw i32 [[LSR_IV99]], 8 -; CHECK-NEXT: [[TMP40:%.*]] = add i32 [[LSR_IV_NEXT100]], 8 -; CHECK-NEXT: [[LSR_IV_NEXT103]] = add nuw i32 [[LSR_IV102]], 32 -; CHECK-NEXT: [[LSR_IV_NEXT126]] = add nuw i32 [[LSR_IV125]], 8 +; CHECK-NEXT: [[LSR_IV_NEXT78]] = add nsw i32 [[LSR_IV77]], 8 +; CHECK-NEXT: [[TMP40:%.*]] = add i32 [[LSR_IV_NEXT78]], 8 +; CHECK-NEXT: [[LSR_IV_NEXT81]] = add i32 [[LSR_IV80]], 32 ; CHECK-NEXT: [[EXITCOND83_NOT_7:%.*]] = icmp sgt i32 [[TMP40]], [[TMP1]] ; CHECK-NEXT: br i1 [[EXITCOND83_NOT_7]], label [[FOR_COND63_PREHEADER]], label [[FOR_BODY14_7]] ; CHECK: for.body79.lr.ph: ; CHECK-NEXT: [[TMP41:%.*]] = load ptr, ptr [[FIR]], align 4 ; CHECK-NEXT: [[TMP42:%.*]] = load ptr, ptr [[DELAY]], align 4 ; CHECK-NEXT: [[TMP43:%.*]] = and i32 [[TMP4]], 2147483640 -; CHECK-NEXT: [[SCEVGEP150:%.*]] = getelementptr i8, ptr [[TMP42]], i32 28 -; CHECK-NEXT: [[SCEVGEP154:%.*]] = getelementptr i8, ptr [[TMP42]], i32 24 -; CHECK-NEXT: [[SCEVGEP156:%.*]] = getelementptr i8, ptr [[TMP42]], i32 20 -; CHECK-NEXT: [[SCEVGEP158:%.*]] = getelementptr i8, ptr [[TMP42]], i32 16 -; CHECK-NEXT: [[SCEVGEP160:%.*]] = getelementptr i8, ptr [[TMP42]], i32 12 -; CHECK-NEXT: [[SCEVGEP162:%.*]] = getelementptr i8, ptr [[TMP42]], i32 8 -; CHECK-NEXT: [[SCEVGEP164:%.*]] = getelementptr i8, ptr [[TMP42]], i32 4 -; CHECK-NEXT: [[SCEVGEP169:%.*]] = getelementptr i8, ptr [[TMP41]], i32 28 +; CHECK-NEXT: [[SCEVGEP126:%.*]] = getelementptr i8, ptr [[TMP42]], i32 28 +; CHECK-NEXT: [[SCEVGEP130:%.*]] = getelementptr i8, ptr [[TMP42]], i32 24 +; CHECK-NEXT: [[SCEVGEP132:%.*]] = getelementptr i8, ptr [[TMP42]], i32 20 +; CHECK-NEXT: [[SCEVGEP134:%.*]] = getelementptr i8, ptr [[TMP42]], 
i32 16 +; CHECK-NEXT: [[SCEVGEP136:%.*]] = getelementptr i8, ptr [[TMP42]], i32 12 +; CHECK-NEXT: [[SCEVGEP138:%.*]] = getelementptr i8, ptr [[TMP42]], i32 8 +; CHECK-NEXT: [[SCEVGEP140:%.*]] = getelementptr i8, ptr [[TMP42]], i32 4 +; CHECK-NEXT: [[SCEVGEP145:%.*]] = getelementptr i8, ptr [[TMP41]], i32 28 ; CHECK-NEXT: [[TMP44:%.*]] = shl i32 [[COEFF_POS_1_LCSSA:%.*]], 2 -; CHECK-NEXT: [[SCEVGEP170:%.*]] = getelementptr i8, ptr [[SCEVGEP169]], i32 [[TMP44]] -; CHECK-NEXT: [[SCEVGEP172:%.*]] = getelementptr i8, ptr [[TMP41]], i32 24 -; CHECK-NEXT: [[SCEVGEP173:%.*]] = getelementptr i8, ptr [[SCEVGEP172]], i32 [[TMP44]] -; CHECK-NEXT: [[SCEVGEP175:%.*]] = getelementptr i8, ptr [[TMP41]], i32 20 -; CHECK-NEXT: [[SCEVGEP176:%.*]] = getelementptr i8, ptr [[SCEVGEP175]], i32 [[TMP44]] -; CHECK-NEXT: [[SCEVGEP178:%.*]] = getelementptr i8, ptr [[TMP41]], i32 16 -; CHECK-NEXT: [[SCEVGEP179:%.*]] = getelementptr i8, ptr [[SCEVGEP178]], i32 [[TMP44]] -; CHECK-NEXT: [[SCEVGEP181:%.*]] = getelementptr i8, ptr [[TMP41]], i32 12 -; CHECK-NEXT: [[SCEVGEP182:%.*]] = getelementptr i8, ptr [[SCEVGEP181]], i32 [[TMP44]] -; CHECK-NEXT: [[SCEVGEP184:%.*]] = getelementptr i8, ptr [[TMP41]], i32 8 -; CHECK-NEXT: [[SCEVGEP185:%.*]] = getelementptr i8, ptr [[SCEVGEP184]], i32 [[TMP44]] -; CHECK-NEXT: [[SCEVGEP187:%.*]] = getelementptr i8, ptr [[TMP41]], i32 4 -; CHECK-NEXT: [[SCEVGEP188:%.*]] = getelementptr i8, ptr [[SCEVGEP187]], i32 [[TMP44]] -; CHECK-NEXT: [[SCEVGEP190:%.*]] = getelementptr i8, ptr [[TMP41]], i32 [[TMP44]] +; CHECK-NEXT: [[SCEVGEP146:%.*]] = getelementptr i8, ptr [[SCEVGEP145]], i32 [[TMP44]] +; CHECK-NEXT: [[SCEVGEP148:%.*]] = getelementptr i8, ptr [[TMP41]], i32 24 +; CHECK-NEXT: [[SCEVGEP149:%.*]] = getelementptr i8, ptr [[SCEVGEP148]], i32 [[TMP44]] +; CHECK-NEXT: [[SCEVGEP151:%.*]] = getelementptr i8, ptr [[TMP41]], i32 20 +; CHECK-NEXT: [[SCEVGEP152:%.*]] = getelementptr i8, ptr [[SCEVGEP151]], i32 [[TMP44]] +; CHECK-NEXT: [[SCEVGEP154:%.*]] = getelementptr i8, 
ptr [[TMP41]], i32 16 +; CHECK-NEXT: [[SCEVGEP155:%.*]] = getelementptr i8, ptr [[SCEVGEP154]], i32 [[TMP44]] +; CHECK-NEXT: [[SCEVGEP157:%.*]] = getelementptr i8, ptr [[TMP41]], i32 12 +; CHECK-NEXT: [[SCEVGEP158:%.*]] = getelementptr i8, ptr [[SCEVGEP157]], i32 [[TMP44]] +; CHECK-NEXT: [[SCEVGEP160:%.*]] = getelementptr i8, ptr [[TMP41]], i32 8 +; CHECK-NEXT: [[SCEVGEP161:%.*]] = getelementptr i8, ptr [[SCEVGEP160]], i32 [[TMP44]] +; CHECK-NEXT: [[SCEVGEP163:%.*]] = getelementptr i8, ptr [[TMP41]], i32 4 +; CHECK-NEXT: [[SCEVGEP164:%.*]] = getelementptr i8, ptr [[SCEVGEP163]], i32 [[TMP44]] +; CHECK-NEXT: [[SCEVGEP166:%.*]] = getelementptr i8, ptr [[TMP41]], i32 [[TMP44]] ; CHECK-NEXT: br label [[FOR_BODY27_7:%.*]] ; CHECK: for.body14.clone: -; CHECK-NEXT: [[LSR_IV148:%.*]] = phi ptr [ [[SCEVGEP149:%.*]], [[FOR_BODY14_CLONE]] ], [ [[SCEVGEP147]], [[FOR_BODY27_LR_PH]] ] -; CHECK-NEXT: [[LSR_IV145:%.*]] = phi ptr [ [[SCEVGEP146:%.*]], [[FOR_BODY14_CLONE]] ], [ [[SCEVGEP144]], [[FOR_BODY27_LR_PH]] ] -; CHECK-NEXT: [[LSR_IV142:%.*]] = phi i32 [ [[LSR_IV_NEXT143:%.*]], [[FOR_BODY14_CLONE]] ], [ [[TMP13]], [[FOR_BODY27_LR_PH]] ] +; CHECK-NEXT: [[LSR_IV124:%.*]] = phi ptr [ [[SCEVGEP125:%.*]], [[FOR_BODY14_CLONE]] ], [ [[SCEVGEP123]], [[FOR_BODY27_LR_PH]] ] +; CHECK-NEXT: [[LSR_IV121:%.*]] = phi ptr [ [[SCEVGEP122:%.*]], [[FOR_BODY14_CLONE]] ], [ [[SCEVGEP120]], [[FOR_BODY27_LR_PH]] ] +; CHECK-NEXT: [[LSR_IV118:%.*]] = phi i32 [ [[LSR_IV_NEXT119:%.*]], [[FOR_BODY14_CLONE]] ], [ [[TMP13]], [[FOR_BODY27_LR_PH]] ] ; CHECK-NEXT: [[ACC_067_CLONE:%.*]] = phi float [ [[ACC_0_LCSSA]], [[FOR_BODY27_LR_PH]] ], [ [[TMP47:%.*]], [[FOR_BODY14_CLONE]] ] -; CHECK-NEXT: [[TMP45:%.*]] = load float, ptr [[LSR_IV148]], align 4 -; CHECK-NEXT: [[TMP46:%.*]] = load float, ptr [[LSR_IV145]], align 4 +; CHECK-NEXT: [[TMP45:%.*]] = load float, ptr [[LSR_IV124]], align 4 +; CHECK-NEXT: [[TMP46:%.*]] = load float, ptr [[LSR_IV121]], align 4 ; CHECK-NEXT: [[TMP47]] = tail call float 
@llvm.fmuladd.f32(float [[TMP45]], float [[TMP46]], float [[ACC_067_CLONE]]) -; CHECK-NEXT: [[LSR_IV_NEXT143]] = add i32 [[LSR_IV142]], -1 -; CHECK-NEXT: [[SCEVGEP146]] = getelementptr i8, ptr [[LSR_IV145]], i32 4 -; CHECK-NEXT: [[SCEVGEP149]] = getelementptr i8, ptr [[LSR_IV148]], i32 4 -; CHECK-NEXT: [[EXITCOND83_NOT_CLONE:%.*]] = icmp eq i32 [[LSR_IV_NEXT143]], 0 +; CHECK-NEXT: [[LSR_IV_NEXT119]] = add i32 [[LSR_IV118]], -1 +; CHECK-NEXT: [[SCEVGEP122]] = getelementptr i8, ptr [[LSR_IV121]], i32 4 +; CHECK-NEXT: [[SCEVGEP125]] = getelementptr i8, ptr [[LSR_IV124]], i32 4 +; CHECK-NEXT: [[EXITCOND83_NOT_CLONE:%.*]] = icmp eq i32 [[LSR_IV_NEXT119]], 0 ; CHECK-NEXT: br i1 [[EXITCOND83_NOT_CLONE]], label [[FOR_COND_CLEANUP26_LOOPEXIT:%.*]], label [[FOR_BODY14_CLONE]] ; CHECK: for.cond130.preheader: ; CHECK-NEXT: [[ACC_0_LCSSA_CLONE:%.*]] = phi float [ [[ACC_1_LCSSA:%.*]], [[FOR_COND_CLEANUP26]] ], [ [[TMP51:%.*]], [[FOR_BODY27_7]] ] @@ -211,75 +211,75 @@ define dso_local noundef i32 @dsps_fird_f32_ansi(ptr nocapture noundef %fir, ptr ; CHECK-NEXT: [[ACC_5_LCSSA_CLONE:%.*]] = phi float [ [[ACC_5_LCSSA]], [[FOR_COND_CLEANUP26]] ], [ [[TMP66:%.*]], [[FOR_BODY27_7]] ] ; CHECK-NEXT: [[ACC_6_LCSSA_CLONE:%.*]] = phi float [ [[ACC_6_LCSSA]], [[FOR_COND_CLEANUP26]] ], [ [[TMP69:%.*]], [[FOR_BODY27_7]] ] ; CHECK-NEXT: [[ACC_7_LCSSA_CLONE:%.*]] = phi float [ [[ACC_7_LCSSA]], [[FOR_COND_CLEANUP26]] ], [ [[TMP72:%.*]], [[FOR_BODY27_7]] ] -; CHECK-NEXT: [[COEFF_POS_0_LCSSA_CLONE:%.*]] = phi i32 [ [[COEFF_POS_1_LCSSA]], [[FOR_COND_CLEANUP26]] ], [ [[LSR_IV_NEXT168:%.*]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[COEFF_POS_0_LCSSA_CLONE:%.*]] = phi i32 [ [[COEFF_POS_1_LCSSA]], [[FOR_COND_CLEANUP26]] ], [ [[LSR_IV_NEXT144:%.*]], [[FOR_BODY27_7]] ] ; CHECK-NEXT: [[N_0_LCSSA_CLONE:%.*]] = phi i32 [ 0, [[FOR_COND_CLEANUP26]] ], [ [[TMP43]], [[FOR_BODY27_7]] ] ; CHECK-NEXT: [[CMP2572_CLONE:%.*]] = icmp slt i32 [[N_0_LCSSA_CLONE]], [[TMP4]] ; CHECK-NEXT: br i1 [[CMP2572_CLONE]], label 
[[FOR_BODY133_LR_PH:%.*]], label [[FOR_END141]] ; CHECK: for.cond.cleanup26.loopexit: -; CHECK-NEXT: [[DOTLCSSA207:%.*]] = phi float [ [[TMP47]], [[FOR_BODY14_CLONE]] ] +; CHECK-NEXT: [[DOTLCSSA183:%.*]] = phi float [ [[TMP47]], [[FOR_BODY14_CLONE]] ] ; CHECK-NEXT: [[N_0_LCSSA_NEG:%.*]] = sub i32 0, [[N_0_LCSSA]] ; CHECK-NEXT: [[TMP48:%.*]] = add i32 [[TMP12]], [[N_0_LCSSA_NEG]] ; CHECK-NEXT: br label [[FOR_COND_CLEANUP26]] ; CHECK: for.cond.cleanup26: ; CHECK-NEXT: [[COEFF_POS_1_LCSSA]] = phi i32 [ [[COEFF_POS_0_LCSSA]], [[FOR_COND63_PREHEADER]] ], [ [[TMP48]], [[FOR_COND_CLEANUP26_LOOPEXIT]] ] -; CHECK-NEXT: [[ACC_1_LCSSA]] = phi float [ [[ACC_0_LCSSA]], [[FOR_COND63_PREHEADER]] ], [ [[DOTLCSSA207]], [[FOR_COND_CLEANUP26_LOOPEXIT]] ] +; CHECK-NEXT: [[ACC_1_LCSSA]] = phi float [ [[ACC_0_LCSSA]], [[FOR_COND63_PREHEADER]] ], [ [[DOTLCSSA183]], [[FOR_COND_CLEANUP26_LOOPEXIT]] ] ; CHECK-NEXT: [[EXITCOND85_NOT:%.*]] = icmp slt i32 [[TMP4]], 8 ; CHECK-NEXT: br i1 [[EXITCOND85_NOT]], label [[FOR_COND130_PREHEADER:%.*]], label [[FOR_BODY79_LR_PH:%.*]] ; CHECK: for.body27.7: -; CHECK-NEXT: [[LSR_IV167:%.*]] = phi i32 [ [[COEFF_POS_1_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[LSR_IV_NEXT168]], [[FOR_BODY27_7]] ] -; CHECK-NEXT: [[LSR_IV151:%.*]] = phi i32 [ 0, [[FOR_BODY79_LR_PH]] ], [ [[LSR_IV_NEXT152:%.*]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[LSR_IV143:%.*]] = phi i32 [ [[COEFF_POS_1_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[LSR_IV_NEXT144]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[LSR_IV127:%.*]] = phi i32 [ 0, [[FOR_BODY79_LR_PH]] ], [ [[LSR_IV_NEXT128:%.*]], [[FOR_BODY27_7]] ] ; CHECK-NEXT: [[ADD76310:%.*]] = phi i32 [ 8, [[FOR_BODY79_LR_PH]] ], [ [[ADD76:%.*]], [[FOR_BODY27_7]] ] -; CHECK-NEXT: [[ACC38:%.*]] = phi float [ [[ACC_1_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[TMP51]], [[FOR_BODY27_7]] ] -; CHECK-NEXT: [[ACC39:%.*]] = phi float [ [[ACC_1_LCSSA2]], [[FOR_BODY79_LR_PH]] ], [ [[TMP54]], [[FOR_BODY27_7]] ] -; CHECK-NEXT: [[ACC40:%.*]] = phi float [ [[ACC_2_LCSSA]], 
[[FOR_BODY79_LR_PH]] ], [ [[TMP57]], [[FOR_BODY27_7]] ] -; CHECK-NEXT: [[ACC41:%.*]] = phi float [ [[ACC_3_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[TMP60]], [[FOR_BODY27_7]] ] -; CHECK-NEXT: [[ACC42:%.*]] = phi float [ [[ACC_4_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[TMP63]], [[FOR_BODY27_7]] ] -; CHECK-NEXT: [[ACC43:%.*]] = phi float [ [[ACC_5_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[TMP66]], [[FOR_BODY27_7]] ] -; CHECK-NEXT: [[ACC44:%.*]] = phi float [ [[ACC_6_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[TMP69]], [[FOR_BODY27_7]] ] -; CHECK-NEXT: [[ACC45:%.*]] = phi float [ [[ACC_7_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[TMP72]], [[FOR_BODY27_7]] ] -; CHECK-NEXT: [[SCEVGEP191:%.*]] = getelementptr i8, ptr [[SCEVGEP190]], i32 [[LSR_IV151]] -; CHECK-NEXT: [[TMP49:%.*]] = load float, ptr [[SCEVGEP191]], align 4 -; CHECK-NEXT: [[SCEVGEP166:%.*]] = getelementptr i8, ptr [[TMP42]], i32 [[LSR_IV151]] -; CHECK-NEXT: [[TMP50:%.*]] = load float, ptr [[SCEVGEP166]], align 4 -; CHECK-NEXT: [[TMP51]] = tail call float @llvm.fmuladd.f32(float [[TMP49]], float [[TMP50]], float [[ACC38]]) -; CHECK-NEXT: [[SCEVGEP189:%.*]] = getelementptr i8, ptr [[SCEVGEP188]], i32 [[LSR_IV151]] -; CHECK-NEXT: [[TMP52:%.*]] = load float, ptr [[SCEVGEP189]], align 4 -; CHECK-NEXT: [[SCEVGEP165:%.*]] = getelementptr i8, ptr [[SCEVGEP164]], i32 [[LSR_IV151]] -; CHECK-NEXT: [[TMP53:%.*]] = load float, ptr [[SCEVGEP165]], align 4 -; CHECK-NEXT: [[TMP54]] = tail call float @llvm.fmuladd.f32(float [[TMP52]], float [[TMP53]], float [[ACC39]]) -; CHECK-NEXT: [[SCEVGEP186:%.*]] = getelementptr i8, ptr [[SCEVGEP185]], i32 [[LSR_IV151]] -; CHECK-NEXT: [[TMP55:%.*]] = load float, ptr [[SCEVGEP186]], align 4 -; CHECK-NEXT: [[SCEVGEP163:%.*]] = getelementptr i8, ptr [[SCEVGEP162]], i32 [[LSR_IV151]] -; CHECK-NEXT: [[TMP56:%.*]] = load float, ptr [[SCEVGEP163]], align 4 -; CHECK-NEXT: [[TMP57]] = tail call float @llvm.fmuladd.f32(float [[TMP55]], float [[TMP56]], float [[ACC40]]) -; CHECK-NEXT: [[SCEVGEP183:%.*]] = getelementptr i8, 
ptr [[SCEVGEP182]], i32 [[LSR_IV151]] -; CHECK-NEXT: [[TMP58:%.*]] = load float, ptr [[SCEVGEP183]], align 4 -; CHECK-NEXT: [[SCEVGEP161:%.*]] = getelementptr i8, ptr [[SCEVGEP160]], i32 [[LSR_IV151]] -; CHECK-NEXT: [[TMP59:%.*]] = load float, ptr [[SCEVGEP161]], align 4 -; CHECK-NEXT: [[TMP60]] = tail call float @llvm.fmuladd.f32(float [[TMP58]], float [[TMP59]], float [[ACC41]]) -; CHECK-NEXT: [[SCEVGEP180:%.*]] = getelementptr i8, ptr [[SCEVGEP179]], i32 [[LSR_IV151]] -; CHECK-NEXT: [[TMP61:%.*]] = load float, ptr [[SCEVGEP180]], align 4 -; CHECK-NEXT: [[SCEVGEP159:%.*]] = getelementptr i8, ptr [[SCEVGEP158]], i32 [[LSR_IV151]] -; CHECK-NEXT: [[TMP62:%.*]] = load float, ptr [[SCEVGEP159]], align 4 -; CHECK-NEXT: [[TMP63]] = tail call float @llvm.fmuladd.f32(float [[TMP61]], float [[TMP62]], float [[ACC42]]) -; CHECK-NEXT: [[SCEVGEP177:%.*]] = getelementptr i8, ptr [[SCEVGEP176]], i32 [[LSR_IV151]] -; CHECK-NEXT: [[TMP64:%.*]] = load float, ptr [[SCEVGEP177]], align 4 -; CHECK-NEXT: [[SCEVGEP157:%.*]] = getelementptr i8, ptr [[SCEVGEP156]], i32 [[LSR_IV151]] -; CHECK-NEXT: [[TMP65:%.*]] = load float, ptr [[SCEVGEP157]], align 4 -; CHECK-NEXT: [[TMP66]] = tail call float @llvm.fmuladd.f32(float [[TMP64]], float [[TMP65]], float [[ACC43]]) -; CHECK-NEXT: [[SCEVGEP174:%.*]] = getelementptr i8, ptr [[SCEVGEP173]], i32 [[LSR_IV151]] -; CHECK-NEXT: [[TMP67:%.*]] = load float, ptr [[SCEVGEP174]], align 4 -; CHECK-NEXT: [[SCEVGEP155:%.*]] = getelementptr i8, ptr [[SCEVGEP154]], i32 [[LSR_IV151]] -; CHECK-NEXT: [[TMP68:%.*]] = load float, ptr [[SCEVGEP155]], align 4 -; CHECK-NEXT: [[TMP69]] = tail call float @llvm.fmuladd.f32(float [[TMP67]], float [[TMP68]], float [[ACC44]]) -; CHECK-NEXT: [[SCEVGEP171:%.*]] = getelementptr i8, ptr [[SCEVGEP170]], i32 [[LSR_IV151]] -; CHECK-NEXT: [[TMP70:%.*]] = load float, ptr [[SCEVGEP171]], align 4 -; CHECK-NEXT: [[SCEVGEP153:%.*]] = getelementptr i8, ptr [[SCEVGEP150]], i32 [[LSR_IV151]] -; CHECK-NEXT: [[TMP71:%.*]] = load float, ptr 
[[SCEVGEP153]], align 4 -; CHECK-NEXT: [[TMP72]] = tail call float @llvm.fmuladd.f32(float [[TMP70]], float [[TMP71]], float [[ACC45]]) +; CHECK-NEXT: [[ACC20:%.*]] = phi float [ [[ACC_1_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[TMP51]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC21:%.*]] = phi float [ [[ACC_1_LCSSA2]], [[FOR_BODY79_LR_PH]] ], [ [[TMP54]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC22:%.*]] = phi float [ [[ACC_2_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[TMP57]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC23:%.*]] = phi float [ [[ACC_3_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[TMP60]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC24:%.*]] = phi float [ [[ACC_4_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[TMP63]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC25:%.*]] = phi float [ [[ACC_5_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[TMP66]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC26:%.*]] = phi float [ [[ACC_6_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[TMP69]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[ACC27:%.*]] = phi float [ [[ACC_7_LCSSA]], [[FOR_BODY79_LR_PH]] ], [ [[TMP72]], [[FOR_BODY27_7]] ] +; CHECK-NEXT: [[SCEVGEP167:%.*]] = getelementptr i8, ptr [[SCEVGEP166]], i32 [[LSR_IV127]] +; CHECK-NEXT: [[TMP49:%.*]] = load float, ptr [[SCEVGEP167]], align 4 +; CHECK-NEXT: [[SCEVGEP142:%.*]] = getelementptr i8, ptr [[TMP42]], i32 [[LSR_IV127]] +; CHECK-NEXT: [[TMP50:%.*]] = load float, ptr [[SCEVGEP142]], align 4 +; CHECK-NEXT: [[TMP51]] = tail call float @llvm.fmuladd.f32(float [[TMP49]], float [[TMP50]], float [[ACC20]]) +; CHECK-NEXT: [[SCEVGEP165:%.*]] = getelementptr i8, ptr [[SCEVGEP164]], i32 [[LSR_IV127]] +; CHECK-NEXT: [[TMP52:%.*]] = load float, ptr [[SCEVGEP165]], align 4 +; CHECK-NEXT: [[SCEVGEP141:%.*]] = getelementptr i8, ptr [[SCEVGEP140]], i32 [[LSR_IV127]] +; CHECK-NEXT: [[TMP53:%.*]] = load float, ptr [[SCEVGEP141]], align 4 +; CHECK-NEXT: [[TMP54]] = tail call float @llvm.fmuladd.f32(float [[TMP52]], float [[TMP53]], float [[ACC21]]) +; CHECK-NEXT: [[SCEVGEP162:%.*]] = getelementptr i8, ptr 
[[SCEVGEP161]], i32 [[LSR_IV127]] +; CHECK-NEXT: [[TMP55:%.*]] = load float, ptr [[SCEVGEP162]], align 4 +; CHECK-NEXT: [[SCEVGEP139:%.*]] = getelementptr i8, ptr [[SCEVGEP138]], i32 [[LSR_IV127]] +; CHECK-NEXT: [[TMP56:%.*]] = load float, ptr [[SCEVGEP139]], align 4 +; CHECK-NEXT: [[TMP57]] = tail call float @llvm.fmuladd.f32(float [[TMP55]], float [[TMP56]], float [[ACC22]]) +; CHECK-NEXT: [[SCEVGEP159:%.*]] = getelementptr i8, ptr [[SCEVGEP158]], i32 [[LSR_IV127]] +; CHECK-NEXT: [[TMP58:%.*]] = load float, ptr [[SCEVGEP159]], align 4 +; CHECK-NEXT: [[SCEVGEP137:%.*]] = getelementptr i8, ptr [[SCEVGEP136]], i32 [[LSR_IV127]] +; CHECK-NEXT: [[TMP59:%.*]] = load float, ptr [[SCEVGEP137]], align 4 +; CHECK-NEXT: [[TMP60]] = tail call float @llvm.fmuladd.f32(float [[TMP58]], float [[TMP59]], float [[ACC23]]) +; CHECK-NEXT: [[SCEVGEP156:%.*]] = getelementptr i8, ptr [[SCEVGEP155]], i32 [[LSR_IV127]] +; CHECK-NEXT: [[TMP61:%.*]] = load float, ptr [[SCEVGEP156]], align 4 +; CHECK-NEXT: [[SCEVGEP135:%.*]] = getelementptr i8, ptr [[SCEVGEP134]], i32 [[LSR_IV127]] +; CHECK-NEXT: [[TMP62:%.*]] = load float, ptr [[SCEVGEP135]], align 4 +; CHECK-NEXT: [[TMP63]] = tail call float @llvm.fmuladd.f32(float [[TMP61]], float [[TMP62]], float [[ACC24]]) +; CHECK-NEXT: [[SCEVGEP153:%.*]] = getelementptr i8, ptr [[SCEVGEP152]], i32 [[LSR_IV127]] +; CHECK-NEXT: [[TMP64:%.*]] = load float, ptr [[SCEVGEP153]], align 4 +; CHECK-NEXT: [[SCEVGEP133:%.*]] = getelementptr i8, ptr [[SCEVGEP132]], i32 [[LSR_IV127]] +; CHECK-NEXT: [[TMP65:%.*]] = load float, ptr [[SCEVGEP133]], align 4 +; CHECK-NEXT: [[TMP66]] = tail call float @llvm.fmuladd.f32(float [[TMP64]], float [[TMP65]], float [[ACC25]]) +; CHECK-NEXT: [[SCEVGEP150:%.*]] = getelementptr i8, ptr [[SCEVGEP149]], i32 [[LSR_IV127]] +; CHECK-NEXT: [[TMP67:%.*]] = load float, ptr [[SCEVGEP150]], align 4 +; CHECK-NEXT: [[SCEVGEP131:%.*]] = getelementptr i8, ptr [[SCEVGEP130]], i32 [[LSR_IV127]] +; CHECK-NEXT: [[TMP68:%.*]] = load float, ptr 
[[SCEVGEP131]], align 4 +; CHECK-NEXT: [[TMP69]] = tail call float @llvm.fmuladd.f32(float [[TMP67]], float [[TMP68]], float [[ACC26]]) +; CHECK-NEXT: [[SCEVGEP147:%.*]] = getelementptr i8, ptr [[SCEVGEP146]], i32 [[LSR_IV127]] +; CHECK-NEXT: [[TMP70:%.*]] = load float, ptr [[SCEVGEP147]], align 4 +; CHECK-NEXT: [[SCEVGEP129:%.*]] = getelementptr i8, ptr [[SCEVGEP126]], i32 [[LSR_IV127]] +; CHECK-NEXT: [[TMP71:%.*]] = load float, ptr [[SCEVGEP129]], align 4 +; CHECK-NEXT: [[TMP72]] = tail call float @llvm.fmuladd.f32(float [[TMP70]], float [[TMP71]], float [[ACC27]]) ; CHECK-NEXT: [[ADD76]] = add nuw nsw i32 [[ADD76310]], 8 -; CHECK-NEXT: [[LSR_IV_NEXT152]] = add nuw i32 [[LSR_IV151]], 32 -; CHECK-NEXT: [[LSR_IV_NEXT168]] = add i32 [[LSR_IV167]], 8 +; CHECK-NEXT: [[LSR_IV_NEXT128]] = add nuw i32 [[LSR_IV127]], 32 +; CHECK-NEXT: [[LSR_IV_NEXT144]] = add i32 [[LSR_IV143]], 8 ; CHECK-NEXT: [[EXITCOND84_NOT_7:%.*]] = icmp sgt i32 [[ADD76]], [[TMP4]] ; CHECK-NEXT: br i1 [[EXITCOND84_NOT_7]], label [[FOR_COND130_PREHEADER]], label [[FOR_BODY27_7]] ; CHECK: for.body133.lr.ph: @@ -287,32 +287,32 @@ define dso_local noundef i32 @dsps_fird_f32_ansi(ptr nocapture noundef %fir, ptr ; CHECK-NEXT: [[TMP74:%.*]] = load ptr, ptr [[DELAY]], align 4 ; CHECK-NEXT: [[TMP75:%.*]] = sub i32 [[TMP4]], [[N_0_LCSSA_CLONE]] ; CHECK-NEXT: [[TMP76:%.*]] = shl i32 [[N_0_LCSSA_CLONE]], 2 -; CHECK-NEXT: [[SCEVGEP194:%.*]] = getelementptr i8, ptr [[TMP74]], i32 [[TMP76]] +; CHECK-NEXT: [[SCEVGEP170:%.*]] = getelementptr i8, ptr [[TMP74]], i32 [[TMP76]] ; CHECK-NEXT: [[TMP77:%.*]] = shl i32 [[COEFF_POS_0_LCSSA_CLONE]], 2 -; CHECK-NEXT: [[SCEVGEP197:%.*]] = getelementptr i8, ptr [[TMP73]], i32 [[TMP77]] +; CHECK-NEXT: [[SCEVGEP173:%.*]] = getelementptr i8, ptr [[TMP73]], i32 [[TMP77]] ; CHECK-NEXT: br label [[FOR_BODY27_CLONE:%.*]] ; CHECK: for.body27.clone: -; CHECK-NEXT: [[LSR_IV198:%.*]] = phi ptr [ [[SCEVGEP199:%.*]], [[FOR_BODY27_CLONE]] ], [ [[SCEVGEP197]], [[FOR_BODY133_LR_PH]] ] -; 
CHECK-NEXT: [[LSR_IV195:%.*]] = phi ptr [ [[SCEVGEP196:%.*]], [[FOR_BODY27_CLONE]] ], [ [[SCEVGEP194]], [[FOR_BODY133_LR_PH]] ] -; CHECK-NEXT: [[LSR_IV192:%.*]] = phi i32 [ [[LSR_IV_NEXT193:%.*]], [[FOR_BODY27_CLONE]] ], [ [[TMP75]], [[FOR_BODY133_LR_PH]] ] +; CHECK-NEXT: [[LSR_IV174:%.*]] = phi ptr [ [[SCEVGEP175:%.*]], [[FOR_BODY27_CLONE]] ], [ [[SCEVGEP173]], [[FOR_BODY133_LR_PH]] ] +; CHECK-NEXT: [[LSR_IV171:%.*]] = phi ptr [ [[SCEVGEP172:%.*]], [[FOR_BODY27_CLONE]] ], [ [[SCEVGEP170]], [[FOR_BODY133_LR_PH]] ] +; CHECK-NEXT: [[LSR_IV168:%.*]] = phi i32 [ [[LSR_IV_NEXT169:%.*]], [[FOR_BODY27_CLONE]] ], [ [[TMP75]], [[FOR_BODY133_LR_PH]] ] ; CHECK-NEXT: [[ACC_173_CLONE:%.*]] = phi float [ [[ACC_0_LCSSA_CLONE]], [[FOR_BODY133_LR_PH]] ], [ [[TMP80:%.*]], [[FOR_BODY27_CLONE]] ] -; CHECK-NEXT: [[TMP78:%.*]] = load float, ptr [[LSR_IV198]], align 4 -; CHECK-NEXT: [[TMP79:%.*]] = load float, ptr [[LSR_IV195]], align 4 +; CHECK-NEXT: [[TMP78:%.*]] = load float, ptr [[LSR_IV174]], align 4 +; CHECK-NEXT: [[TMP79:%.*]] = load float, ptr [[LSR_IV171]], align 4 ; CHECK-NEXT: [[TMP80]] = tail call float @llvm.fmuladd.f32(float [[TMP78]], float [[TMP79]], float [[ACC_173_CLONE]]) -; CHECK-NEXT: [[LSR_IV_NEXT193]] = add i32 [[LSR_IV192]], -1 -; CHECK-NEXT: [[SCEVGEP196]] = getelementptr i8, ptr [[LSR_IV195]], i32 4 -; CHECK-NEXT: [[SCEVGEP199]] = getelementptr i8, ptr [[LSR_IV198]], i32 4 -; CHECK-NEXT: [[EXITCOND84_NOT_CLONE:%.*]] = icmp eq i32 [[LSR_IV_NEXT193]], 0 +; CHECK-NEXT: [[LSR_IV_NEXT169]] = add i32 [[LSR_IV168]], -1 +; CHECK-NEXT: [[SCEVGEP172]] = getelementptr i8, ptr [[LSR_IV171]], i32 4 +; CHECK-NEXT: [[SCEVGEP175]] = getelementptr i8, ptr [[LSR_IV174]], i32 4 +; CHECK-NEXT: [[EXITCOND84_NOT_CLONE:%.*]] = icmp eq i32 [[LSR_IV_NEXT169]], 0 ; CHECK-NEXT: br i1 [[EXITCOND84_NOT_CLONE]], label [[FOR_END141]], label [[FOR_BODY27_CLONE]] ; CHECK: for.end141: ; CHECK-NEXT: [[ACC0_3_LCSSA:%.*]] = phi float [ [[ACC_0_LCSSA_CLONE]], [[FOR_COND130_PREHEADER]] ], [ 
[[TMP80]], [[FOR_BODY27_CLONE]] ] ; CHECK-NEXT: [[ADD60:%.*]] = fadd float [[ACC_1_LCSSA2_CLONE]], [[ACC0_3_LCSSA]] -; CHECK-NEXT: [[ADD6179:%.*]] = fadd float [[ACC_2_LCSSA_CLONE]], [[ACC_3_LCSSA_CLONE]] +; CHECK-NEXT: [[ADD61:%.*]] = fadd float [[ACC_2_LCSSA_CLONE]], [[ACC_3_LCSSA_CLONE]] ; CHECK-NEXT: [[ADD62:%.*]] = fadd float [[ACC_4_LCSSA_CLONE]], [[ACC_5_LCSSA_CLONE]] -; CHECK-NEXT: [[ADD6380:%.*]] = fadd float [[ACC_6_LCSSA_CLONE]], [[ACC_7_LCSSA_CLONE]] -; CHECK-NEXT: [[ADD64:%.*]] = fadd float [[ADD6179]], [[ADD60]] -; CHECK-NEXT: [[ADD6581:%.*]] = fadd float [[ADD62]], [[ADD6380]] -; CHECK-NEXT: [[ADD66:%.*]] = fadd float [[ADD6581]], [[ADD64]] +; CHECK-NEXT: [[ADD63:%.*]] = fadd float [[ACC_6_LCSSA_CLONE]], [[ACC_7_LCSSA_CLONE]] +; CHECK-NEXT: [[ADD64:%.*]] = fadd float [[ADD61]], [[ADD60]] +; CHECK-NEXT: [[ADD65:%.*]] = fadd float [[ADD62]], [[ADD63]] +; CHECK-NEXT: [[ADD66:%.*]] = fadd float [[ADD65]], [[ADD64]] ; CHECK-NEXT: [[ARRAYIDX37:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_080]] ; CHECK-NEXT: store float [[ADD66]], ptr [[ARRAYIDX37]], align 4 ; CHECK-NEXT: [[INC152]] = add nuw nsw i32 [[I_080]], 1 diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/mul.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_mul_f32_ansi.ll similarity index 100% rename from llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/mul.ll rename to llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_mul_f32_ansi.ll diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/mulc.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_mulc_f32_ansi.ll similarity index 100% rename from llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/mulc.ll rename to llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_mulc_f32_ansi.ll diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/loopsecvconstant.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_simple_dotprod_f32_ansi.ll similarity index 96% 
rename from llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/loopsecvconstant.ll rename to llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_simple_dotprod_f32_ansi.ll index aa9f66e46f4e8..f1e0295970d3c 100644 --- a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/loopsecvconstant.ll +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_simple_dotprod_f32_ansi.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4 ; RUN: opt -S -mtriple=riscv32-esp-unknown-elf -passes=riscv-loop-unroll-and-remainder -riscv-loop-unroll-and-remainder=true < %s | FileCheck %s -define dso_local float @test_loop(ptr nocapture noundef readonly %data1, ptr nocapture noundef readonly %data2) local_unnamed_addr { -; CHECK-LABEL: define dso_local float @test_loop( +define dso_local float @dsps_simple_dotprod_f32_ansi(ptr nocapture noundef readonly %data1, ptr nocapture noundef readonly %data2) local_unnamed_addr { +; CHECK-LABEL: define dso_local float @dsps_simple_dotprod_f32_ansi( ; CHECK-SAME: ptr noalias nocapture noundef readonly [[DATA1:%.*]], ptr noalias nocapture noundef readonly [[DATA2:%.*]]) local_unnamed_addr { ; CHECK-NEXT: entry: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sqrt.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_sqrt_f32_ansi.ll similarity index 71% rename from llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sqrt.ll rename to llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_sqrt_f32_ansi.ll index 89c891af40669..5767873f90adf 100644 --- a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sqrt.ll +++ b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_sqrt_f32_ansi.ll @@ -19,130 +19,130 @@ define dso_local noundef i32 @dsps_sqrt_f32_ansi(ptr noundef readonly %input, pt ; CHECK-NEXT: [[CMP6_NOT207:%.*]] = icmp ult i32 [[LEN]], 16 ; CHECK-NEXT: br i1 [[CMP6_NOT207]], label 
[[FOR_COND_PREHEADER_NEW2:%.*]], label [[FOR_BODY_MODIFY:%.*]] ; CHECK: for.cond.preheader.new2: -; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[TMP32:%.*]], [[FOR_BODY_MODIFY]] ], [ 0, [[FOR_COND_PREHEADER_NEW]] ] +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[TMP47:%.*]], [[FOR_BODY_MODIFY]] ], [ 0, [[FOR_COND_PREHEADER_NEW]] ] ; CHECK-NEXT: [[CMP85209:%.*]] = icmp slt i32 [[TMP0]], [[LEN]] ; CHECK-NEXT: br i1 [[CMP85209]], label [[FOR_BODY:%.*]], label [[RETURN]] ; CHECK: for.body.modify: -; CHECK-NEXT: [[I_012_MODIFY:%.*]] = phi i32 [ [[TMP32]], [[FOR_BODY_MODIFY]] ], [ 0, [[FOR_COND_PREHEADER_NEW]] ] +; CHECK-NEXT: [[I_012_MODIFY:%.*]] = phi i32 [ [[TMP47]], [[FOR_BODY_MODIFY]] ], [ 0, [[FOR_COND_PREHEADER_NEW]] ] ; CHECK-NEXT: [[ARRAYIDX_MODIFY:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_012_MODIFY]] ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX_MODIFY]], align 4 ; CHECK-NEXT: [[SHR_I_MODIFY:%.*]] = ashr i32 [[TMP1]], 1 -; CHECK-NEXT: [[ADD48:%.*]] = or disjoint i32 [[SHR_I_MODIFY]], 532365312 +; CHECK-NEXT: [[ADD_I_MODIFY:%.*]] = add nsw i32 [[SHR_I_MODIFY]], 532365312 ; CHECK-NEXT: [[ARRAYIDX5_MODIFY:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_012_MODIFY]] -; CHECK-NEXT: store i32 [[ADD48]], ptr [[ARRAYIDX5_MODIFY]], align 4 +; CHECK-NEXT: store i32 [[ADD_I_MODIFY]], ptr [[ARRAYIDX5_MODIFY]], align 4 ; CHECK-NEXT: [[ADD:%.*]] = or disjoint i32 [[I_012_MODIFY]], 1 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD]] ; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4 ; CHECK-NEXT: [[TMP3:%.*]] = ashr i32 [[TMP2]], 1 -; CHECK-NEXT: [[ADD50:%.*]] = or disjoint i32 [[TMP3]], 532365312 +; CHECK-NEXT: [[TMP4:%.*]] = add nsw i32 [[TMP3]], 532365312 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD]] -; CHECK-NEXT: store i32 [[ADD50]], ptr [[ARRAYIDX2]], align 4 +; CHECK-NEXT: store i32 [[TMP4]], ptr [[ARRAYIDX2]], align 4 ; CHECK-NEXT: 
[[ADD3:%.*]] = or disjoint i32 [[I_012_MODIFY]], 2 ; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD3]] -; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4 -; CHECK-NEXT: [[TMP5:%.*]] = ashr i32 [[TMP4]], 1 -; CHECK-NEXT: [[ADD52:%.*]] = or disjoint i32 [[TMP5]], 532365312 +; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = ashr i32 [[TMP5]], 1 +; CHECK-NEXT: [[TMP7:%.*]] = add nsw i32 [[TMP6]], 532365312 ; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD3]] -; CHECK-NEXT: store i32 [[ADD52]], ptr [[ARRAYIDX6]], align 4 +; CHECK-NEXT: store i32 [[TMP7]], ptr [[ARRAYIDX6]], align 4 ; CHECK-NEXT: [[ADD7:%.*]] = or disjoint i32 [[I_012_MODIFY]], 3 ; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD7]] -; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX8]], align 4 -; CHECK-NEXT: [[TMP7:%.*]] = ashr i32 [[TMP6]], 1 -; CHECK-NEXT: [[ADD54:%.*]] = or disjoint i32 [[TMP7]], 532365312 +; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX8]], align 4 +; CHECK-NEXT: [[TMP9:%.*]] = ashr i32 [[TMP8]], 1 +; CHECK-NEXT: [[TMP10:%.*]] = add nsw i32 [[TMP9]], 532365312 ; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD7]] -; CHECK-NEXT: store i32 [[ADD54]], ptr [[ARRAYIDX9]], align 4 +; CHECK-NEXT: store i32 [[TMP10]], ptr [[ARRAYIDX9]], align 4 ; CHECK-NEXT: [[ADD10:%.*]] = or disjoint i32 [[I_012_MODIFY]], 4 ; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD10]] -; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX11]], align 4 -; CHECK-NEXT: [[TMP9:%.*]] = ashr i32 [[TMP8]], 1 -; CHECK-NEXT: [[ADD56:%.*]] = or disjoint i32 [[TMP9]], 532365312 +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX11]], align 4 +; CHECK-NEXT: [[TMP12:%.*]] = ashr i32 [[TMP11]], 1 +; CHECK-NEXT: [[TMP13:%.*]] = add nsw i32 [[TMP12]], 532365312 
; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD10]] -; CHECK-NEXT: store i32 [[ADD56]], ptr [[ARRAYIDX12]], align 4 +; CHECK-NEXT: store i32 [[TMP13]], ptr [[ARRAYIDX12]], align 4 ; CHECK-NEXT: [[ADD13:%.*]] = or disjoint i32 [[I_012_MODIFY]], 5 ; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD13]] -; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX14]], align 4 -; CHECK-NEXT: [[TMP11:%.*]] = ashr i32 [[TMP10]], 1 -; CHECK-NEXT: [[ADD58:%.*]] = or disjoint i32 [[TMP11]], 532365312 +; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX14]], align 4 +; CHECK-NEXT: [[TMP15:%.*]] = ashr i32 [[TMP14]], 1 +; CHECK-NEXT: [[TMP16:%.*]] = add nsw i32 [[TMP15]], 532365312 ; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD13]] -; CHECK-NEXT: store i32 [[ADD58]], ptr [[ARRAYIDX15]], align 4 +; CHECK-NEXT: store i32 [[TMP16]], ptr [[ARRAYIDX15]], align 4 ; CHECK-NEXT: [[ADD16:%.*]] = or disjoint i32 [[I_012_MODIFY]], 6 ; CHECK-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD16]] -; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX17]], align 4 -; CHECK-NEXT: [[TMP13:%.*]] = ashr i32 [[TMP12]], 1 -; CHECK-NEXT: [[ADD60:%.*]] = or disjoint i32 [[TMP13]], 532365312 +; CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX17]], align 4 +; CHECK-NEXT: [[TMP18:%.*]] = ashr i32 [[TMP17]], 1 +; CHECK-NEXT: [[TMP19:%.*]] = add nsw i32 [[TMP18]], 532365312 ; CHECK-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD16]] -; CHECK-NEXT: store i32 [[ADD60]], ptr [[ARRAYIDX18]], align 4 +; CHECK-NEXT: store i32 [[TMP19]], ptr [[ARRAYIDX18]], align 4 ; CHECK-NEXT: [[ADD19:%.*]] = or disjoint i32 [[I_012_MODIFY]], 7 ; CHECK-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD19]] -; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX20]], align 4 -; CHECK-NEXT: [[TMP15:%.*]] 
= ashr i32 [[TMP14]], 1 -; CHECK-NEXT: [[ADD62:%.*]] = or disjoint i32 [[TMP15]], 532365312 +; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARRAYIDX20]], align 4 +; CHECK-NEXT: [[TMP21:%.*]] = ashr i32 [[TMP20]], 1 +; CHECK-NEXT: [[TMP22:%.*]] = add nsw i32 [[TMP21]], 532365312 ; CHECK-NEXT: [[ARRAYIDX21:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD19]] -; CHECK-NEXT: store i32 [[ADD62]], ptr [[ARRAYIDX21]], align 4 +; CHECK-NEXT: store i32 [[TMP22]], ptr [[ARRAYIDX21]], align 4 ; CHECK-NEXT: [[ADD22:%.*]] = or disjoint i32 [[I_012_MODIFY]], 8 ; CHECK-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD22]] -; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX23]], align 4 -; CHECK-NEXT: [[TMP17:%.*]] = ashr i32 [[TMP16]], 1 -; CHECK-NEXT: [[ADD64:%.*]] = or disjoint i32 [[TMP17]], 532365312 +; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX23]], align 4 +; CHECK-NEXT: [[TMP24:%.*]] = ashr i32 [[TMP23]], 1 +; CHECK-NEXT: [[TMP25:%.*]] = add nsw i32 [[TMP24]], 532365312 ; CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD22]] -; CHECK-NEXT: store i32 [[ADD64]], ptr [[ARRAYIDX24]], align 4 +; CHECK-NEXT: store i32 [[TMP25]], ptr [[ARRAYIDX24]], align 4 ; CHECK-NEXT: [[ADD25:%.*]] = or disjoint i32 [[I_012_MODIFY]], 9 ; CHECK-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD25]] -; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX26]], align 4 -; CHECK-NEXT: [[TMP19:%.*]] = ashr i32 [[TMP18]], 1 -; CHECK-NEXT: [[ADD66:%.*]] = or disjoint i32 [[TMP19]], 532365312 +; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[ARRAYIDX26]], align 4 +; CHECK-NEXT: [[TMP27:%.*]] = ashr i32 [[TMP26]], 1 +; CHECK-NEXT: [[TMP28:%.*]] = add nsw i32 [[TMP27]], 532365312 ; CHECK-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD25]] -; CHECK-NEXT: store i32 [[ADD66]], ptr [[ARRAYIDX27]], align 4 +; CHECK-NEXT: store i32 [[TMP28]], ptr 
[[ARRAYIDX27]], align 4 ; CHECK-NEXT: [[ADD28:%.*]] = or disjoint i32 [[I_012_MODIFY]], 10 ; CHECK-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD28]] -; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARRAYIDX29]], align 4 -; CHECK-NEXT: [[TMP21:%.*]] = ashr i32 [[TMP20]], 1 -; CHECK-NEXT: [[ADD68:%.*]] = or disjoint i32 [[TMP21]], 532365312 +; CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[ARRAYIDX29]], align 4 +; CHECK-NEXT: [[TMP30:%.*]] = ashr i32 [[TMP29]], 1 +; CHECK-NEXT: [[TMP31:%.*]] = add nsw i32 [[TMP30]], 532365312 ; CHECK-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD28]] -; CHECK-NEXT: store i32 [[ADD68]], ptr [[ARRAYIDX30]], align 4 +; CHECK-NEXT: store i32 [[TMP31]], ptr [[ARRAYIDX30]], align 4 ; CHECK-NEXT: [[ADD31:%.*]] = or disjoint i32 [[I_012_MODIFY]], 11 ; CHECK-NEXT: [[ARRAYIDX32:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD31]] -; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX32]], align 4 -; CHECK-NEXT: [[TMP23:%.*]] = ashr i32 [[TMP22]], 1 -; CHECK-NEXT: [[ADD70:%.*]] = or disjoint i32 [[TMP23]], 532365312 +; CHECK-NEXT: [[TMP32:%.*]] = load i32, ptr [[ARRAYIDX32]], align 4 +; CHECK-NEXT: [[TMP33:%.*]] = ashr i32 [[TMP32]], 1 +; CHECK-NEXT: [[TMP34:%.*]] = add nsw i32 [[TMP33]], 532365312 ; CHECK-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD31]] -; CHECK-NEXT: store i32 [[ADD70]], ptr [[ARRAYIDX33]], align 4 +; CHECK-NEXT: store i32 [[TMP34]], ptr [[ARRAYIDX33]], align 4 ; CHECK-NEXT: [[ADD34:%.*]] = or disjoint i32 [[I_012_MODIFY]], 12 ; CHECK-NEXT: [[ARRAYIDX35:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD34]] -; CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[ARRAYIDX35]], align 4 -; CHECK-NEXT: [[TMP25:%.*]] = ashr i32 [[TMP24]], 1 -; CHECK-NEXT: [[ADD72:%.*]] = or disjoint i32 [[TMP25]], 532365312 +; CHECK-NEXT: [[TMP35:%.*]] = load i32, ptr [[ARRAYIDX35]], align 4 +; CHECK-NEXT: [[TMP36:%.*]] = ashr 
i32 [[TMP35]], 1 +; CHECK-NEXT: [[TMP37:%.*]] = add nsw i32 [[TMP36]], 532365312 ; CHECK-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD34]] -; CHECK-NEXT: store i32 [[ADD72]], ptr [[ARRAYIDX36]], align 4 +; CHECK-NEXT: store i32 [[TMP37]], ptr [[ARRAYIDX36]], align 4 ; CHECK-NEXT: [[ADD37:%.*]] = or disjoint i32 [[I_012_MODIFY]], 13 ; CHECK-NEXT: [[ARRAYIDX38:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD37]] -; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[ARRAYIDX38]], align 4 -; CHECK-NEXT: [[TMP27:%.*]] = ashr i32 [[TMP26]], 1 -; CHECK-NEXT: [[ADD74:%.*]] = or disjoint i32 [[TMP27]], 532365312 +; CHECK-NEXT: [[TMP38:%.*]] = load i32, ptr [[ARRAYIDX38]], align 4 +; CHECK-NEXT: [[TMP39:%.*]] = ashr i32 [[TMP38]], 1 +; CHECK-NEXT: [[TMP40:%.*]] = add nsw i32 [[TMP39]], 532365312 ; CHECK-NEXT: [[ARRAYIDX39:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD37]] -; CHECK-NEXT: store i32 [[ADD74]], ptr [[ARRAYIDX39]], align 4 +; CHECK-NEXT: store i32 [[TMP40]], ptr [[ARRAYIDX39]], align 4 ; CHECK-NEXT: [[ADD40:%.*]] = or disjoint i32 [[I_012_MODIFY]], 14 ; CHECK-NEXT: [[ARRAYIDX41:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD40]] -; CHECK-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARRAYIDX41]], align 4 -; CHECK-NEXT: [[TMP29:%.*]] = ashr i32 [[TMP28]], 1 -; CHECK-NEXT: [[ADD76:%.*]] = or disjoint i32 [[TMP29]], 532365312 +; CHECK-NEXT: [[TMP41:%.*]] = load i32, ptr [[ARRAYIDX41]], align 4 +; CHECK-NEXT: [[TMP42:%.*]] = ashr i32 [[TMP41]], 1 +; CHECK-NEXT: [[TMP43:%.*]] = add nsw i32 [[TMP42]], 532365312 ; CHECK-NEXT: [[ARRAYIDX42:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD40]] -; CHECK-NEXT: store i32 [[ADD76]], ptr [[ARRAYIDX42]], align 4 +; CHECK-NEXT: store i32 [[TMP43]], ptr [[ARRAYIDX42]], align 4 ; CHECK-NEXT: [[ADD43:%.*]] = or disjoint i32 [[I_012_MODIFY]], 15 ; CHECK-NEXT: [[ARRAYIDX44:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[ADD43]] -; CHECK-NEXT: 
[[TMP30:%.*]] = load i32, ptr [[ARRAYIDX44]], align 4 -; CHECK-NEXT: [[TMP31:%.*]] = ashr i32 [[TMP30]], 1 -; CHECK-NEXT: [[ADD78:%.*]] = or disjoint i32 [[TMP31]], 532365312 +; CHECK-NEXT: [[TMP44:%.*]] = load i32, ptr [[ARRAYIDX44]], align 4 +; CHECK-NEXT: [[TMP45:%.*]] = ashr i32 [[TMP44]], 1 +; CHECK-NEXT: [[TMP46:%.*]] = add nsw i32 [[TMP45]], 532365312 ; CHECK-NEXT: [[ARRAYIDX45:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[ADD43]] -; CHECK-NEXT: store i32 [[ADD78]], ptr [[ARRAYIDX45]], align 4 -; CHECK-NEXT: [[TMP32]] = add nuw i32 [[I_012_MODIFY]], 16 -; CHECK-NEXT: [[EXITCOND_NOT_MODIFY:%.*]] = icmp sgt i32 [[TMP32]], [[SUB]] +; CHECK-NEXT: store i32 [[TMP46]], ptr [[ARRAYIDX45]], align 4 +; CHECK-NEXT: [[TMP47]] = add nuw i32 [[I_012_MODIFY]], 16 +; CHECK-NEXT: [[EXITCOND_NOT_MODIFY:%.*]] = icmp sgt i32 [[TMP47]], [[SUB]] ; CHECK-NEXT: br i1 [[EXITCOND_NOT_MODIFY]], label [[FOR_COND_PREHEADER_NEW2]], label [[FOR_BODY_MODIFY]] ; CHECK: for.body: ; CHECK-NEXT: [[I_012:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[TMP0]], [[FOR_COND_PREHEADER_NEW2]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_012]] -; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[SHR_I:%.*]] = ashr i32 [[TMP33]], 1 +; CHECK-NEXT: [[TMP48:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[SHR_I:%.*]] = ashr i32 [[TMP48]], 1 ; CHECK-NEXT: [[ADD_I:%.*]] = add nsw i32 [[SHR_I]], 532365312 ; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_012]] ; CHECK-NEXT: store i32 [[ADD_I]], ptr [[ARRAYIDX5]], align 4 @@ -152,8 +152,8 @@ define dso_local noundef i32 @dsps_sqrt_f32_ansi(ptr noundef readonly %input, pt ; CHECK: for.body.clone: ; CHECK-NEXT: [[I_012_CLONE:%.*]] = phi i32 [ [[INC_CLONE:%.*]], [[FOR_BODY_CLONE]] ], [ 0, [[FOR_COND_PREHEADER]] ] ; CHECK-NEXT: [[ARRAYIDX_CLONE:%.*]] = getelementptr inbounds float, ptr [[INPUT]], i32 [[I_012_CLONE]] -; 
CHECK-NEXT: [[TMP34:%.*]] = load i32, ptr [[ARRAYIDX_CLONE]], align 4 -; CHECK-NEXT: [[SHR_I_CLONE:%.*]] = ashr i32 [[TMP34]], 1 +; CHECK-NEXT: [[TMP49:%.*]] = load i32, ptr [[ARRAYIDX_CLONE]], align 4 +; CHECK-NEXT: [[SHR_I_CLONE:%.*]] = ashr i32 [[TMP49]], 1 ; CHECK-NEXT: [[ADD_I_CLONE:%.*]] = add nsw i32 [[SHR_I_CLONE]], 532365312 ; CHECK-NEXT: [[ARRAYIDX5_CLONE:%.*]] = getelementptr inbounds float, ptr [[OUTPUT]], i32 [[I_012_CLONE]] ; CHECK-NEXT: store i32 [[ADD_I_CLONE]], ptr [[ARRAYIDX5_CLONE]], align 4 diff --git a/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sub.ll b/llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_sub_f32_ansi.ll similarity index 100% rename from llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/sub.ll rename to llvm/test/CodeGen/RISCV/RISCVLoopUnrollAndRemainder/dsps_sub_f32_ansi.ll From 9b44599f532bd8077580c457c3a336c30d4f0d68 Mon Sep 17 00:00:00 2001 From: "chen.qian" Date: Fri, 6 Dec 2024 18:43:13 +0800 Subject: [PATCH 271/289] [Pass] fix sqrt add to or bug and getFirst/LastInst refactor --- .../RISCV/RISCVLoopUnrollAndRemainder.cpp | 164 ++++++++++-------- 1 file changed, 89 insertions(+), 75 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVLoopUnrollAndRemainder.cpp b/llvm/lib/Target/RISCV/RISCVLoopUnrollAndRemainder.cpp index a3e044a1a54bb..35c126ba4b61e 100644 --- a/llvm/lib/Target/RISCV/RISCVLoopUnrollAndRemainder.cpp +++ b/llvm/lib/Target/RISCV/RISCVLoopUnrollAndRemainder.cpp @@ -85,6 +85,7 @@ #include "llvm/Support/FileSystem.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/IPO.h" +#include "llvm/Transforms/InstCombine/InstCombine.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Scalar/DCE.h" #include "llvm/Transforms/Scalar/DeadStoreElimination.h" @@ -171,21 +172,19 @@ static ICmpInst *getLastICmpInstWithPredicate(BasicBlock *BB, return lastICmp; } -// Helper function to get the first ICmp instruction in a basic block -static ICmpInst *getFirstICmpInst(BasicBlock *BB) { 
+template static T *getFirstInst(BasicBlock *BB) { for (Instruction &I : *BB) { - if (auto *CI = dyn_cast(&I)) { - return CI; + if (T *Inst = dyn_cast(&I)) { + return Inst; } } return nullptr; } -// Helper function to get the last ICmp instruction in a basic block -static ICmpInst *getLastICmpInst(BasicBlock *BB) { - for (auto it = BB->rbegin(); it != BB->rend(); ++it) { - if (auto *icmp = dyn_cast(&*it)) { - return icmp; +template static T *getLastInst(BasicBlock *BB) { + for (Instruction &I : reverse(*BB)) { + if (T *Inst = dyn_cast(&I)) { + return Inst; } } return nullptr; @@ -239,16 +238,6 @@ static PHINode *getLastI32Phi(BasicBlock *BB) { return nullptr; } -// Helper function to get the last PHI node in a basic block -static PHINode *getLastPhi(BasicBlock *BB) { - for (auto it = BB->rbegin(); it != BB->rend(); ++it) { - if (auto *Phi = dyn_cast(&*it)) { - return Phi; - } - } - return nullptr; -} - // Helper function to get the first CallInst with a specific name in a basic // block static CallInst *getFirstCallInstWithName(BasicBlock *BB, StringRef Name) { @@ -406,6 +395,38 @@ static void movePHINodesToTop(BasicBlock &BB, } } +static void modifyFirdAddToOr(BasicBlock *ClonedForBody) { + SmallVector addInsts; + + // Collect all add instructions that meet the criteria + for (auto &I : *ClonedForBody) { + if (auto *binOp = dyn_cast(&I)) { + if (binOp->getOpcode() == Instruction::Add && binOp->hasNoSignedWrap() && + binOp->hasNoUnsignedWrap()) { + addInsts.push_back(binOp); + } + } + } + if (addInsts.empty()) { + return; + } + // Replace each add instruction with an or disjoint instruction + for (auto it = addInsts.begin(); it != std::prev(addInsts.end()); ++it) { + auto *addInst = *it; + // Create a new or disjoint instruction + Instruction *orInst = + BinaryOperator::CreateDisjoint(Instruction::Or, addInst->getOperand(0), + addInst->getOperand(1), "add", addInst); + + // Replace all uses of the add instruction + addInst->replaceAllUsesWith(orInst); + + // Delete 
the original add instruction + addInst->eraseFromParent(); + orInst->setName("add"); + } +} + // Helper function to update predecessors to point to a new preheader static void updatePredecessorsToPreheader(BasicBlock *ForBody, BasicBlock *ForBodyPreheader) { @@ -1151,7 +1172,7 @@ static Value *expandForCondPreheader( } // Get the icmp instruction in ForCondPreheader - ICmpInst *icmpInst = getFirstICmpInst(ForCondPreheader); + ICmpInst *icmpInst = getFirstInst(ForCondPreheader); // Ensure we found the icmp instruction assert(icmpInst && "Failed to find icmp instruction in ForCondPreheader"); @@ -1278,7 +1299,7 @@ static void insertUnusedInstructionsBeforeIcmp(PHINode *phiI32InClonedForBody, static void modifyClonedForBody(BasicBlock *ClonedForBody) { - ICmpInst *lastIcmpEq = getLastICmpInst(ClonedForBody); + ICmpInst *lastIcmpEq = getLastInst(ClonedForBody); assert(lastIcmpEq && "Failed to find last icmp eq instruction in ClonedForBody"); @@ -1472,7 +1493,7 @@ static void modifyForCondPreheader2(BasicBlock *ClonedForBody, } // Find operand 1 of the icmp instruction from ClonedForBody - ICmpInst *firstIcmp = getFirstICmpInst(ClonedForBody); + ICmpInst *firstIcmp = getFirstInst(ClonedForBody); assert(firstIcmp && "Unable to find icmp instruction in ClonedForBody"); Value *IcmpOperand1 = firstIcmp->getOperand(1); @@ -1549,7 +1570,7 @@ static void modifyForCondPreheader2(BasicBlock *ClonedForBody, static Value *modifyClonedForBodyPreheader(BasicBlock *ClonedForBodyPreheader, BasicBlock *ForBody) { - ICmpInst *firstIcmp = getFirstICmpInst(ForBody); + ICmpInst *firstIcmp = getFirstInst(ForBody); assert(firstIcmp && "Unable to find icmp instruction in ForBody"); Value *IcmpOperand1 = firstIcmp->getOperand(1); @@ -2011,35 +2032,27 @@ static Instruction *modifyAddToOrInClonedForBody(BasicBlock *ClonedForBody) { return orInst; } -static void modifyAddToOr(BasicBlock *ClonedForBody) { - SmallVector addInsts; +static void runInstCombinePass(Function &F) { + // Create necessary 
analysis managers + LoopAnalysisManager LAM; + FunctionAnalysisManager FAM; + CGSCCAnalysisManager CGAM; + ModuleAnalysisManager MAM; - // Collect all add instructions that meet the criteria - for (auto &I : *ClonedForBody) { - if (auto *binOp = dyn_cast(&I)) { - if (binOp->getOpcode() == Instruction::Add) { - addInsts.push_back(binOp); - } - } - } - if (addInsts.empty()) { - return; - } - // Replace each add instruction with an or disjoint instruction - for (auto it = addInsts.begin(); it != std::prev(addInsts.end()); ++it) { - auto *addInst = *it; - // Create a new or disjoint instruction - Instruction *orInst = - BinaryOperator::CreateDisjoint(Instruction::Or, addInst->getOperand(0), - addInst->getOperand(1), "add", addInst); + // Create pass builder + PassBuilder PB; - // Replace all uses of the add instruction - addInst->replaceAllUsesWith(orInst); + // Register analyses + PB.registerModuleAnalyses(MAM); + PB.registerCGSCCAnalyses(CGAM); + PB.registerFunctionAnalyses(FAM); + PB.registerLoopAnalyses(LAM); + PB.crossRegisterProxies(LAM, FAM, CGAM, MAM); - // Delete the original add instruction - addInst->eraseFromParent(); - orInst->setName("add"); - } + // Create function-level optimization pipeline + FunctionPassManager FPM; + FPM.addPass(InstCombinePass()); + FPM.run(F, FAM); } static Value *unrolladdcClonedForBody(BasicBlock *ClonedForBody, @@ -2058,7 +2071,7 @@ static Value *unrolladdcClonedForBody(BasicBlock *ClonedForBody, assert(firstNonPHI && orInst && "Start or end instruction not found"); // Find the icmp instruction - Instruction *icmpInst = getFirstICmpInst(ClonedForBody); + Instruction *icmpInst = getFirstInst(ClonedForBody); // Ensure that the icmp instruction is found assert(icmpInst && "icmp instruction not found"); @@ -2298,7 +2311,7 @@ static void unrollAddc(Function &F, ScalarEvolution &SE, Loop *L, assert(ForCondPreheader && "Expected to find for.cond.preheader!"); expandForCondPreheaderaddc(F, ForCondPreheader, ClonedForBody, ForBody, sub, 
unroll_factor); - modifyAddToOr(ClonedForBody); + runInstCombinePass(F); groupAndReorderInstructions(ClonedForBody); // Verify the function @@ -2816,11 +2829,11 @@ static void postUnrollLoopWithCount(Function &F, Loop *L, int unroll_count) { insertPhiNodesForFMulAdd(LoopHeader, LoopPreheader, FMulAddCalls); movePHINodesToTop(*LoopHeader); - modifyAddToOr(LoopHeader); + runInstCombinePass(F); groupAndReorderInstructions(LoopHeader); // Create for.end basic block after LoopHeader - ICmpInst *LastICmp = getLastICmpInst(LoopHeader); + ICmpInst *LastICmp = getLastInst(LoopHeader); LastICmp->setPredicate(ICmpInst::ICMP_ULT); // Get the first operand of LastICmp Value *Operand1 = LastICmp->getOperand(1); @@ -3023,7 +3036,7 @@ static bool shouldUnrollDotprodType(Function &F, LoopInfo *LI) { } static std::pair modifyEntryBB(BasicBlock &entryBB) { - ICmpInst *icmp = getLastICmpInst(&entryBB); + ICmpInst *icmp = getLastInst(&entryBB); assert(icmp && "icmp not found"); Value *start_index = icmp->getOperand(0); Value *end_index = icmp->getOperand(1); @@ -3115,7 +3128,7 @@ static void postUnrollLoopWithVariable(Function &F, Loop *L, int unroll_count) { temp->insertBefore(LoopPreheader->getTerminator()); } - ICmpInst *lastICmp = getLastICmpInst(ForBody7); + ICmpInst *lastICmp = getLastInst(ForBody7); assert(lastICmp && "icmp not found"); lastICmp->setOperand(1, Sub); lastICmp->setPredicate(ICmpInst::ICMP_SLT); @@ -3552,7 +3565,7 @@ static std::tuple modifyOuterLoop4(Loop *L, BasicBlock *ForBodyMerged, BasicBlock *CloneForBodyPreheader) { BasicBlock *BB = L->getHeader(); - PHINode *phi = getLastPhi(BB); + PHINode *phi = getLastInst(BB); // Add new instructions IRBuilder<> Builder(BB); Builder.SetInsertPoint(phi->getNextNode()); @@ -3596,7 +3609,7 @@ static void modifyInnerLoop4(Loop *L, BasicBlock *ForBodyMerged, Value *Sub, movePHINodesToTop(*ForBodyMerged); groupAndReorderInstructions(ForBodyMerged); - ICmpInst *LastICmp = getLastICmpInst(ForBodyMerged); + ICmpInst *LastICmp = 
getLastInst(ForBodyMerged); LastICmp->setPredicate(ICmpInst::ICMP_ULT); LastICmp->setOperand(1, Sub); swapTerminatorSuccessors(ForBodyMerged); @@ -3653,7 +3666,8 @@ static void modifyInnerLoop4(Loop *L, BasicBlock *ForBodyMerged, Value *Sub, AddPHI->addIncoming(Add2, NewForEnd); Value *phifloatincomingvalue0 = getFirstCallInstWithName(CloneForBody, "llvm.fmuladd.f32"); - Value *phii32incomingvalue0 = getLastICmpInst(CloneForBody)->getOperand(0); + Value *phii32incomingvalue0 = + getLastInst(CloneForBody)->getOperand(0); for (PHINode &Phi : CloneForBody->phis()) { if (Phi.getType()->isIntegerTy(32)) { Phi.setIncomingValue(0, AddPHI); @@ -3676,7 +3690,7 @@ static void modifyInnerLoop4(Loop *L, BasicBlock *ForBodyMerged, Value *Sub, static std::tuple modifyOuterLoop8(Loop *L) { BasicBlock *BB = L->getHeader(); - ICmpInst *LastICmp = getLastICmpInst(BB); + ICmpInst *LastICmp = getLastInst(BB); LastICmp->setPredicate(ICmpInst::ICMP_ULT); swapTerminatorSuccessors(BB); @@ -3714,7 +3728,7 @@ static std::tuple modifyOuterLoop16(Loop *L) { BasicBlock *BB = L->getHeader(); BasicBlock *BBLoopPreHeader = L->getLoopPreheader(); - ICmpInst *LastICmp = getLastICmpInst(BB); + ICmpInst *LastICmp = getLastInst(BB); LastICmp->setPredicate(ICmpInst::ICMP_ULT); swapTerminatorSuccessors(BB); @@ -3763,7 +3777,7 @@ static void modifyInnerLoop(Loop *L, BasicBlock *ForBodyMerged, Value *Add60, movePHINodesToTop(*ForBodyMerged); groupAndReorderInstructions(ForBodyMerged); - ICmpInst *LastICmp = getLastICmpInst(ForBodyMerged); + ICmpInst *LastICmp = getLastInst(ForBodyMerged); LastICmp->setPredicate(ICmpInst::ICMP_ULT); LastICmp->setOperand(1, Add60); swapTerminatorSuccessors(ForBodyMerged); @@ -3873,7 +3887,7 @@ static void modifyInnerLoop(Loop *L, BasicBlock *ForBodyMerged, Value *Add60, Value *operand1 = unroll_count == 16 ? 
getFirstI32Phi(OuterBB) - : getLastICmpInst(CloneForBody)->getOperand(1); + : getLastInst(CloneForBody)->getOperand(1); // Create a new comparison instruction ICmpInst *NewCmp = new ICmpInst(ICmpInst::ICMP_UGT, PhiSum, operand1, "cmp182.not587"); @@ -3890,7 +3904,8 @@ static void modifyInnerLoop(Loop *L, BasicBlock *ForBodyMerged, Value *Add60, getFirstCallInstWithName(CloneForBody, "llvm.fmuladd.f32"); for (PHINode &Phi : CloneForBody->phis()) { if (Phi.getType()->isIntegerTy(32)) { - Phi.setIncomingValue(0, getLastICmpInst(CloneForBody)->getOperand(0)); + Phi.setIncomingValue(0, + getLastInst(CloneForBody)->getOperand(0)); Phi.setIncomingBlock(0, CloneForBody); Phi.setIncomingValue(1, PhiSum); Phi.setIncomingBlock(1, ForEnd164); @@ -3981,7 +3996,7 @@ static void modifyFirstCloneForBody(BasicBlock *CloneForBody, lastAddInst = &I; } } - ICmpInst *LastCmpInst = getLastICmpInst(CloneForBody); + ICmpInst *LastCmpInst = getLastInst(CloneForBody); LastCmpInst->setOperand(0, lastAddInst); LastCmpInst->setOperand(1, Operand1); FirstI32Phi->setIncomingValue(1, lastAddInst); @@ -4045,7 +4060,7 @@ static void modifyFirdFirstLoop(Function &F, Loop *L, BasicBlock *ForBodyMerged, getFirstI32Phi(ForCond23Preheader)->getIncomingBlock(0); Instruction *FirstI32Phi = getFirstI32Phi(ForCondCleanup3); - ICmpInst *LastICmp = getLastICmpInst(ForCondCleanup3); + ICmpInst *LastICmp = getLastInst(ForCondCleanup3); // Create new add instruction IRBuilder<> Builder(LastICmp); Value *Add269 = Builder.CreateNSWAdd( @@ -4067,7 +4082,7 @@ static void modifyFirdFirstLoop(Function &F, Loop *L, BasicBlock *ForBodyMerged, N_069->setIncomingValue(1, Add281); - ICmpInst *LastICmpInPreheader = getLastICmpInst(ForCond23Preheader); + ICmpInst *LastICmpInPreheader = getLastInst(ForCond23Preheader); // Create new phi node PHINode *N_0_lcssa = PHINode::Create(Type::getInt32Ty(F.getContext()), 2, "n.0.lcssa", LastICmpInPreheader); @@ -4093,7 +4108,7 @@ static void modifyFirdFirstLoop(Function &F, Loop *L, 
BasicBlock *ForBodyMerged, Value *Add11 = Builder.CreateAdd(Operand1, CoeffPosLcssa); ForBody27LrPh->getTerminator()->setSuccessor(0, CloneForBody); - ICmpInst *LastICmpInForBodyMerged = getLastICmpInst(ForBodyMerged); + ICmpInst *LastICmpInForBodyMerged = getLastInst(ForBodyMerged); LastICmpInForBodyMerged->setOperand(1, Operand1); LastICmpInForBodyMerged->setOperand(0, Inc20_7); @@ -4159,9 +4174,8 @@ static void modifyFirdFirstLoop(Function &F, Loop *L, BasicBlock *ForBodyMerged, CI->setOperand(2, PHI); } movePHINodesToTop(*ForBodyMerged); - modifyAddToOr(ForBodyMerged); - - ICmpInst *LastICmpForBodyMerged = getLastICmpInst(ForBodyMerged); + modifyFirdAddToOr(ForBodyMerged); + ICmpInst *LastICmpForBodyMerged = getLastInst(ForBodyMerged); LastICmpForBodyMerged->setPredicate(ICmpInst::ICMP_SGT); cast(LastICmpForBodyMerged->getOperand(0)) ->setOperand(0, getFirstI32Phi(ForBodyMerged)); @@ -4256,7 +4270,7 @@ static void modifyFirdFirstLoop(Function &F, Loop *L, BasicBlock *ForBodyMerged, CoeffPosLcssaPhi->addIncoming(SubResult, ForCondCleanup26LoopExit); // eraseAllStoreInstInBB(ForCondCleanup26); - ICmpInst *LastICmpForCondCleanup26 = getLastICmpInst(ForCondCleanup26); + ICmpInst *LastICmpForCondCleanup26 = getLastInst(ForCondCleanup26); LastICmpForCondCleanup26->setPredicate(ICmpInst::ICMP_SLT); PHINode *FirstI32ForCondCleanup3 = getFirstI32Phi(ForCondCleanup3); @@ -4314,7 +4328,7 @@ static void modifyFirdFirstLoop(Function &F, Loop *L, BasicBlock *ForBodyMerged, 0, ConstantInt::get(getLastI32Phi(ForCond130Preheader)->getType(), 0)); LastI32Phi130->setIncomingValue(1, AndResult); - ICmpInst *LastICmp130 = getLastICmpInst(ForCond130Preheader); + ICmpInst *LastICmp130 = getLastInst(ForCond130Preheader); LastICmp130->setOperand(1, FirstI32ForCondCleanup3); PHINode *LastI32PhiClone = getLastFloatPhi(CloneForBody); @@ -4434,9 +4448,8 @@ static void modifyFirdSecondLoop(Function &F, Loop *L, Add76310->addIncoming(Add76, ForBodyMerged); movePHINodesToTop(*ForBodyMerged); 
- modifyAddToOr(ForBodyMerged); - - ICmpInst *LastICmp = getLastICmpInst(ForBodyMerged); + modifyFirdAddToOr(ForBodyMerged); + ICmpInst *LastICmp = getLastInst(ForBodyMerged); LastICmp->setPredicate(ICmpInst::ICMP_SGT); cast(Add76)->moveBefore(LastICmp); LastICmp->setOperand(0, Add76); @@ -5032,6 +5045,7 @@ RISCVLoopUnrollAndRemainderPass::run(Function &F, FunctionAnalysisManager &AM) { if (currentUnrollType == UnrollType::FIRD) { addLegacyCommonOptimizationPasses(F); } + // Verify function if (verifyFunction(F, &errs())) { LLVM_DEBUG(errs() << "Function verification failed\n"); From 052fd68b48b20dd199f695a1a8b68d08a5f97049 Mon Sep 17 00:00:00 2001 From: Alexander Richardson Date: Mon, 2 Dec 2024 11:53:45 -0800 Subject: [PATCH 272/289] [Xtensa] Default to unsigned char This matches GCC. Partially addresses https://github.com/llvm/llvm-project/pull/115964 Pull Request: https://github.com/llvm/llvm-project/pull/115967 --- clang/lib/Driver/ToolChains/Clang.cpp | 1 + clang/test/Driver/xtensa-char.c | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 clang/test/Driver/xtensa-char.c diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index b02a1db5d9520..e9b956a069f93 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -1334,6 +1334,7 @@ static bool isSignedCharDefault(const llvm::Triple &Triple) { case llvm::Triple::riscv64: case llvm::Triple::systemz: case llvm::Triple::xcore: + case llvm::Triple::xtensa: return false; } } diff --git a/clang/test/Driver/xtensa-char.c b/clang/test/Driver/xtensa-char.c new file mode 100644 index 0000000000000..13f8f67727e75 --- /dev/null +++ b/clang/test/Driver/xtensa-char.c @@ -0,0 +1,4 @@ +/// Check that char is unsigned by default. 
+// RUN: %clang -### %s --target=xtensa -c 2>&1 | FileCheck %s +// CHECK: "-cc1" "-triple" "xtensa" +// CHECK-SAME: "-fno-signed-char" From f04a630297a7b77543b25910dc015384b5ff4c6a Mon Sep 17 00:00:00 2001 From: "chen.qian" Date: Thu, 31 Oct 2024 16:52:18 +0800 Subject: [PATCH 273/289] esp/ci: Add test_esp_dsp_fpu_optimization new gitlab ci job --- .gitlab-ci.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 7952b6641fd37..1613ee26d42f5 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -191,9 +191,11 @@ build_and_test: artifacts: paths: - ${BUILD_DIR}/*.log + - ./esp-dsp/scripts/ when: always expire_in: 1 day variables: + TEST_APP_ESP_DSP_CUSTOM_BRANCH: "llvm-19.1.2-optimization-test" after_script: # help to identify that build failed due to OOM - > @@ -213,6 +215,7 @@ build_and_test: -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=ON -DLLDB_INCLUDE_TESTS=OFF + -DLLVM_TOOLCHAIN_ENABLED_TARGETS="RISCV" -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=Xtensa -B ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/build.log - export CUR_USER=$(whoami); @@ -227,6 +230,11 @@ build_and_test: - chmod o+w ${BUILD_PATH}/lld-tests.log; - runuser -u test_runner -- ninja -C ${BUILD_PATH} check-lld 2>&1 > ${BUILD_PATH}/lld-tests.log; - chown -R ${CUR_USER} ${BUILD_PATH}; + - export PATH=${BUILD_PATH}/bin:${PATH} + - git clone --shallow-submodules --recursive --single-branch --branch $TEST_APP_ESP_DSP_CUSTOM_BRANCH -- https://gitlab-ci-token:${CI_JOB_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/idf/esp-dsp.git esp-dsp + - pushd $PWD/esp-dsp/ + - echo ${LLVM_PROJECT_PATH}/llvm/utils/update_test_checks.py + - ./test_all.sh ${LLVM_PROJECT_PATH}/llvm/utils/update_test_checks.py .build_linux-gnu_template: extends: .build_toolchain_template From c101647ac4a6481581e7514170451debacd18451 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Sat, 7 Dec 2024 21:13:03 +0300 Subject: [PATCH 274/289] [Xtensa] Implement XtensaNullTargetStreamer It fixes crash in Xtensa 
AsmParser::run() during ModuleSummaryIndexAnalysis pass. --- llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp | 2 ++ .../lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp | 8 ++++++++ .../lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.h | 7 +++---- llvm/test/CodeGen/Xtensa/null-streamer.ll | 7 +++++++ 4 files changed, 20 insertions(+), 4 deletions(-) create mode 100644 llvm/test/CodeGen/Xtensa/null-streamer.ll diff --git a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp index 9a5d645dc68e2..9790e9acfec35 100644 --- a/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp +++ b/llvm/lib/Target/Xtensa/AsmParser/XtensaAsmParser.cpp @@ -54,6 +54,8 @@ class XtensaAsmParser : public MCTargetAsmParser { SMLoc getLoc() const { return getParser().getTok().getLoc(); } XtensaTargetStreamer &getTargetStreamer() { + assert(getParser().getStreamer().getTargetStreamer() && + "do not have a target streamer"); MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer(); return static_cast(TS); } diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp index 09ebd1850906e..958c83a3040e7 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaMCTargetDesc.cpp @@ -85,6 +85,10 @@ createXtensaObjectTargetStreamer(MCStreamer &S, const MCSubtargetInfo &STI) { return new XtensaTargetELFStreamer(S); } +static MCTargetStreamer *createXtensaNullTargetStreamer(MCStreamer &S) { + return new XtensaTargetStreamer(S); +} + extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeXtensaTargetMC() { // Register the MCAsmInfo. TargetRegistry::RegisterMCAsmInfo(getTheXtensaTarget(), @@ -121,4 +125,8 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeXtensaTargetMC() { // Register the ELF target streamer. 
TargetRegistry::RegisterObjectTargetStreamer( getTheXtensaTarget(), createXtensaObjectTargetStreamer); + + // Register the null target streamer. + TargetRegistry::RegisterNullTargetStreamer(getTheXtensaTarget(), + createXtensaNullTargetStreamer); } diff --git a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.h b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.h index 00df88ff36b41..8c255f5110855 100644 --- a/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.h +++ b/llvm/lib/Target/Xtensa/MCTargetDesc/XtensaTargetStreamer.h @@ -26,13 +26,13 @@ class XtensaTargetStreamer : public MCTargetStreamer { // section is not switched yet (SwitchLiteralSection is true) then switch to // literal section. virtual void emitLiteral(MCSymbol *LblSym, const MCExpr *Value, - bool SwitchLiteralSection, SMLoc L = SMLoc()) = 0; + bool SwitchLiteralSection, SMLoc L = SMLoc()) {}; - virtual void emitLiteralPosition() = 0; + virtual void emitLiteralPosition() {}; // Switch to the literal section. The BaseSection name is used to construct // literal section name. - virtual void startLiteralSection(MCSection *BaseSection) = 0; + virtual void startLiteralSection(MCSection *BaseSection) {}; void setLiteralSectionPrefix(StringRef Name) { LiteralSectionPrefix = Name; } @@ -60,7 +60,6 @@ class XtensaTargetELFStreamer : public XtensaTargetStreamer { MCELFStreamer &getStreamer(); void emitLiteral(MCSymbol *LblSym, const MCExpr *Value, bool SwitchLiteralSection, SMLoc L) override; - void emitLiteralPosition() override {} void startLiteralSection(MCSection *Section) override; }; } // end namespace llvm diff --git a/llvm/test/CodeGen/Xtensa/null-streamer.ll b/llvm/test/CodeGen/Xtensa/null-streamer.ll new file mode 100644 index 0000000000000..65ff6d21709a3 --- /dev/null +++ b/llvm/test/CodeGen/Xtensa/null-streamer.ll @@ -0,0 +1,7 @@ +; Test the null streamer with a target streamer. 
+; RUN: llc -O0 -filetype=null -mtriple=xtensa < %s + +define i32 @main() { +entry: + ret i32 0 +} From b730e68f206c8beaa4619084cd204e5a8579055e Mon Sep 17 00:00:00 2001 From: Stefan Stipanovic Date: Thu, 22 Aug 2024 09:56:30 +0200 Subject: [PATCH 275/289] [RISCV][ESP32P4] Don't yet consider v16i8 and v4i32 legal --- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 6 ------ 1 file changed, 6 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 600b20416c918..0aa628ad56e68 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -120,12 +120,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, // Set up the register classes. addRegisterClass(XLenVT, &RISCV::GPRRegClass); - if (Subtarget.hasVendorESP32P4()) { - static const MVT::SimpleValueType QRVec[] = {MVT::v16i8, MVT::v4i32}; - for (auto st : QRVec) - addRegisterClass(st, &RISCV::QRRegClass); - } - if (Subtarget.is64Bit() && RV64LegalI32) addRegisterClass(MVT::i32, &RISCV::GPRRegClass); From 196f31f788851f49361ab94549c7993dbfc83484 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Thu, 16 Jan 2025 13:31:31 +0300 Subject: [PATCH 276/289] esp/ci: Upgrade GNU components to '14.2.0_20241119' --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1613ee26d42f5..6db717ded269f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -12,7 +12,7 @@ image: ${CI_DOCKER_REGISTRY}/llvm-build:5 variables: ESP_LLVM_EMBEDDED_TOOLCHAIN_REF: "llvm_release_19" - ESP_GNU_TOOLCHAIN_VER: "13.2.0_20240305" + ESP_GNU_TOOLCHAIN_VER: "14.2.0_20241119" CROSS_ARM_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-arm:2 CROSS_WIN_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-win:2 DIST_DIR: "dist" From 5ce1067e05268cc1949a86ec85127de62d138e79 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Thu, 16 Jan 2025 22:10:48 +0300 Subject: [PATCH 277/289] 
esp/ci: Use 'gold' linker for test build jobs It consumes much less memory. This commit also limits the number of simulteneous compile and link jobs for LLVM. --- .gitlab-ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6db717ded269f..6514188973c07 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -217,6 +217,9 @@ build_and_test: -DLLDB_INCLUDE_TESTS=OFF -DLLVM_TOOLCHAIN_ENABLED_TARGETS="RISCV" -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=Xtensa + -DLLVM_USE_LINKER="gold" + -DLLVM_PARALLEL_LINK_JOBS=2 + -DLLVM_PARALLEL_COMPILE_JOBS=2 -B ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/build.log - export CUR_USER=$(whoami); - useradd -m test_runner; From 0b9eac05fb1c109aa4d1aa8a81bec21267635b6f Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Fri, 17 Jan 2025 17:06:00 +0300 Subject: [PATCH 278/289] esp/ci: Make MacOS signing jobs auto-starting for tags only --- .gitlab-ci.yml | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6514188973c07..78c785b2dc697 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -18,6 +18,12 @@ variables: DIST_DIR: "dist" BUILD_DIR: "build" +workflow: + rules: + - if: $CI_PIPELINE_SOURCE == 'merge_request_event' + - if: $CI_COMMIT_TAG + - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH + .use_ci_tools: &use_ci_tools | curl -sSL ${CIT_LOADER_URL} -o cit_loader.sh && sh cit_loader.sh source citools/import_functions @@ -28,7 +34,6 @@ variables: .get_toolchain_build_scripts: &get_toolchain_build_scripts | git clone -b ${ESP_LLVM_EMBEDDED_TOOLCHAIN_REF} ${GITLAB_SSH_SERVER}/${ESP_LLVM_EMBEDDED_TOOLCHAIN_REPO}.git - before_script: - *use_ci_tools - *add_gitlab_key @@ -391,7 +396,12 @@ pack_aarch64-apple-darwin: .macos_codesign_template: stage: sign - when: on_success + rules: + - if: $CI_COMMIT_TAG + when: on_success + allow_failure: false + - when: manual + allow_failure: true resource_group: macos_codesign tags: [ "darwin", "codesign" ] artifacts: 
From 6f594a1788cc4b593b127e236eec75ceca92a18d Mon Sep 17 00:00:00 2001 From: Ali Azam Rana <85216275+alirana01@users.noreply.github.com> Date: Wed, 27 Nov 2024 11:22:20 +0100 Subject: [PATCH 279/289] [Clangd] Return Includes for documentSymbol request --- clang-tools-extra/clangd/FindSymbols.cpp | 50 +++++++++++++++++-- .../clangd/unittests/FindSymbolsTests.cpp | 4 +- 2 files changed, 47 insertions(+), 7 deletions(-) diff --git a/clang-tools-extra/clangd/FindSymbols.cpp b/clang-tools-extra/clangd/FindSymbols.cpp index 55f16b7085a6f..6225e59ab4f9f 100644 --- a/clang-tools-extra/clangd/FindSymbols.cpp +++ b/clang-tools-extra/clangd/FindSymbols.cpp @@ -10,6 +10,7 @@ #include "AST.h" #include "FuzzyMatch.h" #include "ParsedAST.h" +#include "Protocol.h" #include "Quality.h" #include "SourceCode.h" #include "index/Index.h" @@ -20,9 +21,9 @@ #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" +#include #include -#include -#include +#include #define DEBUG_TYPE "FindSymbols" @@ -375,12 +376,48 @@ class DocumentOutline { SymBuilder Root; for (auto &TopLevel : AST.getLocalTopLevelDecls()) traverseDecl(TopLevel, Root); - return std::move(std::move(Root).build().children); + std::vector symbols = + std::move(std::move(Root).build().children); + auto IncludeSymbols = collectIncludeSymbols(AST); + symbols.insert(symbols.end(), + std::make_move_iterator(IncludeSymbols.begin()), + std::make_move_iterator(IncludeSymbols.end())); + return symbols; } private: enum class VisitKind { No, OnlyDecl, OnlyChildren, DeclAndChildren }; + std::vector collectIncludeSymbols(const ParsedAST &AST) { + std::vector IncludeSymbols; + const auto &IS = AST.getIncludeStructure(); + const auto &MI = IS.MainFileIncludes; + for (const auto &Inclusion : MI) { + + DocumentSymbol IncludeSymbol; + std::string IncludeText = Inclusion.Written; + if (IncludeText.empty()) + continue; + + if (((IncludeText.front() == '"' && IncludeText.back() == '"') || + 
(IncludeText.front() == '<' && IncludeText.back() == '>'))) { + IncludeText = IncludeText.substr(1, IncludeText.size() - 2); + } + + IncludeSymbol.name = IncludeText; + IncludeSymbol.kind = SymbolKind::File; + IncludeSymbol.range.start.line = Inclusion.HashLine; + IncludeSymbol.range.start.character = 0; + IncludeSymbol.range.end.line = Inclusion.HashLine; + IncludeSymbol.range.end.character = + static_cast(Inclusion.Written.size() + 9); + IncludeSymbol.selectionRange = IncludeSymbol.range; + IncludeSymbols.push_back(std::move(IncludeSymbol)); + } + + return IncludeSymbols; + } + void traverseDecl(Decl *D, SymBuilder &Parent) { // Skip symbols which do not originate from the main file. if (!isInsideMainFile(D->getLocation(), AST.getSourceManager())) @@ -628,8 +665,10 @@ PragmaMarkSymbol markToSymbol(const PragmaMark &P) { } std::vector collectDocSymbols(ParsedAST &AST) { - std::vector Syms = DocumentOutline(AST).build(); - + DocumentOutline documentOutline = DocumentOutline(AST); + std::vector Syms = documentOutline.build(); + // TODO: Add a collection method for macros as well + const auto &PragmaMarks = AST.getMarks(); if (PragmaMarks.empty()) return Syms; @@ -644,6 +683,7 @@ std::vector collectDocSymbols(ParsedAST &AST) { DocumentSymbol Root; Root.children = std::move(Syms); Root.range = EntireFile; + mergePragmas(Root, llvm::ArrayRef(Pragmas)); return Root.children; } diff --git a/clang-tools-extra/clangd/unittests/FindSymbolsTests.cpp b/clang-tools-extra/clangd/unittests/FindSymbolsTests.cpp index 4276a44275f53..b0ab9d72a94db 100644 --- a/clang-tools-extra/clangd/unittests/FindSymbolsTests.cpp +++ b/clang-tools-extra/clangd/unittests/FindSymbolsTests.cpp @@ -467,7 +467,7 @@ TEST(DocumentSymbols, ExternSymbol) { #include "foo.h" )cpp"; - EXPECT_THAT(getSymbols(TU.build()), IsEmpty()); + EXPECT_THAT(getSymbols(TU.build()), ElementsAre(withName("foo.h"))); } TEST(DocumentSymbols, ExternContext) { @@ -546,7 +546,7 @@ TEST(DocumentSymbols, InHeaderFile) { } )cpp"; 
EXPECT_THAT(getSymbols(TU.build()), - ElementsAre(withName("i"), withName("test"))); + ElementsAre(withName("i"), withName("test"), withName("bar.h"))); } TEST(DocumentSymbols, Template) { From 353ae76a5e3babb0a95a48ee7e1fd4a205e96529 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Mon, 20 Jan 2025 18:27:24 +0300 Subject: [PATCH 280/289] esp/ci: Make limit for LLVM compile/link jobs configurable --- .gitlab-ci.yml | 7 +- .universal-toolchain-release.yml | 529 ------------------------------- 2 files changed, 3 insertions(+), 533 deletions(-) delete mode 100644 .universal-toolchain-release.yml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 78c785b2dc697..63f46008d64c3 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -32,7 +32,7 @@ workflow: cit_add_ssh_key "${GITLAB_KEY}" .get_toolchain_build_scripts: &get_toolchain_build_scripts | - git clone -b ${ESP_LLVM_EMBEDDED_TOOLCHAIN_REF} ${GITLAB_SSH_SERVER}/${ESP_LLVM_EMBEDDED_TOOLCHAIN_REPO}.git + git clone -b ${ESP_LLVM_EMBEDDED_TOOLCHAIN_REF} ${GITLAB_SSH_SERVER}/${ESP_LLVM_EMBEDDED_TOOLCHAIN_REPO_PATH}.git before_script: - *use_ci_tools @@ -115,8 +115,8 @@ before_script: -DHOST_TRIPLE=${CONF_HOST} -DLLVM_TOOLCHAIN_ENABLED_TARGETS="${TARGET}" -DLLVM_USE_LINKER=${USE_LINKER} - -DLLVM_PARALLEL_LINK_JOBS=2 - -DLLVM_PARALLEL_COMPILE_JOBS=2 + -DLLVM_PARALLEL_LINK_JOBS=${ESP_LLVM_PARALLEL_LINK_JOBS} + -DLLVM_PARALLEL_COMPILE_JOBS=${ESP_LLVM_PARALLEL_COMPILE_JOBS} -DCLANG_REPOSITORY_STRING="${GH_REPO_HTTPS}" -DCPACK_ARCHIVE_THREADS=0 -B ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/build.log @@ -224,7 +224,6 @@ build_and_test: -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=Xtensa -DLLVM_USE_LINKER="gold" -DLLVM_PARALLEL_LINK_JOBS=2 - -DLLVM_PARALLEL_COMPILE_JOBS=2 -B ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/build.log - export CUR_USER=$(whoami); - useradd -m test_runner; diff --git a/.universal-toolchain-release.yml b/.universal-toolchain-release.yml deleted file mode 100644 index cd899df8a7ba0..0000000000000 --- 
a/.universal-toolchain-release.yml +++ /dev/null @@ -1,529 +0,0 @@ - -# Prepare release name/number -.get_release_name: &get_release_name | - # using annotated tags - REL_NUM=$(git describe --abbrev=7) - REL_NAME=${REL_SFX}-${REL_NUM}-${PLATFORM_NAME} - ARCHIVE_NAME=${REL_NAME}.${ARCHIVE_EXT} - LIBS_ARCHIVE_NAME=libs_${REL_NAME}.${ARCHIVE_EXT} - echo "PLATFORM_NAME: $PLATFORM_NAME" - echo "REL_NUM: $REL_NUM" - echo "REL_NAME: $REL_NAME" - echo "ARCHIVE_NAME: $ARCHIVE_NAME" - -# Get an existing crosstool-ng builds for all chips -.get_gcc_toolchain: &get_gcc_toolchain | - declare -a XTENSA_CPUS=("esp32" - "esp32s2" - "esp32s3") - for ((i = 0; i < ${#XTENSA_CPUS[@]}; i++)); do - XTENSA_CPU=${XTENSA_CPUS[$i]} - GCC_TOOLCHAIN_ARCH=xtensa-${XTENSA_CPU}-elf-${GCC_REL_VER}-${GCC_PLATFORM_NAME}.${GCC_ARCHIVE_EXT} - wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/esp-${GCC_REL_VER}/${GCC_TOOLCHAIN_ARCH} - ${GCC_UNARCHIVE_TOOL} ${GCC_TOOLCHAIN_ARCH} - done; - GCC_TOOLCHAIN_ARCH=riscv32-esp-elf-${GCC_REL_VER}-${GCC_PLATFORM_NAME}.${GCC_ARCHIVE_EXT} - wget --no-verbose https://dl.espressif.com/github_assets/espressif/crosstool-NG/releases/download/esp-${GCC_REL_VER}/${GCC_TOOLCHAIN_ARCH} - ${GCC_UNARCHIVE_TOOL} ${GCC_TOOLCHAIN_ARCH} - -# Pack the toolchain -.package_toolchain: &package_toolchain | - ${ARCHIVE_TOOL} ${ARCHIVE_NAME} esp-clang/ - mkdir -p ${DISTRO_DIR} - mv ${ARCHIVE_NAME} ${DISTRO_DIR}/ - echo "${ARCHIVE_NAME}" > ${DISTRO_DIR}/dist_name_${PLATFORM_NAME} - -# Pack libs to be used for Rust, Go etc. 
-.package_libs: &package_libs | - eval ${ARCHIVE_TOOL} ${LIBS_ARCHIVE_NAME} esp-clang/lib/clang/${CLANG_VER}/include esp-clang/lib/lib{clang,LLVM}* ${LIBS_PACK_EXTRA_PATHS:-} - mkdir -p ${DISTRO_DIR} - mv ${LIBS_ARCHIVE_NAME} ${DISTRO_DIR}/ - echo "${LIBS_ARCHIVE_NAME}" > ${DISTRO_DIR}/dist_name_libs-${PLATFORM_NAME} - -.get_binutils: &get_binutils | - git clone -b ${BINUTILS_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${BINUTILS_REPO}.git - BINUTILS_PATH=$PWD/${BINUTILS_REPO} - -.get_xtensa_overlays: &get_xtensa_overlays | - git clone -b ${XTENSA_OVERLAYS_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${XTENSA_OVERLAYS_REPO}.git - XTENSA_OVERLAYS_PATH=$PWD/${XTENSA_OVERLAYS_REPO} - -.get_newlib: &get_newlib | - git clone -b ${NEWLIB_REF} --single-branch ${GITLAB_SSH_SERVER}/idf/${NEWLIB_REPO}.git - NEWLIB_PATH=$PWD/${NEWLIB_REPO} - -.build_template: - stage: build - tags: [ "amd64", "build" ] - artifacts: - paths: - - ${DIST_DIR}/ - - ${BUILD_DIR}/lld-tests.log - - ${BUILD_DIR}/tests.log - - ${BUILD_DIR}/build.log - - "${BUILD_DIR}/**/CMakeError.log" - - "${BUILD_DIR}/**/CMakeOutput.log" - when: always - expire_in: 1 day - variables: - BUILD_TOOLCHAIN_CMD_EXTRA_ARGS: "" - after_script: - # help to identify that build failed due to OOM - - > - if [ $CI_JOB_STATUS == 'failed' ]; then - [ ! -f "${BUILD_DIR}/build.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/build.log || true - [ ! -f "${BUILD_DIR}/tests.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/tests.log || true - [ ! 
-f "${BUILD_DIR}/lld-tests.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/lld-tests.log || true - fi - script: - - *get_release_name - - mkdir ${DOWNLOADS_DIR} - - pushd ${DOWNLOADS_DIR} - - ESP_GCC_TOOLCHAIN_DIST_BASE=$PWD - - *get_gcc_toolchain - - *get_binutils - - *get_xtensa_overlays - - popd - - !reference [.get_clang_toolchain_build_scripts, script] - - !reference [.fix_origin_remote_for_public, script] - - LLVM_PROJECT_PATH=$PWD - - BUILD_PATH=$PWD/${BUILD_DIR} - - mkdir -p ${BUILD_PATH} - - export USE_PARALLEL_LINK_JOBS=2 - - export USE_PARALLEL_COMPILE_JOBS=2 - # build Clang toolchain w/o newlib - - ${BUILD_TOOLCHAIN_CMD} --llvm-path=${LLVM_PROJECT_PATH} - --gcc-toolchains-path=${ESP_GCC_TOOLCHAIN_DIST_BASE} --binutils-path=${BINUTILS_PATH} - --xtensa-overlays-path=${XTENSA_OVERLAYS_PATH} --host=${CONF_HOST} ${BUILD_TOOLCHAIN_CMD_EXTRA_ARGS} ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/build.log - - BUILD_HOST=$(gcc -dumpmachine) - # Do not run unit tests for cross-builds. - # Run as non-root user because permission tests fail when run by root. 
- - if [ "${CONF_HOST}" == "${BUILD_HOST}" ]; then - export LLVM_BUILD_PATH=${BUILD_PATH}/build/llvm/build-${CONF_HOST}-Release; - echo "Run unit tests for native build in ${LLVM_BUILD_PATH}"; - useradd -m test_runner; - chown -R test_runner ${LLVM_BUILD_PATH}; - touch ${BUILD_PATH}/tests.log; - chmod o+w ${BUILD_PATH}/tests.log; - runuser -l test_runner -c 'cmake --build '${LLVM_BUILD_PATH}' --target check-all 2>&1 > '${BUILD_PATH}'/tests.log'; - touch ${BUILD_PATH}/lld-tests.log; - chmod o+w ${BUILD_PATH}/lld-tests.log; - runuser -l test_runner -c 'cmake --build '${LLVM_BUILD_PATH}' --target lld-test 2>&1 > '${BUILD_PATH}'/lld-tests.log'; - fi - - export DISTRO_DIR=$PWD/$DIST_DIR - - pushd ${BUILD_PATH} - - *package_toolchain - - popd - -.build_linux-gnu_template: - extends: .build_template - variables: - ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" - GCC_UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - GCC_ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" - BUILD_TOOLCHAIN_CMD: "./build-toolchain.sh" - USE_LINKER: "gold" - -build_x86_64-linux-gnu: - extends: .build_linux-gnu_template - variables: - CONF_HOST: "x86_64-linux-gnu" - PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" - GCC_PLATFORM_NAME: "${GCC_PLATFORM_NAME_LINUX}" - -build_arm-linux-gnueabihf: - extends: .build_linux-gnu_template - image: ${CROSS_ARM_IMAGE} - variables: - CONF_HOST: "arm-linux-gnueabihf" - PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARMHF}" - GCC_PLATFORM_NAME: "${GCC_PLATFORM_NAME_LINUX_ARMHF}" - -build_aarch64-linux-gnu: - extends: .build_linux-gnu_template - image: ${CROSS_ARM_IMAGE} - variables: - CONF_HOST: "aarch64-linux-gnu" - PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARM64}" - GCC_PLATFORM_NAME: "${GCC_PLATFORM_NAME_LINUX_ARM64}" - -build_x86_64-w64-mingw32: - extends: .build_template - needs: - # needs native toolchain and newlib from this job - - job: build_x86_64-linux-gnu - before_script: - - !reference [.use_ci_tools, script] - - !reference [.add_gitlab_key, script] - # get 
ARCHIVE_NAME for Linux release. Modify vars to make get_release_name working properly - - CLANG_LINUX_ARCHIVE=$(cat ${DIST_DIR}/dist_name_${PLATFORM_NAME_LINUX}) - # unpack x86_64-linux-gnu toolchain to re-use it as native Clang for Windows build - - mkdir -p esp-clang-${PLATFORM_NAME_LINUX} - - ${UNARCHIVE_TOOL_LINUX} ${DIST_DIR}/${CLANG_LINUX_ARCHIVE} -C esp-clang-${PLATFORM_NAME_LINUX} - # we do not want to keep artifacts from 'x86_64-linux-gnu' job - - rm -rf ${DIST_DIR} - - rm -rf ${BUILD_DIR} - # add build command args speciifc for Windows build - - export BUILD_TOOLCHAIN_CMD_EXTRA_ARGS="--native-esp-clang-path=$PWD/esp-clang-${PLATFORM_NAME_LINUX}" - variables: - CONF_HOST: "x86_64-w64-mingw32" - PLATFORM_NAME: "${PLATFORM_NAME_WIN}" - GCC_PLATFORM_NAME: "${GCC_PLATFORM_NAME_WIN}" - ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" - GCC_UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_WIN}" - GCC_ARCHIVE_EXT: "${ARCHIVE_EXT_WIN}" - BUILD_TOOLCHAIN_CMD: "./build-toolchain-win.sh" - -.build_apple-darwin_template: - extends: .build_template - variables: - ARCHIVE_TOOL: "${ARCHIVE_TOOL_MACOS}" - ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" - GCC_UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_MACOS}" - GCC_ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" - BUILD_TOOLCHAIN_CMD: "./build-toolchain.sh" - -build_x86_64-apple-darwin: - extends: .build_apple-darwin_template - variables: - CONF_HOST: "x86_64-apple-darwin21.1" - PLATFORM_NAME: "${PLATFORM_NAME_MACOS}" - GCC_PLATFORM_NAME: "${GCC_PLATFORM_NAME_MACOS}" - -build_aarch64-apple-darwin: - extends: .build_apple-darwin_template - variables: - CONF_HOST: "aarch64-apple-darwin21.1" - PLATFORM_NAME: "${PLATFORM_NAME_MACOS_ARM64}" - GCC_PLATFORM_NAME: "${GCC_PLATFORM_NAME_MACOS_ARM64}" - -build_newlib: - stage: build - tags: [ "amd64", "build" ] - needs: - # needs native toolchain - - job: build_x86_64-linux-gnu - artifacts: - paths: - - ${DIST_DIR}/ - - ${BUILD_DIR}/build.log - when: always - expire_in: 1 day - variables: - 
PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" - ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" - script: - # get ARCHIVE_NAME for Linux release. - - CLANG_ARCHIVE=$PWD/${DIST_DIR}/$(cat ${DIST_DIR}/dist_name_${PLATFORM_NAME_LINUX}) - - mkdir -p ${DOWNLOADS_DIR} - - pushd ${DOWNLOADS_DIR} - - *get_xtensa_overlays - - *get_newlib - # unpack clang - - ${UNARCHIVE_TOOL} ${CLANG_ARCHIVE} - - export PATH=$PWD/esp-clang/bin:$PATH - - popd - - rm -rf $PWD/${DIST_DIR} - - !reference [.get_clang_toolchain_build_scripts, script] - # build newlib overlay using ESP native (Linux) clang toolchain only - # it will be re-used for cross-buit toolchains (win and mac). - - NEWLIB_OVERLAY_DISTRO_PATH=$PWD/${DIST_DIR} - - mkdir -p ${NEWLIB_OVERLAY_DISTRO_PATH} - - BUILD_PATH=$PWD/${BUILD_DIR} - - mkdir -p ${BUILD_PATH} - - ./build-toolchain.sh --newlib-path=${NEWLIB_PATH} --xtensa-overlays-path=${XTENSA_OVERLAYS_PATH} ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/build.log - - pushd ${BUILD_PATH} - - ${ARCHIVE_TOOL_NEWLIB} ${NEWLIB_OVERLAY_DISTRO_PATH}/esp-clang-newlib-overlay.${ARCHIVE_EXT_NEWLIB} esp-clang/ - - popd - -build_compiler-rt: - stage: build - tags: [ "amd64", "build" ] - needs: - # needs native toolchain with newlib - # newlib is necessary for building tests - - job: build_x86_64-linux-gnu - - job: build_newlib - artifacts: - paths: - - ${DIST_DIR}/ - - ${BUILD_DIR}/build.log - when: always - expire_in: 1 day - variables: - PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" - ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" - script: - - LLVM_PROJECT_PATH=$PWD - # get ARCHIVE_NAME for Linux release. 
- - CLANG_ARCHIVE=$PWD/${DIST_DIR}/$(cat ${DIST_DIR}/dist_name_${PLATFORM_NAME_LINUX}) - - NEWLIB_ARCHIVE=$PWD/${DIST_DIR}/esp-clang-newlib-overlay.${ARCHIVE_EXT_NEWLIB} - - mkdir -p ${DOWNLOADS_DIR} - - pushd ${DOWNLOADS_DIR} - # unpack clang - - ${UNARCHIVE_TOOL} ${CLANG_ARCHIVE} - # unpack newlib - - ${UNARCHIVE_TOOL_NEWLIB} ${NEWLIB_ARCHIVE} - # now Linux toolchain with newlib is in $PWD/esp-clang - - export PATH=$PWD/esp-clang/bin:$PATH - - popd - - rm -rf $PWD/${DIST_DIR} - - !reference [.get_clang_toolchain_build_scripts, script] - # build compiler-rt overlay using ESP native (Linux) clang toolchain only - # it will be re-used for cross-buit toolchains (win and mac). - - COMPILER_RT_OVERLAY_DISTRO_PATH=$PWD/${DIST_DIR} - - mkdir -p ${COMPILER_RT_OVERLAY_DISTRO_PATH} - - BUILD_PATH=$PWD/${BUILD_DIR} - - mkdir -p ${BUILD_PATH} - - ./build-toolchain.sh --llvm-path=${LLVM_PROJECT_PATH} --build-llvm=no --build-compiler-rt=yes ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/build.log - - pushd ${BUILD_PATH} - - ${ARCHIVE_TOOL_COMPILER_RT} ${COMPILER_RT_OVERLAY_DISTRO_PATH}/esp-clang-compiler-rt-overlay.${ARCHIVE_EXT_COMPILER_RT} esp-clang/ - - popd - -.pack_template: - stage: pack - tags: [ "amd64", "build" ] - artifacts: - paths: - - ${DIST_DIR}/ - when: always - expire_in: 3 day - script: - - *get_release_name - - export BUILD_PATH=$PWD/${BUILD_DIR} - - mkdir -p ${BUILD_PATH} - # unpack clang - - ${UNARCHIVE_TOOL} ${DIST_DIR}/${ARCHIVE_NAME} -C ${BUILD_PATH} - # unpack newlib - - ${UNARCHIVE_TOOL_NEWLIB} ${DIST_DIR}/esp-clang-newlib-overlay.${ARCHIVE_EXT_NEWLIB} -C ${BUILD_PATH} - # unpack compiler-rt - - ${UNARCHIVE_TOOL_COMPILER_RT} ${DIST_DIR}/esp-clang-compiler-rt-overlay.${ARCHIVE_EXT_COMPILER_RT} -C ${BUILD_PATH} - - rm -rf ${DIST_DIR} - - !reference [.get_clang_toolchain_build_scripts, script] - # strip binutils afer newlib is built - - STRIP_BINUTILS=YES ./build-toolchain.sh --host=${CONF_HOST} ${BUILD_PATH} - - DISTRO_DIR=$PWD/${DIST_DIR} - - pushd ${BUILD_PATH} - - 
*package_toolchain - - *package_libs - - popd - -.pack_linux-gnu_template: - extends: .pack_template - variables: - ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" - -pack_x86_64-linux-gnu: - extends: .pack_linux-gnu_template - needs: - - job: build_x86_64-linux-gnu - - job: build_newlib - - job: build_compiler-rt - variables: - CONF_HOST: "x86_64-linux-gnu" - PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" - -pack_arm-linux-gnueabihf: - extends: .pack_linux-gnu_template - image: ${CROSS_ARM_IMAGE} - needs: - - job: build_arm-linux-gnueabihf - - job: build_newlib - - job: build_compiler-rt - variables: - CONF_HOST: "arm-linux-gnueabihf" - PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARMHF}" - -pack_aarch64-linux-gnu: - extends: .pack_linux-gnu_template - image: ${CROSS_ARM_IMAGE} - needs: - - job: build_aarch64-linux-gnu - - job: build_newlib - - job: build_compiler-rt - variables: - CONF_HOST: "aarch64-linux-gnu" - PLATFORM_NAME: "${PLATFORM_NAME_LINUX_ARM64}" - -pack_x86_64-w64-mingw32: - extends: .pack_template - needs: - - job: build_x86_64-w64-mingw32 - - job: build_newlib - - job: build_compiler-rt - variables: - CONF_HOST: "x86_64-w64-mingw32" - PLATFORM_NAME: "${PLATFORM_NAME_WIN}" - ARCHIVE_TOOL: "${PACK_ARCHIVE_TOOL_WIN}" - UNARCHIVE_TOOL: "${PACK_UNARCHIVE_TOOL_WIN}" - ARCHIVE_EXT: "${PACK_ARCHIVE_EXT_WIN}" - LIBS_PACK_EXTRA_PATHS: esp-clang/bin/lib{c++,clang,LLVM,unwind}* - -.pack_apple-darwin_template: - extends: .pack_template - variables: - ARCHIVE_TOOL: "${ARCHIVE_TOOL_MACOS}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_MACOS}" - ARCHIVE_EXT: "${ARCHIVE_EXT_MACOS}" - -pack_x86_64-apple-darwin: - extends: .pack_apple-darwin_template - needs: - - job: build_x86_64-apple-darwin - - job: build_newlib - - job: build_compiler-rt - variables: - CONF_HOST: "x86_64-apple-darwin21.1" - PLATFORM_NAME: "${PLATFORM_NAME_MACOS}" - -pack_aarch64-apple-darwin: - extends: .pack_apple-darwin_template - needs: - - job: 
build_aarch64-apple-darwin - - job: build_newlib - - job: build_compiler-rt - variables: - CONF_HOST: "aarch64-apple-darwin21.1" - PLATFORM_NAME: "${PLATFORM_NAME_MACOS_ARM64}" - -test_x86_64-linux-gnu: - stage: test - tags: [ "amd64", "build" ] - needs: - - job: pack_x86_64-linux-gnu - artifacts: - paths: - - ${BUILD_DIR}/tests.log - when: always - expire_in: 1 day - variables: - PLATFORM_NAME: "${PLATFORM_NAME_LINUX}" - ARCHIVE_TOOL: "${ARCHIVE_TOOL_LINUX}" - UNARCHIVE_TOOL: "${UNARCHIVE_TOOL_LINUX}" - ARCHIVE_EXT: "${ARCHIVE_EXT_LINUX}" - script: - - BUILD_PATH=$PWD/$BUILD_DIR - - mkdir -p ${BUILD_PATH} - - *get_release_name - - ${UNARCHIVE_TOOL} ${DIST_DIR}/${ARCHIVE_NAME} - # getting testsuite - - git clone -b ${LLVM_GCC_TESTSUITE_REF} --depth 1 $GITLAB_SSH_SERVER/idf/${LLVM_TESTSUITE_REPO}.git - # preparing testsuite - - export PATH=${PWD}/esp-clang/bin:$PATH - - cd ${LLVM_TESTSUITE_REPO} - # qemu - - ./qemu_esp32_install.sh - # run testsuite for esp32 - - ./run_esp32_tests.sh 2>&1 > ${BUILD_PATH}/tests.log - -sign_pack_x86_64-apple-darwin: - stage: macos_codesign - when: on_success - resource_group: macos_codesign - tags: [ "darwin", "codesign" ] - # list all jobs that produces macos distros - needs: - - job: pack_x86_64-apple-darwin - artifacts: - paths: - - ${DIST_DIR} - variables: - # directory with distro archives - DIST_ART_DIR: ${DIST_DIR} - # command to unarchive distro - ARCHIVE_TOOL: ${ARCHIVE_TOOL_MACOS} - # command to unarchive distro - UNARCHIVE_TOOL: ${UNARCHIVE_TOOL_MACOS} - # URL to macos codesign repo - NOTARIZATION_SCRIPTS_GIT: "${CI_SERVER_PROTOCOL}://gitlab-ci-token:${CI_JOB_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/espressif/macos_codesign_notarization.git" - script: - - git clone -q --depth=1 ${NOTARIZATION_SCRIPTS_GIT} -b ${CI_COMMIT_REF_NAME} || - git clone -q --depth=1 ${NOTARIZATION_SCRIPTS_GIT} - - ./macos_codesign_notarization/run.sh - -sign_aarch64-apple-darwin: - stage: macos_codesign - when: on_success - resource_group: 
macos_codesign - tags: [ "darwin", "codesign" ] - # list all jobs that produces macos distros - needs: - - job: pack_aarch64-apple-darwin - artifacts: - paths: - - ${DIST_DIR} - variables: - # directory with distro archives - DIST_ART_DIR: ${DIST_DIR} - # command to unarchive distro - ARCHIVE_TOOL: ${ARCHIVE_TOOL_MACOS} - # command to unarchive distro - UNARCHIVE_TOOL: ${UNARCHIVE_TOOL_MACOS} - # URL to macos codesign repo - NOTARIZATION_SCRIPTS_GIT: "${CI_SERVER_PROTOCOL}://gitlab-ci-token:${CI_JOB_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/espressif/macos_codesign_notarization.git" - script: - - git clone -q --depth=1 ${NOTARIZATION_SCRIPTS_GIT} -b ${CI_COMMIT_REF_NAME} || - git clone -q --depth=1 ${NOTARIZATION_SCRIPTS_GIT} - - ./macos_codesign_notarization/run.sh - -upload_to_http: - stage: private_deploy - when: manual - allow_failure: true - tags: [ "deploy", "shiny" ] - variables: - # force the fetch strategy to clean old archives up in dist/ dir - GIT_STRATEGY: fetch - needs: - - job: pack_x86_64-linux-gnu - before_script: - - !reference [.use_ci_tools, script] - script: - - cit_add_ssh_key "${HTTP_UPLOAD_KEY}" - # List of archives - - FILES=$(find ${DIST_DIR} -name dist_name_\* -exec cat {} \+) - - cd ${DIST_DIR} - - ls -l $FILES - - scp ${FILES} ${HTTP_UPLOAD_DIR}/ct-ng/llvm-builds - # Show info - - echo -e "\nArchives were published there:\n\n$(for n in ${FILES}; do echo "${HTTP_PUBLIC_DIR}/ct-ng/llvm-builds/${n}"; done)\n" - -upload_to_github: - stage: public_deploy - when: manual - allow_failure: true - only: - - tags - tags: [ "amd64", "internet" ] - image: espressif/github-hub:2 - variables: - GIT_STRATEGY: fetch - GITHUB_TOKEN: "${GH_TOKEN}" - GITHUB_REPO: "${GH_REPO_HTTPS}" - TAG: "${CI_COMMIT_TAG}" - needs: - - job: pack_x86_64-linux-gnu - - job: pack_arm-linux-gnueabihf - - job: pack_aarch64-linux-gnu - - job: pack_x86_64-w64-mingw32 - - job: sign_pack_x86_64-apple-darwin - - job: sign_aarch64-apple-darwin - before_script: [] - script: - - ls -l 
dist*/ - - git remote add github ${GH_REPO_HTTPS} - - hub release show ${TAG} || { echo "Please create a release on GitHub with ${TAG} tag at first"; exit 1; } - # List of archives - - FILES=$(find ${DIST_DIR} -name dist_name_\* -exec cat {} \+) - - cd ${DIST_DIR} - - ls -l $FILES - # Upload archives - - for n in ${FILES}; do hub release edit -m "" -a "${n}" "${TAG}"; done From 2662180996dc6dc955539b9e86d9beffb6f1903c Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Fri, 31 Jan 2025 16:59:42 +0300 Subject: [PATCH 281/289] esp/ci: Fix standalone libs packaging --- .gitlab-ci.yml | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 63f46008d64c3..b5498f84086d9 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -43,7 +43,7 @@ before_script: artifacts: paths: - ${DIST_DIR}/ - - ${BUILD_DIR}/*.log + - ${BUILD_DIR}/build/*.log when: always expire_in: 1 day dependencies: [] @@ -72,8 +72,10 @@ before_script: - *get_toolchain_build_scripts - LLVM_PROJECT_PATH=$PWD - git config --global --add safe.directory ${LLVM_PROJECT_PATH} - - BUILD_PATH=$PWD/${BUILD_DIR} + - BUILD_PATH=$PWD/${BUILD_DIR}/build + - INSTALL_PATH=$PWD/${BUILD_DIR}/install - mkdir -p ${BUILD_PATH} + - mkdir -p ${INSTALL_PATH} - BUILD_HOST=$(gcc -dumpmachine) # Config to build target libraries # TODO: do not build core tools (clang, lld, binutils etc) @@ -119,6 +121,7 @@ before_script: -DLLVM_PARALLEL_COMPILE_JOBS=${ESP_LLVM_PARALLEL_COMPILE_JOBS} -DCLANG_REPOSITORY_STRING="${GH_REPO_HTTPS}" -DCPACK_ARCHIVE_THREADS=0 + --install-prefix=${INSTALL_PATH} -B ${BUILD_PATH} 2>&1 > ${BUILD_PATH}/build.log # Do not run unit tests for cross-builds. # Run as non-root user because permission tests fail when run by root. 
@@ -168,6 +171,8 @@ before_script: # pack distro with standalone libs - > if [[ "${PACK_STANDALONE_LIBS}" == "ON" ]]; then + echo "Clean install dir ${INSTALL_PATH}" + rm -rf ${INSTALL_PATH} ninja -C ${BUILD_PATH} package-llvm-standalone-libs 2>&1 >> ${BUILD_PATH}/build.log DISTRO_PACK_PATH=$(ninja -C ${BUILD_PATH} print-llvm-standalone-libs-package-path | tail -n 1) echo "DISTRO_PACK_PATH=${DISTRO_PACK_PATH}" @@ -178,6 +183,8 @@ before_script: # pack target libraries to be re-used in distros for other platforms - > if [[ "${PACK_TARGET_LIBS}" == "ON" ]]; then + echo "Clean install dir ${INSTALL_PATH}" + rm -rf ${INSTALL_PATH} ninja -C ${BUILD_PATH} package-llvm-toolchain-target-libs 2>&1 >> ${BUILD_PATH}/build.log DISTRO_PACK_PATH=$(ninja -C ${BUILD_PATH} print-llvm-toolchain-target-libs-package-path | tail -n 1) echo "DISTRO_PACK_PATH=${DISTRO_PACK_PATH}" From 18b826623c6d350795002137cedb0f9c59167481 Mon Sep 17 00:00:00 2001 From: Stefan Stipanovic Date: Thu, 30 Jan 2025 12:07:35 +0100 Subject: [PATCH 282/289] [Xtensa] Alignment fix --- .gitignore | 1 + .../lib/Target/Xtensa/XtensaFrameLowering.cpp | 25 ++++++++++++++++ llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp | 5 ++-- llvm/test/CodeGen/Xtensa/aligned_alloc.ll | 29 +++++++++++++++++++ 4 files changed, 58 insertions(+), 2 deletions(-) create mode 100644 llvm/test/CodeGen/Xtensa/aligned_alloc.ll diff --git a/.gitignore b/.gitignore index 0e13e97841618..7fc80c8a9f5d1 100644 --- a/.gitignore +++ b/.gitignore @@ -31,6 +31,7 @@ /*/build-* /_build /_dist +/dbg #==============================================================================# # Explicit files to ignore (only matches one). 
diff --git a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp index e40e73d4b84f3..a02a726e2db7c 100644 --- a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp +++ b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp @@ -62,6 +62,9 @@ void XtensaFrameLowering::emitPrologue(MachineFunction &MF, if (STI.isWinABI()) { StackSize += 32; + uint64_t MaxAlignment = MFI.getMaxAlign().value(); + if(MaxAlignment > 32) + StackSize += MaxAlignment; if (StackSize <= 32760) { BuildMI(MBB, MBBI, DL, TII.get(Xtensa::ENTRY)) @@ -83,6 +86,28 @@ void XtensaFrameLowering::emitPrologue(MachineFunction &MF, BuildMI(MBB, MBBI, DL, TII.get(Xtensa::MOVSP), SP).addReg(TmpReg); } + // Calculate how much is needed to have the correct alignment. + // Change offset to: alignment + difference. + // For example, in case of alignment of 128: + // diff_to_128_aligned_address = (128 - (SP & 127)) + // new_offset = 128 + diff_to_128_aligned_address + // This is safe to do because we increased the stack size by MaxAlignment. 
+ unsigned Reg, RegMisAlign; + if (MaxAlignment > 32){ + TII.loadImmediate(MBB, MBBI, &RegMisAlign, MaxAlignment - 1); + TII.loadImmediate(MBB, MBBI, &Reg, MaxAlignment); + BuildMI(MBB, MBBI, DL, TII.get(Xtensa::AND)) + .addReg(RegMisAlign, RegState::Define) + .addReg(FP) + .addReg(RegMisAlign); + BuildMI(MBB, MBBI, DL, TII.get(Xtensa::SUB), RegMisAlign) + .addReg(Reg) + .addReg(RegMisAlign); + BuildMI(MBB, MBBI, DL, TII.get(Xtensa::ADD), SP) + .addReg(SP) + .addReg(RegMisAlign, RegState::Kill); + } + // Store FP register in A8, because FP may be used to pass function // arguments if (XtensaFI->isSaveFrameRegister()) { diff --git a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp index 48ae4e4a99943..f35db72f3bdee 100644 --- a/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp +++ b/llvm/lib/Target/Xtensa/XtensaRegisterInfo.cpp @@ -119,6 +119,7 @@ bool XtensaRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, int64_t Offset = SPOffset + (int64_t)StackSize + MI.getOperand(FIOperandNum + 1).getImm(); + uint64_t Alignment = MF.getFrameInfo().getObjectAlign(FrameIndex).value(); bool Valid = isValidAddrOffset(MI, Offset); // If MI is not a debug value, make sure Offset fits in the 16-bit immediate @@ -126,13 +127,13 @@ bool XtensaRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, if (!MI.isDebugValue() && !Valid) { MachineBasicBlock &MBB = *MI.getParent(); DebugLoc DL = II->getDebugLoc(); - unsigned ADD = Xtensa::ADD; unsigned Reg; const XtensaInstrInfo &TII = *static_cast( MBB.getParent()->getSubtarget().getInstrInfo()); TII.loadImmediate(MBB, II, &Reg, Offset); - BuildMI(MBB, II, DL, TII.get(ADD), Reg) + + BuildMI(MBB, II, DL, TII.get(Xtensa::ADD), Reg) .addReg(FrameReg) .addReg(Reg, RegState::Kill); diff --git a/llvm/test/CodeGen/Xtensa/aligned_alloc.ll b/llvm/test/CodeGen/Xtensa/aligned_alloc.ll new file mode 100644 index 0000000000000..bc79c68a07d53 --- /dev/null +++ 
b/llvm/test/CodeGen/Xtensa/aligned_alloc.ll @@ -0,0 +1,29 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=xtensa -O0 -verify-machineinstrs < %s \ +; RUN: | FileCheck %s -check-prefix=XTENSA + +define i8 @loadi8_128(i8 %a) { +; XTENSA-LABEL: loadi8_128: +; XTENSA: entry a1, 416 +; XTENSA-NEXT: movi a8, 127 +; XTENSA-NEXT: movi a9, 128 +; XTENSA-NEXT: and a8, a1, a8 +; XTENSA-NEXT: sub a8, a9, a8 +; XTENSA-NEXT: add.n a1, a1, a8 +; XTENSA-NEXT: movi a8, 128 +; XTENSA-NEXT: add.n a8, a1, a8 +; XTENSA-NEXT: addi a10, a8, 0 +; XTENSA-NEXT: movi.n a11, 0 +; XTENSA-NEXT: movi.n a12, 64 +; XTENSA-NEXT: l32r a8, .LCPI0_0 +; XTENSA-NEXT: callx8 a8 +; XTENSA-NEXT: l8ui a2, a1, 128 +; XTENSA-NEXT: retw.n + %aligned = alloca i8, align 128 + call void @llvm.memset.p0.i64(ptr noundef nonnull align 64 dereferenceable(64) %aligned, i8 0, i64 64, i1 false) + %1 = load i8, ptr %aligned, align 128 + ret i8 %1 +} + +; Function Attrs: nocallback nofree nounwind willreturn memory(argmem: write) +declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) From 891b8fc17d2cbabcb7af0c95484df30b95aca2dc Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Tue, 11 Feb 2025 23:55:51 +0300 Subject: [PATCH 283/289] ecp/ci: Set release tag for build scripts --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b5498f84086d9..fca069fd9bc8f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -11,7 +11,7 @@ stages: image: ${CI_DOCKER_REGISTRY}/llvm-build:5 variables: - ESP_LLVM_EMBEDDED_TOOLCHAIN_REF: "llvm_release_19" + ESP_LLVM_EMBEDDED_TOOLCHAIN_REF: "esp-19.1.2_20250211" ESP_GNU_TOOLCHAIN_VER: "14.2.0_20241119" CROSS_ARM_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-arm:2 CROSS_WIN_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-win:2 From 8684df259d4c403d249572c947919b4181818668 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Fri, 14 
Feb 2025 19:57:50 +0300 Subject: [PATCH 284/289] esp/ci: Retry failed jobs --- .gitlab-ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index fca069fd9bc8f..6dfd66b73e9f6 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -10,6 +10,10 @@ stages: image: ${CI_DOCKER_REGISTRY}/llvm-build:5 +default: + interruptible: true + retry: 2 + variables: ESP_LLVM_EMBEDDED_TOOLCHAIN_REF: "esp-19.1.2_20250211" ESP_GNU_TOOLCHAIN_VER: "14.2.0_20241119" From d43c11e8212dc2a19e63e1da55f4f4e5ff116814 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Mon, 24 Feb 2025 17:49:37 +0300 Subject: [PATCH 285/289] esp/ci: Retry build jobs only --- .gitlab-ci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6dfd66b73e9f6..498d65a7e14ae 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -12,7 +12,6 @@ image: ${CI_DOCKER_REGISTRY}/llvm-build:5 default: interruptible: true - retry: 2 variables: ESP_LLVM_EMBEDDED_TOOLCHAIN_REF: "esp-19.1.2_20250211" @@ -51,6 +50,7 @@ before_script: when: always expire_in: 1 day dependencies: [] + retry: 2 variables: TARGET: "Xtensa;RISCV" USE_LINKER: "ld" @@ -210,6 +210,7 @@ build_and_test: - ./esp-dsp/scripts/ when: always expire_in: 1 day + retry: 2 variables: TEST_APP_ESP_DSP_CUSTOM_BRANCH: "llvm-19.1.2-optimization-test" after_script: From a8a8fecac7a7aa6502410a3a09674bbd688f5903 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Mon, 24 Feb 2025 17:50:40 +0300 Subject: [PATCH 286/289] Update build scripts repo ref to release tag 'esp-19.1.2_20250225' --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 498d65a7e14ae..8ffdc368695be 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -14,7 +14,7 @@ default: interruptible: true variables: - ESP_LLVM_EMBEDDED_TOOLCHAIN_REF: "esp-19.1.2_20250211" + ESP_LLVM_EMBEDDED_TOOLCHAIN_REF: "esp-19.1.2_20250225" ESP_GNU_TOOLCHAIN_VER: 
"14.2.0_20241119" CROSS_ARM_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-arm:2 CROSS_WIN_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-win:2 From a2e340942b77f4c7b71cfbe0f43b0b5129106e47 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Tue, 11 Mar 2025 22:35:50 +0300 Subject: [PATCH 287/289] esp/ci: Update 'test_xesppie' deps to 'need' --- .gitlab-ci.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8ffdc368695be..b879bd9386bd5 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -465,6 +465,7 @@ sign_aarch64-apple-darwin: pushd $PWD/esp-dsp/test_app test_esp_dsp: + stage: test image: espressif/idf:latest tags: [ "amd64", "build" ] allow_failure: true @@ -497,8 +498,8 @@ test_esp_dsp: test_xesppie: stage: test - dependencies: - - build_x86_64-linux-gnu + needs: + - job: "build_x86_64-linux-gnu" allow_failure: true only: - tags From 5a99af44e2246f4ac1e507b356ad6502fd91d7a7 Mon Sep 17 00:00:00 2001 From: Alexey Gerenkov Date: Tue, 11 Mar 2025 22:37:08 +0300 Subject: [PATCH 288/289] esp/ci: Update build scripts repo ref to release tag 'esp-19.1.2_20250312' --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b879bd9386bd5..70b7a40daee36 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -14,7 +14,7 @@ default: interruptible: true variables: - ESP_LLVM_EMBEDDED_TOOLCHAIN_REF: "esp-19.1.2_20250225" + ESP_LLVM_EMBEDDED_TOOLCHAIN_REF: "esp-19.1.2_20250312" ESP_GNU_TOOLCHAIN_VER: "14.2.0_20241119" CROSS_ARM_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-arm:2 CROSS_WIN_IMAGE: $CI_DOCKER_REGISTRY/llvm-build-cross-win:2 From 0b10ac7aa166e0c040668776c10c72aaa7595d80 Mon Sep 17 00:00:00 2001 From: Stefan Stipanovic Date: Mon, 7 Apr 2025 16:35:24 +0200 Subject: [PATCH 289/289] [RISCV] Rewrite esp32p4 intrinsics to have the return value where appropriate. 
--- .../clang/Basic/BuiltinsRISCVESP32P4.td | 350 +-- clang/test/CodeGen/RISCV/riscv-esp32p4.c | 919 ++++--- .../include/llvm/IR/IntrinsicsRISCVESP32P4.td | 350 +-- .../Target/RISCV/RISCVESP32P4ISelLowering.cpp | 2239 +++++++---------- .../lib/Target/RISCV/RISCVInstrInfoESP32P4.td | 1048 ++++---- llvm/test/CodeGen/RISCV/esp32p4.ll | 2098 +++++++-------- 6 files changed, 3328 insertions(+), 3676 deletions(-) diff --git a/clang/include/clang/Basic/BuiltinsRISCVESP32P4.td b/clang/include/clang/Basic/BuiltinsRISCVESP32P4.td index cf41cee6bfbee..69ad0cc255a21 100644 --- a/clang/include/clang/Basic/BuiltinsRISCVESP32P4.td +++ b/clang/include/clang/Basic/BuiltinsRISCVESP32P4.td @@ -1,80 +1,80 @@ let Features = "xesppie" in { def esp_vcmulas_s16_qacc_h : RISCVBuiltin<"void(unsigned int, unsigned int)">; -def esp_vcmulas_s16_qacc_h_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; -def esp_vcmulas_s16_qacc_h_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vcmulas_s16_qacc_h_ld_ip : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vcmulas_s16_qacc_h_ld_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vcmulas_s16_qacc_l : RISCVBuiltin<"void(unsigned int, unsigned int)">; -def esp_vcmulas_s16_qacc_l_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; -def esp_vcmulas_s16_qacc_l_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vcmulas_s16_qacc_l_ld_ip : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vcmulas_s16_qacc_l_ld_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vcmulas_s8_qacc_h : RISCVBuiltin<"void(unsigned int, unsigned int)">; -def esp_vcmulas_s8_qacc_h_ld_ip : 
RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; -def esp_vcmulas_s8_qacc_h_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vcmulas_s8_qacc_h_ld_ip : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vcmulas_s8_qacc_h_ld_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vcmulas_s8_qacc_l : RISCVBuiltin<"void(unsigned int, unsigned int)">; -def esp_vcmulas_s8_qacc_l_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; -def esp_vcmulas_s8_qacc_l_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vcmulas_s8_qacc_l_ld_ip : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vcmulas_s8_qacc_l_ld_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmulas_s16_qacc : RISCVBuiltin<"void(unsigned int, unsigned int)">; -def esp_vmulas_s16_qacc_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; -def esp_vmulas_s16_qacc_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmulas_s16_qacc_st_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, int)">; -def esp_vmulas_s16_qacc_st_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_s16_qacc_ld_ip : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vmulas_s16_qacc_ld_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_s16_qacc_st_ip : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, int)">; +def esp_vmulas_s16_qacc_st_xp : RISCVBuiltin<"int(unsigned int, 
unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmulas_s16_xacc : RISCVBuiltin<"void(unsigned int, unsigned int)">; -def esp_vmulas_s16_xacc_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; -def esp_vmulas_s16_xacc_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmulas_s16_xacc_st_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, int)">; -def esp_vmulas_s16_xacc_st_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_s16_xacc_ld_ip : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vmulas_s16_xacc_ld_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_s16_xacc_st_ip : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, int)">; +def esp_vmulas_s16_xacc_st_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmulas_s8_qacc : RISCVBuiltin<"void(unsigned int, unsigned int)">; -def esp_vmulas_s8_qacc_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; -def esp_vmulas_s8_qacc_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmulas_s8_qacc_st_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, int)">; -def esp_vmulas_s8_qacc_st_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_s8_qacc_ld_ip : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vmulas_s8_qacc_ld_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_s8_qacc_st_ip : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, int)">; 
+def esp_vmulas_s8_qacc_st_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmulas_s8_xacc : RISCVBuiltin<"void(unsigned int, unsigned int)">; -def esp_vmulas_s8_xacc_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; -def esp_vmulas_s8_xacc_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmulas_s8_xacc_st_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, int)">; -def esp_vmulas_s8_xacc_st_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_s8_xacc_ld_ip : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vmulas_s8_xacc_ld_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_s8_xacc_st_ip : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, int)">; +def esp_vmulas_s8_xacc_st_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmulas_u16_qacc : RISCVBuiltin<"void(unsigned int, unsigned int)">; -def esp_vmulas_u16_qacc_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; -def esp_vmulas_u16_qacc_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmulas_u16_qacc_st_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, int)">; -def esp_vmulas_u16_qacc_st_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_u16_qacc_ld_ip : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vmulas_u16_qacc_ld_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_u16_qacc_st_ip : 
RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, int)">; +def esp_vmulas_u16_qacc_st_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmulas_u16_xacc : RISCVBuiltin<"void(unsigned int, unsigned int)">; -def esp_vmulas_u16_xacc_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; -def esp_vmulas_u16_xacc_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmulas_u16_xacc_st_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, int)">; -def esp_vmulas_u16_xacc_st_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_u16_xacc_ld_ip : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vmulas_u16_xacc_ld_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_u16_xacc_st_ip : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, int)">; +def esp_vmulas_u16_xacc_st_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmulas_u8_qacc : RISCVBuiltin<"void(unsigned int, unsigned int)">; -def esp_vmulas_u8_qacc_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; -def esp_vmulas_u8_qacc_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmulas_u8_qacc_st_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, int)">; -def esp_vmulas_u8_qacc_st_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_u8_qacc_ld_ip : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vmulas_u8_qacc_ld_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned 
int, unsigned int, unsigned int)">; +def esp_vmulas_u8_qacc_st_ip : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, int)">; +def esp_vmulas_u8_qacc_st_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmulas_u8_xacc : RISCVBuiltin<"void(unsigned int, unsigned int)">; -def esp_vmulas_u8_xacc_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; -def esp_vmulas_u8_xacc_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmulas_u8_xacc_st_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, int)">; -def esp_vmulas_u8_xacc_st_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmulas_s16_qacc_ldbc_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmulas_s8_qacc_ldbc_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmulas_u16_qacc_ldbc_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmulas_u8_qacc_ldbc_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_u8_xacc_ld_ip : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_vmulas_u8_xacc_ld_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_u8_xacc_st_ip : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, int)">; +def esp_vmulas_u8_xacc_st_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_s16_qacc_ldbc_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_s8_qacc_ldbc_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int)">; +def 
esp_vmulas_u16_qacc_ldbc_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmulas_u8_qacc_ldbc_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vsmulas_s16_qacc : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vsmulas_s16_qacc_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsmulas_s16_qacc_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vsmulas_s8_qacc : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vsmulas_s8_qacc_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsmulas_s8_qacc_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vsmulas_u16_qacc : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vsmulas_u16_qacc_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsmulas_u16_qacc_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vsmulas_u8_qacc : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vsmulas_u8_qacc_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsmulas_u8_qacc_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_cmul_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_cmul_s16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_cmul_s16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_cmul_s16_ld_incp : 
RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_cmul_s16_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_cmul_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_cmul_s8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_cmul_s8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_cmul_s8_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_cmul_s8_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_cmul_u16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_cmul_u16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_cmul_u16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_cmul_u16_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_cmul_u16_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_cmul_u8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_cmul_u8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_cmul_u8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_cmul_u8_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def 
esp_cmul_u8_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_max_s16_a : RISCVBuiltin<"void(unsigned int, unsigned int)">; def esp_max_s32_a : RISCVBuiltin<"void(unsigned int, unsigned int)">; def esp_max_s8_a : RISCVBuiltin<"void(unsigned int, unsigned int)">; @@ -91,74 +91,74 @@ def esp_vabs_16 : RISCVBuiltin<"void(unsigned int, unsigned int)">; def esp_vabs_32 : RISCVBuiltin<"void(unsigned int, unsigned int)">; def esp_vabs_8 : RISCVBuiltin<"void(unsigned int, unsigned int)">; def esp_vadd_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vadd_s16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vadd_s16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_s16_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_s16_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vadd_s32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vadd_s32_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vadd_s32_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_s32_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_s32_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vadd_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vadd_s8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vadd_s8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned 
int)">; +def esp_vadd_s8_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_s8_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vadd_u16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vadd_u16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vadd_u16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_u16_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_u16_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vadd_u32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vadd_u32_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vadd_u32_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_u32_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_u32_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vadd_u8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vadd_u8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vadd_u8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_u8_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vadd_u8_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vclamp_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned 
int)">; def esp_vmax_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vmax_s16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmax_s16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_s16_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_s16_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmax_s32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vmax_s32_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmax_s32_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_s32_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_s32_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmax_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vmax_s8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmax_s8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_s8_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_s8_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmax_u16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vmax_u16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmax_u16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, 
unsigned int)">; +def esp_vmax_u16_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_u16_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmax_u32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vmax_u32_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmax_u32_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_u32_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_u32_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmax_u8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vmax_u8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmax_u8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_u8_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmax_u8_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmin_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vmin_s16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmin_s16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_s16_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_s16_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmin_s32 : RISCVBuiltin<"void(unsigned int, unsigned int, 
unsigned int)">; -def esp_vmin_s32_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmin_s32_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_s32_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_s32_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmin_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vmin_s8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmin_s8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_s8_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_s8_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmin_u16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vmin_u16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmin_u16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_u16_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_u16_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmin_u32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vmin_u32_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmin_u32_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_u32_ld_incp : RISCVBuiltin<"int(unsigned int, 
unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_u32_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmin_u8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vmin_u8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmin_u8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_u8_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmin_u8_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmul_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vmul_s16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmul_s16_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmul_s16_s8xs8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmul_s16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmul_s16_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmul_s32_s16xs16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmul_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vmul_s8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmul_s8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmul_s8_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmul_s8_st_incp : RISCVBuiltin<"int(unsigned int, 
unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmul_u16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vmul_u16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmul_u16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmul_u16_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmul_u16_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vmul_u8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vmul_u8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vmul_u8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmul_u8_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vmul_u8_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vprelu_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vprelu_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vrelu_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; @@ -178,26 +178,26 @@ def esp_vssubs_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int) def esp_vssubs_u16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; def esp_vssubs_u8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; def esp_vsub_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vsub_s16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vsub_s16_st_incp : RISCVBuiltin<"void(unsigned int, 
unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_s16_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_s16_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vsub_s32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vsub_s32_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vsub_s32_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_s32_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_s32_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vsub_s8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vsub_s8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vsub_s8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_s8_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_s8_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vsub_u16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vsub_u16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vsub_u16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_u16_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_u16_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vsub_u32 : 
RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vsub_u32_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vsub_u32_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_u32_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_u32_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_vsub_u8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vsub_u8_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vsub_u8_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_u8_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vsub_u8_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_addx2 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; def esp_addx4 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_sat : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_sat : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int)">; def esp_subx2 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; def esp_subx4 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; def esp_andq : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; @@ -232,7 +232,7 @@ def esp_movi_32_a : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int) def esp_movi_32_q : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; def esp_movi_8_a : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; def esp_movi_8_q : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned 
int)">; -def esp_movx_r_cfg : RISCVBuiltin<"void(unsigned int)">; +def esp_movx_r_cfg : RISCVBuiltin<"unsigned int()">; def esp_movx_r_fft_bit_width : RISCVBuiltin<"void(unsigned int)">; def esp_movx_r_perf : RISCVBuiltin<"void(unsigned int, unsigned int)">; def esp_movx_r_sar : RISCVBuiltin<"void(unsigned int)">; @@ -263,71 +263,71 @@ def esp_vzipt_8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)"> def esp_zero_q : RISCVBuiltin<"void(unsigned int)">; def esp_zero_qacc : RISCVBuiltin<"void()">; def esp_zero_xacc : RISCVBuiltin<"void()">; -def esp_fft_ams_s16_ld_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_fft_ams_s16_ld_incp_uaup : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_fft_ams_s16_ld_r32_decp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_fft_ams_s16_ld_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_fft_ams_s16_ld_incp_uaup : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_fft_ams_s16_ld_r32_decp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_fft_ams_s16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_fft_bitrev : RISCVBuiltin<"void(unsigned int, unsigned int)">; -def esp_fft_cmul_s16_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_fft_cmul_s16_st_xp : 
RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_fft_bitrev : RISCVBuiltin<"int(unsigned int, unsigned int)">; +def esp_fft_cmul_s16_ld_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_fft_cmul_s16_st_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_fft_r2bf_s16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_fft_r2bf_s16_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_fft_vst_r32_decp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_ld_128_usar_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int)">; -def esp_ld_128_usar_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_ld_xacc_ip : RISCVBuiltin<"void(unsigned int, int)">; -def esp_ldqa_s16_128_ip : RISCVBuiltin<"void(unsigned int, int)">; -def esp_ldqa_s16_128_xp : RISCVBuiltin<"void(unsigned int, unsigned int)">; -def esp_ldqa_s8_128_ip : RISCVBuiltin<"void(unsigned int, int)">; -def esp_ldqa_s8_128_xp : RISCVBuiltin<"void(unsigned int, unsigned int)">; -def esp_ldqa_u16_128_ip : RISCVBuiltin<"void(unsigned int, int)">; -def esp_ldqa_u16_128_xp : RISCVBuiltin<"void(unsigned int, unsigned int)">; -def esp_ldqa_u8_128_ip : RISCVBuiltin<"void(unsigned int, int)">; -def esp_ldqa_u8_128_xp : RISCVBuiltin<"void(unsigned int, unsigned int)">; -def esp_vldbc_16_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int)">; -def esp_vldbc_16_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vldbc_32_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int)">; -def esp_vldbc_32_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def 
esp_vldbc_8_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int)">; -def esp_vldbc_8_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vldext_s16_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int, unsigned int)">; -def esp_vldext_s16_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vldext_s8_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int, unsigned int)">; -def esp_vldext_s8_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vldext_u16_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int, unsigned int)">; -def esp_vldext_u16_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vldext_u8_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int, unsigned int)">; -def esp_vldext_u8_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vldhbc_16_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_ld_qacc_h_h_128_ip : RISCVBuiltin<"void(unsigned int, int)">; -def esp_ld_qacc_h_l_128_ip : RISCVBuiltin<"void(unsigned int, int)">; -def esp_ld_qacc_l_h_128_ip : RISCVBuiltin<"void(unsigned int, int)">; -def esp_ld_qacc_l_l_128_ip : RISCVBuiltin<"void(unsigned int, int)">; -def esp_ld_ua_state_ip : RISCVBuiltin<"void(unsigned int, int)">; +def esp_fft_r2bf_s16_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_fft_vst_r32_decp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int)">; +def esp_ld_128_usar_ip : RISCVBuiltin<"int(unsigned int, int, unsigned int)">; +def esp_ld_128_usar_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int)">; +def esp_ld_xacc_ip : RISCVBuiltin<"int(unsigned int, int)">; +def esp_ldqa_s16_128_ip : RISCVBuiltin<"int(unsigned int, int)">; +def esp_ldqa_s16_128_xp : RISCVBuiltin<"int(unsigned int, unsigned int)">; +def esp_ldqa_s8_128_ip : 
RISCVBuiltin<"int(unsigned int, int)">; +def esp_ldqa_s8_128_xp : RISCVBuiltin<"int(unsigned int, unsigned int)">; +def esp_ldqa_u16_128_ip : RISCVBuiltin<"int(unsigned int, int)">; +def esp_ldqa_u16_128_xp : RISCVBuiltin<"int(unsigned int, unsigned int)">; +def esp_ldqa_u8_128_ip : RISCVBuiltin<"int(unsigned int, int)">; +def esp_ldqa_u8_128_xp : RISCVBuiltin<"int(unsigned int, unsigned int)">; +def esp_vldbc_16_ip : RISCVBuiltin<"int(unsigned int, int, unsigned int)">; +def esp_vldbc_16_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int)">; +def esp_vldbc_32_ip : RISCVBuiltin<"int(unsigned int, int, unsigned int)">; +def esp_vldbc_32_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int)">; +def esp_vldbc_8_ip : RISCVBuiltin<"int(unsigned int, int, unsigned int)">; +def esp_vldbc_8_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int)">; +def esp_vldext_s16_ip : RISCVBuiltin<"int(unsigned int, int, unsigned int, unsigned int)">; +def esp_vldext_s16_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vldext_s8_ip : RISCVBuiltin<"int(unsigned int, int, unsigned int, unsigned int)">; +def esp_vldext_s8_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vldext_u16_ip : RISCVBuiltin<"int(unsigned int, int, unsigned int, unsigned int)">; +def esp_vldext_u16_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vldext_u8_ip : RISCVBuiltin<"int(unsigned int, int, unsigned int, unsigned int)">; +def esp_vldext_u8_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_vldhbc_16_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int)">; +def esp_ld_qacc_h_h_128_ip : RISCVBuiltin<"int(unsigned int, int)">; +def esp_ld_qacc_h_l_128_ip : RISCVBuiltin<"int(unsigned int, int)">; +def esp_ld_qacc_l_h_128_ip : RISCVBuiltin<"int(unsigned int, int)">; +def esp_ld_qacc_l_l_128_ip : 
RISCVBuiltin<"int(unsigned int, int)">; +def esp_ld_ua_state_ip : RISCVBuiltin<"int(unsigned int, int)">; def esp_ldxq_32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_st_qacc_h_h_128_ip : RISCVBuiltin<"void(unsigned int, int)">; -def esp_st_qacc_h_l_128_ip : RISCVBuiltin<"void(unsigned int, int)">; -def esp_st_qacc_l_h_128_ip : RISCVBuiltin<"void(unsigned int, int)">; -def esp_st_qacc_l_l_128_ip : RISCVBuiltin<"void(unsigned int, int)">; -def esp_st_ua_state_ip : RISCVBuiltin<"void(unsigned int, int)">; +def esp_st_qacc_h_h_128_ip : RISCVBuiltin<"int(unsigned int, int)">; +def esp_st_qacc_h_l_128_ip : RISCVBuiltin<"int(unsigned int, int)">; +def esp_st_qacc_l_h_128_ip : RISCVBuiltin<"int(unsigned int, int)">; +def esp_st_qacc_l_l_128_ip : RISCVBuiltin<"int(unsigned int, int)">; +def esp_st_ua_state_ip : RISCVBuiltin<"int(unsigned int, int)">; def esp_stxq_32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; -def esp_vld_128_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int)">; -def esp_vld_128_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vld_h_64_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int)">; -def esp_vld_h_64_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vld_l_64_ip : RISCVBuiltin<"void(unsigned int, int, unsigned int)">; -def esp_vld_l_64_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vst_128_ip : RISCVBuiltin<"void(unsigned int, unsigned int, int)">; -def esp_vst_128_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vst_h_64_ip : RISCVBuiltin<"void(unsigned int, unsigned int, int)">; -def esp_vst_h_64_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_vst_l_64_ip : RISCVBuiltin<"void(unsigned int, unsigned int, int)">; -def esp_vst_l_64_xp : RISCVBuiltin<"void(unsigned int, unsigned int, 
unsigned int)">; +def esp_vld_128_ip : RISCVBuiltin<"int(unsigned int, int, unsigned int)">; +def esp_vld_128_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int)">; +def esp_vld_h_64_ip : RISCVBuiltin<"int(unsigned int, int, unsigned int)">; +def esp_vld_h_64_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int)">; +def esp_vld_l_64_ip : RISCVBuiltin<"int(unsigned int, int, unsigned int)">; +def esp_vld_l_64_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int)">; +def esp_vst_128_ip : RISCVBuiltin<"int(unsigned int, unsigned int, int)">; +def esp_vst_128_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int)">; +def esp_vst_h_64_ip : RISCVBuiltin<"int(unsigned int, unsigned int, int)">; +def esp_vst_h_64_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int)">; +def esp_vst_l_64_ip : RISCVBuiltin<"int(unsigned int, unsigned int, int)">; +def esp_vst_l_64_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int)">; def esp_slci_2q : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; def esp_slcxxp_2q : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_src_q : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_src_q_ld_ip : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, int, unsigned int)">; -def esp_src_q_ld_xp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; +def esp_src_q_ld_ip : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, int, unsigned int)">; +def esp_src_q_ld_xp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_src_q_qup : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; def esp_srci_2q : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; def esp_srcmb_s16_q_qacc : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; @@ -338,7 +338,7 @@ def esp_srcmb_u16_q_qacc : 
RISCVBuiltin<"void(unsigned int, unsigned int, unsign def esp_srcmb_u16_qacc : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; def esp_srcmb_u8_q_qacc : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; def esp_srcmb_u8_qacc : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_srcq_128_st_incp : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; +def esp_srcq_128_st_incp : RISCVBuiltin<"int(unsigned int, unsigned int, unsigned int)">; def esp_srcxxp_2q : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int, unsigned int)">; def esp_srs_s_xacc : RISCVBuiltin<"void(unsigned int, unsigned int)">; def esp_srs_u_xacc : RISCVBuiltin<"void(unsigned int, unsigned int)">; @@ -351,6 +351,6 @@ def esp_vsr_u32 : RISCVBuiltin<"void(unsigned int, unsigned int)">; def esp_vsrd_16 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; def esp_vsrd_32 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; def esp_vsrd_8 : RISCVBuiltin<"void(unsigned int, unsigned int, unsigned int)">; -def esp_st_s_xacc_ip : RISCVBuiltin<"void(unsigned int, int)">; -def esp_st_u_xacc_ip : RISCVBuiltin<"void(unsigned int, int)">; +def esp_st_s_xacc_ip : RISCVBuiltin<"int(unsigned int, int)">; +def esp_st_u_xacc_ip : RISCVBuiltin<"int(unsigned int, int)">; } diff --git a/clang/test/CodeGen/RISCV/riscv-esp32p4.c b/clang/test/CodeGen/RISCV/riscv-esp32p4.c index 912b217276b63..88bcc8fa32043 100644 --- a/clang/test/CodeGen/RISCV/riscv-esp32p4.c +++ b/clang/test/CodeGen/RISCV/riscv-esp32p4.c @@ -10,649 +10,648 @@ // CHECK-NEXT: store i32 10, ptr [[DATA]], align 4 // CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s16.qacc.h(i32 4, i32 2) // CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.ip(i32 4, i32 0, i32 [[TMP0]], i32 -96, i32 3) -// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = call i32 
@llvm.riscv.esp.vcmulas.s16.qacc.h.ld.ip(i32 4, i32 0, i32 [[TMP0]], i32 -96, i32 3) // CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.xp(i32 [[TMP1]], i32 5, i32 5, i32 [[TMP2]], i32 5) -// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s16.qacc.l(i32 6, i32 1) // CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.ip(i32 2, i32 3, i32 [[TMP3]], i32 -48, i32 3) -// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.xp(i32 [[TMP2]], i32 5, i32 5, i32 [[TMP3]], i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s16.qacc.l(i32 6, i32 1) // CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.xp(i32 [[TMP4]], i32 7, i32 2, i32 [[TMP5]], i32 1) -// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s8.qacc.h(i32 4, i32 4) -// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.ip(i32 7, i32 4, i32 [[TMP6]], i32 -128, i32 4) +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.ip(i32 2, i32 3, i32 [[TMP5]], i32 -48, i32 3) // CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[DATA]], align 4 // CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.xp(i32 [[TMP7]], i32 2, i32 3, i32 [[TMP8]], i32 1) -// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s8.qacc.l(i32 6, i32 4) -// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.ip(i32 5, i32 5, i32 [[TMP9]], i32 16, i32 7) +// CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.xp(i32 [[TMP7]], i32 7, i32 2, i32 [[TMP8]], i32 1) +// CHECK-NEXT: call void 
@llvm.riscv.esp.vcmulas.s8.qacc.h(i32 4, i32 4) // CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.xp(i32 [[TMP10]], i32 4, i32 4, i32 [[TMP11]], i32 2) -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.qacc(i32 7, i32 6) +// CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.ip(i32 7, i32 4, i32 [[TMP10]], i32 -128, i32 4) // CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.qacc.ld.ip(i32 0, i32 4, i32 [[TMP12]], i32 96, i32 4) // CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.qacc.ld.xp(i32 [[TMP13]], i32 4, i32 4, i32 [[TMP14]], i32 7) +// CHECK-NEXT: [[TMP14:%.*]] = call i32 @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.xp(i32 [[TMP12]], i32 2, i32 3, i32 [[TMP13]], i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s8.qacc.l(i32 6, i32 4) // CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.qacc.st.ip(i32 2, i32 1, i32 7, i32 [[TMP15]], i32 -128) -// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP16:%.*]] = call i32 @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.ip(i32 5, i32 5, i32 [[TMP15]], i32 16, i32 7) // CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.qacc.st.xp(i32 [[TMP16]], i32 1, i32 2, i32 6, i32 [[TMP17]]) -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.xacc(i32 1, i32 3) // CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.xacc.ld.ip(i32 7, i32 3, i32 [[TMP18]], i32 -96, i32 5) -// CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP19:%.*]] = call i32 
@llvm.riscv.esp.vcmulas.s8.qacc.l.ld.xp(i32 [[TMP17]], i32 4, i32 4, i32 [[TMP18]], i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.qacc(i32 7, i32 6) // CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.xacc.ld.xp(i32 [[TMP19]], i32 3, i32 1, i32 [[TMP20]], i32 1) -// CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.xacc.st.ip(i32 2, i32 0, i32 0, i32 [[TMP21]], i32 64) +// CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.riscv.esp.vmulas.s16.qacc.ld.ip(i32 0, i32 4, i32 [[TMP20]], i32 96, i32 4) // CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[DATA]], align 4 // CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.xacc.st.xp(i32 [[TMP22]], i32 6, i32 3, i32 6, i32 [[TMP23]]) -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.qacc(i32 0, i32 0) -// CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.qacc.ld.ip(i32 0, i32 3, i32 [[TMP24]], i32 0, i32 7) +// CHECK-NEXT: [[TMP24:%.*]] = call i32 @llvm.riscv.esp.vmulas.s16.qacc.ld.xp(i32 [[TMP22]], i32 4, i32 4, i32 [[TMP23]], i32 7) // CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.qacc.ld.xp(i32 [[TMP25]], i32 4, i32 3, i32 [[TMP26]], i32 4) +// CHECK-NEXT: [[TMP26:%.*]] = call i32 @llvm.riscv.esp.vmulas.s16.qacc.st.ip(i32 2, i32 1, i32 7, i32 [[TMP25]], i32 -128) // CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.qacc.st.ip(i32 3, i32 3, i32 5, i32 [[TMP27]], i32 -64) // CHECK-NEXT: [[TMP28:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.qacc.st.xp(i32 [[TMP28]], i32 4, 
i32 7, i32 0, i32 [[TMP29]]) -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.xacc(i32 3, i32 3) +// CHECK-NEXT: [[TMP29:%.*]] = call i32 @llvm.riscv.esp.vmulas.s16.qacc.st.xp(i32 [[TMP27]], i32 1, i32 2, i32 6, i32 [[TMP28]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.xacc(i32 1, i32 3) // CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.xacc.ld.ip(i32 3, i32 2, i32 [[TMP30]], i32 0, i32 5) -// CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP31:%.*]] = call i32 @llvm.riscv.esp.vmulas.s16.xacc.ld.ip(i32 7, i32 3, i32 [[TMP30]], i32 -96, i32 5) // CHECK-NEXT: [[TMP32:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.xacc.ld.xp(i32 [[TMP31]], i32 6, i32 3, i32 [[TMP32]], i32 0) // CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.xacc.st.ip(i32 1, i32 7, i32 7, i32 [[TMP33]], i32 -32) -// CHECK-NEXT: [[TMP34:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP34:%.*]] = call i32 @llvm.riscv.esp.vmulas.s16.xacc.ld.xp(i32 [[TMP32]], i32 3, i32 1, i32 [[TMP33]], i32 1) // CHECK-NEXT: [[TMP35:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.xacc.st.xp(i32 [[TMP34]], i32 6, i32 7, i32 6, i32 [[TMP35]]) -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.qacc(i32 5, i32 4) -// CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.qacc.ld.ip(i32 5, i32 2, i32 [[TMP36]], i32 64, i32 6) +// CHECK-NEXT: [[TMP36:%.*]] = call i32 @llvm.riscv.esp.vmulas.s16.xacc.st.ip(i32 2, i32 0, i32 0, i32 [[TMP35]], i32 64) // CHECK-NEXT: [[TMP37:%.*]] = load i32, ptr [[DATA]], align 4 // CHECK-NEXT: [[TMP38:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.qacc.ld.xp(i32 [[TMP37]], i32 5, i32 7, i32 [[TMP38]], i32 7) -// 
CHECK-NEXT: [[TMP39:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.qacc.st.ip(i32 1, i32 4, i32 3, i32 [[TMP39]], i32 -96) +// CHECK-NEXT: [[TMP39:%.*]] = call i32 @llvm.riscv.esp.vmulas.s16.xacc.st.xp(i32 [[TMP37]], i32 6, i32 3, i32 6, i32 [[TMP38]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.qacc(i32 0, i32 0) // CHECK-NEXT: [[TMP40:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP41:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.qacc.st.xp(i32 [[TMP40]], i32 5, i32 0, i32 2, i32 [[TMP41]]) -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.xacc(i32 0, i32 7) +// CHECK-NEXT: [[TMP41:%.*]] = call i32 @llvm.riscv.esp.vmulas.s8.qacc.ld.ip(i32 0, i32 3, i32 [[TMP40]], i32 0, i32 7) // CHECK-NEXT: [[TMP42:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.xacc.ld.ip(i32 6, i32 6, i32 [[TMP42]], i32 -96, i32 4) // CHECK-NEXT: [[TMP43:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP44:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.xacc.ld.xp(i32 [[TMP43]], i32 6, i32 5, i32 [[TMP44]], i32 6) +// CHECK-NEXT: [[TMP44:%.*]] = call i32 @llvm.riscv.esp.vmulas.s8.qacc.ld.xp(i32 [[TMP42]], i32 4, i32 3, i32 [[TMP43]], i32 4) // CHECK-NEXT: [[TMP45:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.xacc.st.ip(i32 3, i32 0, i32 4, i32 [[TMP45]], i32 64) -// CHECK-NEXT: [[TMP46:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP46:%.*]] = call i32 @llvm.riscv.esp.vmulas.s8.qacc.st.ip(i32 3, i32 3, i32 5, i32 [[TMP45]], i32 -64) // CHECK-NEXT: [[TMP47:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.xacc.st.xp(i32 [[TMP46]], i32 1, i32 0, i32 4, i32 [[TMP47]]) -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.qacc(i32 5, i32 4) // CHECK-NEXT: [[TMP48:%.*]] = load i32, ptr 
[[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.qacc.ld.ip(i32 5, i32 3, i32 [[TMP48]], i32 80, i32 5) -// CHECK-NEXT: [[TMP49:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP49:%.*]] = call i32 @llvm.riscv.esp.vmulas.s8.qacc.st.xp(i32 [[TMP47]], i32 4, i32 7, i32 0, i32 [[TMP48]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.xacc(i32 3, i32 3) // CHECK-NEXT: [[TMP50:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.qacc.ld.xp(i32 [[TMP49]], i32 4, i32 7, i32 [[TMP50]], i32 4) -// CHECK-NEXT: [[TMP51:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.qacc.st.ip(i32 3, i32 3, i32 5, i32 [[TMP51]], i32 -96) +// CHECK-NEXT: [[TMP51:%.*]] = call i32 @llvm.riscv.esp.vmulas.s8.xacc.ld.ip(i32 3, i32 2, i32 [[TMP50]], i32 0, i32 5) // CHECK-NEXT: [[TMP52:%.*]] = load i32, ptr [[DATA]], align 4 // CHECK-NEXT: [[TMP53:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.qacc.st.xp(i32 [[TMP52]], i32 6, i32 7, i32 3, i32 [[TMP53]]) -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.xacc(i32 0, i32 1) -// CHECK-NEXT: [[TMP54:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.xacc.ld.ip(i32 6, i32 0, i32 [[TMP54]], i32 -32, i32 7) +// CHECK-NEXT: [[TMP54:%.*]] = call i32 @llvm.riscv.esp.vmulas.s8.xacc.ld.xp(i32 [[TMP52]], i32 6, i32 3, i32 [[TMP53]], i32 0) // CHECK-NEXT: [[TMP55:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP56:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.xacc.ld.xp(i32 [[TMP55]], i32 3, i32 3, i32 [[TMP56]], i32 5) +// CHECK-NEXT: [[TMP56:%.*]] = call i32 @llvm.riscv.esp.vmulas.s8.xacc.st.ip(i32 1, i32 7, i32 7, i32 [[TMP55]], i32 -32) // CHECK-NEXT: [[TMP57:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.xacc.st.ip(i32 7, i32 0, i32 4, i32 [[TMP57]], 
i32 32) // CHECK-NEXT: [[TMP58:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP59:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.xacc.st.xp(i32 [[TMP58]], i32 1, i32 0, i32 0, i32 [[TMP59]]) +// CHECK-NEXT: [[TMP59:%.*]] = call i32 @llvm.riscv.esp.vmulas.s8.xacc.st.xp(i32 [[TMP57]], i32 6, i32 7, i32 6, i32 [[TMP58]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.qacc(i32 5, i32 4) // CHECK-NEXT: [[TMP60:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.qacc.ldbc.incp(i32 3, i32 6, i32 [[TMP60]], i32 7) -// CHECK-NEXT: [[TMP61:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.qacc.ldbc.incp(i32 5, i32 3, i32 [[TMP61]], i32 6) +// CHECK-NEXT: [[TMP61:%.*]] = call i32 @llvm.riscv.esp.vmulas.u16.qacc.ld.ip(i32 5, i32 2, i32 [[TMP60]], i32 64, i32 6) // CHECK-NEXT: [[TMP62:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.qacc.ldbc.incp(i32 0, i32 3, i32 [[TMP62]], i32 2) // CHECK-NEXT: [[TMP63:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.qacc.ldbc.incp(i32 4, i32 7, i32 [[TMP63]], i32 3) -// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.s16.qacc(i32 7, i32 7, i32 4) -// CHECK-NEXT: [[TMP64:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.s16.qacc.ld.incp(i32 7, i32 7, i32 [[TMP64]], i32 4, i32 1) -// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.s8.qacc(i32 7, i32 0, i32 7) +// CHECK-NEXT: [[TMP64:%.*]] = call i32 @llvm.riscv.esp.vmulas.u16.qacc.ld.xp(i32 [[TMP62]], i32 5, i32 7, i32 [[TMP63]], i32 7) // CHECK-NEXT: [[TMP65:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.s8.qacc.ld.incp(i32 5, i32 6, i32 [[TMP65]], i32 15, i32 2) -// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.u16.qacc(i32 7, i32 0, i32 10) -// CHECK-NEXT: [[TMP66:%.*]] = load i32, 
ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.u16.qacc.ld.incp(i32 7, i32 6, i32 [[TMP66]], i32 1, i32 0) -// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.u8.qacc(i32 3, i32 6, i32 5) +// CHECK-NEXT: [[TMP66:%.*]] = call i32 @llvm.riscv.esp.vmulas.u16.qacc.st.ip(i32 1, i32 4, i32 3, i32 [[TMP65]], i32 -96) // CHECK-NEXT: [[TMP67:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.u8.qacc.ld.incp(i32 6, i32 1, i32 [[TMP67]], i32 4, i32 0) -// CHECK-NEXT: call void @llvm.riscv.esp.cmul.s16(i32 2, i32 1, i32 3, i32 1) // CHECK-NEXT: [[TMP68:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.cmul.s16.ld.incp(i32 2, i32 7, i32 [[TMP68]], i32 0, i32 5, i32 0) -// CHECK-NEXT: [[TMP69:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.cmul.s16.st.incp(i32 7, i32 4, i32 6, i32 [[TMP69]], i32 2, i32 5) -// CHECK-NEXT: call void @llvm.riscv.esp.cmul.s8(i32 5, i32 7, i32 2, i32 4) +// CHECK-NEXT: [[TMP69:%.*]] = call i32 @llvm.riscv.esp.vmulas.u16.qacc.st.xp(i32 [[TMP67]], i32 5, i32 0, i32 2, i32 [[TMP68]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.xacc(i32 0, i32 7) // CHECK-NEXT: [[TMP70:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.cmul.s8.ld.incp(i32 0, i32 6, i32 [[TMP70]], i32 2, i32 7, i32 5) -// CHECK-NEXT: [[TMP71:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.cmul.s8.st.incp(i32 1, i32 6, i32 5, i32 [[TMP71]], i32 0, i32 2) -// CHECK-NEXT: call void @llvm.riscv.esp.cmul.u16(i32 7, i32 4, i32 0, i32 0) +// CHECK-NEXT: [[TMP71:%.*]] = call i32 @llvm.riscv.esp.vmulas.u16.xacc.ld.ip(i32 6, i32 6, i32 [[TMP70]], i32 -96, i32 4) // CHECK-NEXT: [[TMP72:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.cmul.u16.ld.incp(i32 2, i32 0, i32 [[TMP72]], i32 3, i32 1, i32 1) // CHECK-NEXT: [[TMP73:%.*]] = load i32, ptr [[DATA]], align 4 -// 
CHECK-NEXT: call void @llvm.riscv.esp.cmul.u16.st.incp(i32 4, i32 3, i32 4, i32 [[TMP73]], i32 1, i32 2) -// CHECK-NEXT: call void @llvm.riscv.esp.cmul.u8(i32 3, i32 4, i32 1, i32 5) -// CHECK-NEXT: [[TMP74:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.cmul.u8.ld.incp(i32 5, i32 0, i32 [[TMP74]], i32 1, i32 5, i32 1) +// CHECK-NEXT: [[TMP74:%.*]] = call i32 @llvm.riscv.esp.vmulas.u16.xacc.ld.xp(i32 [[TMP72]], i32 6, i32 5, i32 [[TMP73]], i32 6) // CHECK-NEXT: [[TMP75:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.cmul.u8.st.incp(i32 2, i32 7, i32 4, i32 [[TMP75]], i32 3, i32 1) -// CHECK-NEXT: [[TMP76:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.max.s16.a(i32 2, i32 [[TMP76]]) +// CHECK-NEXT: [[TMP76:%.*]] = call i32 @llvm.riscv.esp.vmulas.u16.xacc.st.ip(i32 3, i32 0, i32 4, i32 [[TMP75]], i32 64) // CHECK-NEXT: [[TMP77:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.max.s32.a(i32 0, i32 [[TMP77]]) // CHECK-NEXT: [[TMP78:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.max.s8.a(i32 7, i32 [[TMP78]]) -// CHECK-NEXT: [[TMP79:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.max.u16.a(i32 4, i32 [[TMP79]]) +// CHECK-NEXT: [[TMP79:%.*]] = call i32 @llvm.riscv.esp.vmulas.u16.xacc.st.xp(i32 [[TMP77]], i32 1, i32 0, i32 4, i32 [[TMP78]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.qacc(i32 5, i32 4) // CHECK-NEXT: [[TMP80:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.max.u32.a(i32 4, i32 [[TMP80]]) -// CHECK-NEXT: [[TMP81:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.max.u8.a(i32 3, i32 [[TMP81]]) +// CHECK-NEXT: [[TMP81:%.*]] = call i32 @llvm.riscv.esp.vmulas.u8.qacc.ld.ip(i32 5, i32 3, i32 [[TMP80]], i32 80, i32 5) // CHECK-NEXT: [[TMP82:%.*]] = load i32, ptr [[DATA]], align 4 -// 
CHECK-NEXT: call void @llvm.riscv.esp.min.s16.a(i32 0, i32 [[TMP82]]) // CHECK-NEXT: [[TMP83:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.min.s32.a(i32 7, i32 [[TMP83]]) -// CHECK-NEXT: [[TMP84:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.min.s8.a(i32 4, i32 [[TMP84]]) +// CHECK-NEXT: [[TMP84:%.*]] = call i32 @llvm.riscv.esp.vmulas.u8.qacc.ld.xp(i32 [[TMP82]], i32 4, i32 7, i32 [[TMP83]], i32 4) // CHECK-NEXT: [[TMP85:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.min.u16.a(i32 7, i32 [[TMP85]]) -// CHECK-NEXT: [[TMP86:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.min.u32.a(i32 6, i32 [[TMP86]]) +// CHECK-NEXT: [[TMP86:%.*]] = call i32 @llvm.riscv.esp.vmulas.u8.qacc.st.ip(i32 3, i32 3, i32 5, i32 [[TMP85]], i32 -96) // CHECK-NEXT: [[TMP87:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.min.u8.a(i32 1, i32 [[TMP87]]) -// CHECK-NEXT: call void @llvm.riscv.esp.vabs.16(i32 7, i32 0) -// CHECK-NEXT: call void @llvm.riscv.esp.vabs.32(i32 0, i32 3) -// CHECK-NEXT: call void @llvm.riscv.esp.vabs.8(i32 5, i32 2) -// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s16(i32 0, i32 4, i32 0) // CHECK-NEXT: [[TMP88:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s16.ld.incp(i32 4, i32 2, i32 [[TMP88]], i32 0, i32 7) -// CHECK-NEXT: [[TMP89:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s16.st.incp(i32 5, i32 7, i32 0, i32 [[TMP89]], i32 5) -// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s32(i32 6, i32 5, i32 0) +// CHECK-NEXT: [[TMP89:%.*]] = call i32 @llvm.riscv.esp.vmulas.u8.qacc.st.xp(i32 [[TMP87]], i32 6, i32 7, i32 3, i32 [[TMP88]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.xacc(i32 0, i32 1) // CHECK-NEXT: [[TMP90:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s32.ld.incp(i32 
5, i32 6, i32 [[TMP90]], i32 0, i32 2) -// CHECK-NEXT: [[TMP91:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s32.st.incp(i32 7, i32 7, i32 0, i32 [[TMP91]], i32 1) -// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s8(i32 6, i32 5, i32 5) +// CHECK-NEXT: [[TMP91:%.*]] = call i32 @llvm.riscv.esp.vmulas.u8.xacc.ld.ip(i32 6, i32 0, i32 [[TMP90]], i32 -32, i32 7) // CHECK-NEXT: [[TMP92:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s8.ld.incp(i32 2, i32 4, i32 [[TMP92]], i32 6, i32 7) // CHECK-NEXT: [[TMP93:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s8.st.incp(i32 4, i32 6, i32 4, i32 [[TMP93]], i32 7) -// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u16(i32 0, i32 6, i32 5) -// CHECK-NEXT: [[TMP94:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u16.ld.incp(i32 6, i32 7, i32 [[TMP94]], i32 5, i32 1) +// CHECK-NEXT: [[TMP94:%.*]] = call i32 @llvm.riscv.esp.vmulas.u8.xacc.ld.xp(i32 [[TMP92]], i32 3, i32 3, i32 [[TMP93]], i32 5) // CHECK-NEXT: [[TMP95:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u16.st.incp(i32 1, i32 3, i32 4, i32 [[TMP95]], i32 5) -// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u32(i32 7, i32 3, i32 0) -// CHECK-NEXT: [[TMP96:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u32.ld.incp(i32 0, i32 4, i32 [[TMP96]], i32 5, i32 5) +// CHECK-NEXT: [[TMP96:%.*]] = call i32 @llvm.riscv.esp.vmulas.u8.xacc.st.ip(i32 7, i32 0, i32 4, i32 [[TMP95]], i32 32) // CHECK-NEXT: [[TMP97:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u32.st.incp(i32 1, i32 5, i32 6, i32 [[TMP97]], i32 1) -// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u8(i32 0, i32 1, i32 5) // CHECK-NEXT: [[TMP98:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u8.ld.incp(i32 5, i32 1, i32 
[[TMP98]], i32 2, i32 6) -// CHECK-NEXT: [[TMP99:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u8.st.incp(i32 1, i32 7, i32 4, i32 [[TMP99]], i32 2) -// CHECK-NEXT: call void @llvm.riscv.esp.vclamp.s16(i32 3, i32 12, i32 5) -// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s16(i32 1, i32 2, i32 2) +// CHECK-NEXT: [[TMP99:%.*]] = call i32 @llvm.riscv.esp.vmulas.u8.xacc.st.xp(i32 [[TMP97]], i32 1, i32 0, i32 0, i32 [[TMP98]]) // CHECK-NEXT: [[TMP100:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s16.ld.incp(i32 3, i32 0, i32 [[TMP100]], i32 5, i32 1) -// CHECK-NEXT: [[TMP101:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s16.st.incp(i32 0, i32 4, i32 2, i32 [[TMP101]], i32 3) -// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s32(i32 0, i32 2, i32 4) +// CHECK-NEXT: [[TMP101:%.*]] = call i32 @llvm.riscv.esp.vmulas.s16.qacc.ldbc.incp(i32 3, i32 6, i32 [[TMP100]], i32 7) // CHECK-NEXT: [[TMP102:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s32.ld.incp(i32 3, i32 5, i32 [[TMP102]], i32 3, i32 6) -// CHECK-NEXT: [[TMP103:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s32.st.incp(i32 6, i32 0, i32 7, i32 [[TMP103]], i32 4) -// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s8(i32 1, i32 0, i32 3) +// CHECK-NEXT: [[TMP103:%.*]] = call i32 @llvm.riscv.esp.vmulas.s8.qacc.ldbc.incp(i32 5, i32 3, i32 [[TMP102]], i32 6) // CHECK-NEXT: [[TMP104:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s8.ld.incp(i32 1, i32 6, i32 [[TMP104]], i32 6, i32 6) -// CHECK-NEXT: [[TMP105:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s8.st.incp(i32 2, i32 7, i32 1, i32 [[TMP105]], i32 0) -// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u16(i32 6, i32 6, i32 3) +// CHECK-NEXT: [[TMP105:%.*]] = call i32 
@llvm.riscv.esp.vmulas.u16.qacc.ldbc.incp(i32 0, i32 3, i32 [[TMP104]], i32 2) // CHECK-NEXT: [[TMP106:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u16.ld.incp(i32 5, i32 2, i32 [[TMP106]], i32 2, i32 1) -// CHECK-NEXT: [[TMP107:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u16.st.incp(i32 3, i32 6, i32 2, i32 [[TMP107]], i32 0) -// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u32(i32 2, i32 3, i32 3) +// CHECK-NEXT: [[TMP107:%.*]] = call i32 @llvm.riscv.esp.vmulas.u8.qacc.ldbc.incp(i32 4, i32 7, i32 [[TMP106]], i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.s16.qacc(i32 7, i32 7, i32 4) // CHECK-NEXT: [[TMP108:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u32.ld.incp(i32 1, i32 4, i32 [[TMP108]], i32 5, i32 5) -// CHECK-NEXT: [[TMP109:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u32.st.incp(i32 4, i32 2, i32 1, i32 [[TMP109]], i32 4) -// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u8(i32 7, i32 0, i32 4) +// CHECK-NEXT: [[TMP109:%.*]] = call i32 @llvm.riscv.esp.vsmulas.s16.qacc.ld.incp(i32 7, i32 7, i32 [[TMP108]], i32 4, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.s8.qacc(i32 7, i32 0, i32 7) // CHECK-NEXT: [[TMP110:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u8.ld.incp(i32 1, i32 5, i32 [[TMP110]], i32 4, i32 7) -// CHECK-NEXT: [[TMP111:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u8.st.incp(i32 1, i32 2, i32 5, i32 [[TMP111]], i32 7) -// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s16(i32 5, i32 1, i32 7) +// CHECK-NEXT: [[TMP111:%.*]] = call i32 @llvm.riscv.esp.vsmulas.s8.qacc.ld.incp(i32 5, i32 6, i32 [[TMP110]], i32 15, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.u16.qacc(i32 7, i32 0, i32 10) // CHECK-NEXT: [[TMP112:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call 
void @llvm.riscv.esp.vmin.s16.ld.incp(i32 7, i32 6, i32 [[TMP112]], i32 6, i32 4) -// CHECK-NEXT: [[TMP113:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s16.st.incp(i32 7, i32 0, i32 6, i32 [[TMP113]], i32 2) -// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s32(i32 7, i32 4, i32 7) +// CHECK-NEXT: [[TMP113:%.*]] = call i32 @llvm.riscv.esp.vsmulas.u16.qacc.ld.incp(i32 7, i32 6, i32 [[TMP112]], i32 1, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.u8.qacc(i32 3, i32 6, i32 5) // CHECK-NEXT: [[TMP114:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s32.ld.incp(i32 0, i32 1, i32 [[TMP114]], i32 5, i32 4) -// CHECK-NEXT: [[TMP115:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s32.st.incp(i32 1, i32 6, i32 7, i32 [[TMP115]], i32 4) -// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s8(i32 5, i32 6, i32 4) +// CHECK-NEXT: [[TMP115:%.*]] = call i32 @llvm.riscv.esp.vsmulas.u8.qacc.ld.incp(i32 6, i32 1, i32 [[TMP114]], i32 4, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.s16(i32 2, i32 1, i32 3, i32 1) // CHECK-NEXT: [[TMP116:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s8.ld.incp(i32 1, i32 6, i32 [[TMP116]], i32 6, i32 5) -// CHECK-NEXT: [[TMP117:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s8.st.incp(i32 7, i32 7, i32 6, i32 [[TMP117]], i32 7) -// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u16(i32 7, i32 1, i32 1) +// CHECK-NEXT: [[TMP117:%.*]] = call i32 @llvm.riscv.esp.cmul.s16.ld.incp(i32 2, i32 7, i32 [[TMP116]], i32 0, i32 5, i32 0) // CHECK-NEXT: [[TMP118:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u16.ld.incp(i32 6, i32 0, i32 [[TMP118]], i32 3, i32 0) -// CHECK-NEXT: [[TMP119:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u16.st.incp(i32 0, i32 7, i32 5, i32 [[TMP119]], 
i32 3) -// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u32(i32 6, i32 5, i32 0) +// CHECK-NEXT: [[TMP119:%.*]] = call i32 @llvm.riscv.esp.cmul.s16.st.incp(i32 7, i32 4, i32 6, i32 [[TMP118]], i32 2, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.s8(i32 5, i32 7, i32 2, i32 4) // CHECK-NEXT: [[TMP120:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u32.ld.incp(i32 3, i32 7, i32 [[TMP120]], i32 1, i32 4) -// CHECK-NEXT: [[TMP121:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u32.st.incp(i32 1, i32 0, i32 2, i32 [[TMP121]], i32 3) -// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u8(i32 2, i32 0, i32 7) +// CHECK-NEXT: [[TMP121:%.*]] = call i32 @llvm.riscv.esp.cmul.s8.ld.incp(i32 0, i32 6, i32 [[TMP120]], i32 2, i32 7, i32 5) // CHECK-NEXT: [[TMP122:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u8.ld.incp(i32 4, i32 2, i32 [[TMP122]], i32 4, i32 3) -// CHECK-NEXT: [[TMP123:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u8.st.incp(i32 1, i32 7, i32 4, i32 [[TMP123]], i32 4) -// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s16(i32 7, i32 5, i32 3) +// CHECK-NEXT: [[TMP123:%.*]] = call i32 @llvm.riscv.esp.cmul.s8.st.incp(i32 1, i32 6, i32 5, i32 [[TMP122]], i32 0, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.u16(i32 7, i32 4, i32 0, i32 0) // CHECK-NEXT: [[TMP124:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s16.ld.incp(i32 5, i32 4, i32 [[TMP124]], i32 1, i32 6) -// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s16.s8xs8(i32 7, i32 6, i32 4, i32 4) -// CHECK-NEXT: [[TMP125:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s16.st.incp(i32 0, i32 1, i32 5, i32 [[TMP125]], i32 7) -// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s32.s16xs16(i32 5, i32 3, i32 1, i32 2) -// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s8(i32 1, i32 6, i32 
0) +// CHECK-NEXT: [[TMP125:%.*]] = call i32 @llvm.riscv.esp.cmul.u16.ld.incp(i32 2, i32 0, i32 [[TMP124]], i32 3, i32 1, i32 1) // CHECK-NEXT: [[TMP126:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s8.ld.incp(i32 2, i32 1, i32 [[TMP126]], i32 6, i32 5) -// CHECK-NEXT: [[TMP127:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s8.st.incp(i32 5, i32 2, i32 1, i32 [[TMP127]], i32 7) -// CHECK-NEXT: call void @llvm.riscv.esp.vmul.u16(i32 7, i32 3, i32 6) +// CHECK-NEXT: [[TMP127:%.*]] = call i32 @llvm.riscv.esp.cmul.u16.st.incp(i32 4, i32 3, i32 4, i32 [[TMP126]], i32 1, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.u8(i32 3, i32 4, i32 1, i32 5) // CHECK-NEXT: [[TMP128:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmul.u16.ld.incp(i32 3, i32 3, i32 [[TMP128]], i32 2, i32 0) -// CHECK-NEXT: [[TMP129:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmul.u16.st.incp(i32 6, i32 5, i32 0, i32 [[TMP129]], i32 1) -// CHECK-NEXT: call void @llvm.riscv.esp.vmul.u8(i32 2, i32 2, i32 7) +// CHECK-NEXT: [[TMP129:%.*]] = call i32 @llvm.riscv.esp.cmul.u8.ld.incp(i32 5, i32 0, i32 [[TMP128]], i32 1, i32 5, i32 1) // CHECK-NEXT: [[TMP130:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmul.u8.ld.incp(i32 1, i32 1, i32 [[TMP130]], i32 6, i32 7) -// CHECK-NEXT: [[TMP131:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vmul.u8.st.incp(i32 5, i32 0, i32 6, i32 [[TMP131]], i32 2) +// CHECK-NEXT: [[TMP131:%.*]] = call i32 @llvm.riscv.esp.cmul.u8.st.incp(i32 2, i32 7, i32 4, i32 [[TMP130]], i32 3, i32 1) // CHECK-NEXT: [[TMP132:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vprelu.s16(i32 [[TMP132]], i32 0, i32 7, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.max.s16.a(i32 2, i32 [[TMP132]]) // CHECK-NEXT: [[TMP133:%.*]] = load i32, 
ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vprelu.s8(i32 [[TMP133]], i32 6, i32 6, i32 6) +// CHECK-NEXT: call void @llvm.riscv.esp.max.s32.a(i32 0, i32 [[TMP133]]) // CHECK-NEXT: [[TMP134:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.max.s8.a(i32 7, i32 [[TMP134]]) // CHECK-NEXT: [[TMP135:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vrelu.s16(i32 [[TMP134]], i32 [[TMP135]], i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.max.u16.a(i32 4, i32 [[TMP135]]) // CHECK-NEXT: [[TMP136:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.max.u32.a(i32 4, i32 [[TMP136]]) // CHECK-NEXT: [[TMP137:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vrelu.s8(i32 [[TMP136]], i32 [[TMP137]], i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.max.u8.a(i32 3, i32 [[TMP137]]) // CHECK-NEXT: [[TMP138:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsadds.s16(i32 [[TMP138]], i32 5, i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.min.s16.a(i32 0, i32 [[TMP138]]) // CHECK-NEXT: [[TMP139:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsadds.s8(i32 [[TMP139]], i32 6, i32 6) +// CHECK-NEXT: call void @llvm.riscv.esp.min.s32.a(i32 7, i32 [[TMP139]]) // CHECK-NEXT: [[TMP140:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsadds.u16(i32 [[TMP140]], i32 7, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.min.s8.a(i32 4, i32 [[TMP140]]) // CHECK-NEXT: [[TMP141:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsadds.u8(i32 [[TMP141]], i32 2, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.min.u16.a(i32 7, i32 [[TMP141]]) // CHECK-NEXT: [[TMP142:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.min.u32.a(i32 6, i32 [[TMP142]]) // CHECK-NEXT: [[TMP143:%.*]] = load i32, ptr 
[[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsat.s16(i32 [[TMP142]], i32 [[TMP143]], i32 7, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.min.u8.a(i32 1, i32 [[TMP143]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vabs.16(i32 7, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.vabs.32(i32 0, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vabs.8(i32 5, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s16(i32 0, i32 4, i32 0) // CHECK-NEXT: [[TMP144:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP145:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsat.s32(i32 [[TMP144]], i32 [[TMP145]], i32 2, i32 5) +// CHECK-NEXT: [[TMP145:%.*]] = call i32 @llvm.riscv.esp.vadd.s16.ld.incp(i32 4, i32 2, i32 [[TMP144]], i32 0, i32 7) // CHECK-NEXT: [[TMP146:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP147:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsat.s8(i32 [[TMP146]], i32 [[TMP147]], i32 2, i32 5) +// CHECK-NEXT: [[TMP147:%.*]] = call i32 @llvm.riscv.esp.vadd.s16.st.incp(i32 5, i32 7, i32 0, i32 [[TMP146]], i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s32(i32 6, i32 5, i32 0) // CHECK-NEXT: [[TMP148:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP149:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsat.u16(i32 [[TMP148]], i32 [[TMP149]], i32 0, i32 2) +// CHECK-NEXT: [[TMP149:%.*]] = call i32 @llvm.riscv.esp.vadd.s32.ld.incp(i32 5, i32 6, i32 [[TMP148]], i32 0, i32 2) // CHECK-NEXT: [[TMP150:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP151:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsat.u32(i32 [[TMP150]], i32 [[TMP151]], i32 4, i32 2) +// CHECK-NEXT: [[TMP151:%.*]] = call i32 @llvm.riscv.esp.vadd.s32.st.incp(i32 7, i32 7, i32 0, i32 [[TMP150]], i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s8(i32 6, i32 5, i32 5) // CHECK-NEXT: 
[[TMP152:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP153:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsat.u8(i32 [[TMP152]], i32 [[TMP153]], i32 0, i32 2) +// CHECK-NEXT: [[TMP153:%.*]] = call i32 @llvm.riscv.esp.vadd.s8.ld.incp(i32 2, i32 4, i32 [[TMP152]], i32 6, i32 7) // CHECK-NEXT: [[TMP154:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vssubs.s16(i32 [[TMP154]], i32 3, i32 6) -// CHECK-NEXT: [[TMP155:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vssubs.s8(i32 [[TMP155]], i32 5, i32 5) +// CHECK-NEXT: [[TMP155:%.*]] = call i32 @llvm.riscv.esp.vadd.s8.st.incp(i32 4, i32 6, i32 4, i32 [[TMP154]], i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u16(i32 0, i32 6, i32 5) // CHECK-NEXT: [[TMP156:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vssubs.u16(i32 [[TMP156]], i32 6, i32 3) -// CHECK-NEXT: [[TMP157:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vssubs.u8(i32 [[TMP157]], i32 0, i32 3) -// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s16(i32 0, i32 5, i32 3) +// CHECK-NEXT: [[TMP157:%.*]] = call i32 @llvm.riscv.esp.vadd.u16.ld.incp(i32 6, i32 7, i32 [[TMP156]], i32 5, i32 1) // CHECK-NEXT: [[TMP158:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s16.ld.incp(i32 0, i32 1, i32 [[TMP158]], i32 5, i32 3) -// CHECK-NEXT: [[TMP159:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s16.st.incp(i32 5, i32 7, i32 7, i32 [[TMP159]], i32 4) -// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s32(i32 3, i32 0, i32 3) +// CHECK-NEXT: [[TMP159:%.*]] = call i32 @llvm.riscv.esp.vadd.u16.st.incp(i32 1, i32 3, i32 4, i32 [[TMP158]], i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u32(i32 7, i32 3, i32 0) // CHECK-NEXT: [[TMP160:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void 
@llvm.riscv.esp.vsub.s32.ld.incp(i32 1, i32 2, i32 [[TMP160]], i32 0, i32 2) -// CHECK-NEXT: [[TMP161:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s32.st.incp(i32 4, i32 0, i32 0, i32 [[TMP161]], i32 5) -// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s8(i32 4, i32 1, i32 3) +// CHECK-NEXT: [[TMP161:%.*]] = call i32 @llvm.riscv.esp.vadd.u32.ld.incp(i32 0, i32 4, i32 [[TMP160]], i32 5, i32 5) // CHECK-NEXT: [[TMP162:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s8.ld.incp(i32 3, i32 7, i32 [[TMP162]], i32 3, i32 5) -// CHECK-NEXT: [[TMP163:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s8.st.incp(i32 5, i32 7, i32 3, i32 [[TMP163]], i32 3) -// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u16(i32 4, i32 6, i32 5) +// CHECK-NEXT: [[TMP163:%.*]] = call i32 @llvm.riscv.esp.vadd.u32.st.incp(i32 1, i32 5, i32 6, i32 [[TMP162]], i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u8(i32 0, i32 1, i32 5) // CHECK-NEXT: [[TMP164:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u16.ld.incp(i32 4, i32 7, i32 [[TMP164]], i32 0, i32 5) -// CHECK-NEXT: [[TMP165:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u16.st.incp(i32 2, i32 2, i32 7, i32 [[TMP165]], i32 3) -// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u32(i32 0, i32 1, i32 2) +// CHECK-NEXT: [[TMP165:%.*]] = call i32 @llvm.riscv.esp.vadd.u8.ld.incp(i32 5, i32 1, i32 [[TMP164]], i32 2, i32 6) // CHECK-NEXT: [[TMP166:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u32.ld.incp(i32 5, i32 6, i32 [[TMP166]], i32 3, i32 5) -// CHECK-NEXT: [[TMP167:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u32.st.incp(i32 0, i32 1, i32 4, i32 [[TMP167]], i32 2) -// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u8(i32 4, i32 2, i32 7) +// CHECK-NEXT: [[TMP167:%.*]] = call 
i32 @llvm.riscv.esp.vadd.u8.st.incp(i32 1, i32 7, i32 4, i32 [[TMP166]], i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vclamp.s16(i32 3, i32 12, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s16(i32 1, i32 2, i32 2) // CHECK-NEXT: [[TMP168:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u8.ld.incp(i32 2, i32 7, i32 [[TMP168]], i32 3, i32 4) -// CHECK-NEXT: [[TMP169:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u8.st.incp(i32 6, i32 4, i32 7, i32 [[TMP169]], i32 7) +// CHECK-NEXT: [[TMP169:%.*]] = call i32 @llvm.riscv.esp.vmax.s16.ld.incp(i32 3, i32 0, i32 [[TMP168]], i32 5, i32 1) // CHECK-NEXT: [[TMP170:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP171:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP171:%.*]] = call i32 @llvm.riscv.esp.vmax.s16.st.incp(i32 0, i32 4, i32 2, i32 [[TMP170]], i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s32(i32 0, i32 2, i32 4) // CHECK-NEXT: [[TMP172:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.addx2(i32 [[TMP170]], i32 [[TMP171]], i32 [[TMP172]]) -// CHECK-NEXT: [[TMP173:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP173:%.*]] = call i32 @llvm.riscv.esp.vmax.s32.ld.incp(i32 3, i32 5, i32 [[TMP172]], i32 3, i32 6) // CHECK-NEXT: [[TMP174:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP175:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.addx4(i32 [[TMP173]], i32 [[TMP174]], i32 [[TMP175]]) +// CHECK-NEXT: [[TMP175:%.*]] = call i32 @llvm.riscv.esp.vmax.s32.st.incp(i32 6, i32 0, i32 7, i32 [[TMP174]], i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s8(i32 1, i32 0, i32 3) // CHECK-NEXT: [[TMP176:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP177:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP177:%.*]] = call i32 @llvm.riscv.esp.vmax.s8.ld.incp(i32 1, i32 6, i32 [[TMP176]], i32 6, 
i32 6) // CHECK-NEXT: [[TMP178:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.sat(i32 [[TMP176]], i32 [[TMP177]], i32 [[TMP178]]) -// CHECK-NEXT: [[TMP179:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP179:%.*]] = call i32 @llvm.riscv.esp.vmax.s8.st.incp(i32 2, i32 7, i32 1, i32 [[TMP178]], i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u16(i32 6, i32 6, i32 3) // CHECK-NEXT: [[TMP180:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP181:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.subx2(i32 [[TMP179]], i32 [[TMP180]], i32 [[TMP181]]) +// CHECK-NEXT: [[TMP181:%.*]] = call i32 @llvm.riscv.esp.vmax.u16.ld.incp(i32 5, i32 2, i32 [[TMP180]], i32 2, i32 1) // CHECK-NEXT: [[TMP182:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP183:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP183:%.*]] = call i32 @llvm.riscv.esp.vmax.u16.st.incp(i32 3, i32 6, i32 2, i32 [[TMP182]], i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u32(i32 2, i32 3, i32 3) // CHECK-NEXT: [[TMP184:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.subx4(i32 [[TMP182]], i32 [[TMP183]], i32 [[TMP184]]) -// CHECK-NEXT: call void @llvm.riscv.esp.andq(i32 0, i32 1, i32 4) -// CHECK-NEXT: call void @llvm.riscv.esp.notq(i32 0, i32 1) -// CHECK-NEXT: call void @llvm.riscv.esp.orq(i32 0, i32 6, i32 3) -// CHECK-NEXT: call void @llvm.riscv.esp.xorq(i32 7, i32 4, i32 7) -// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.s16(i32 6, i32 6, i32 3) -// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.s32(i32 6, i32 2, i32 1) -// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.s8(i32 7, i32 6, i32 0) -// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.u16(i32 0, i32 2, i32 5) -// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.u32(i32 6, i32 4, i32 3) -// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.u8(i32 6, i32 4, i32 5) -// CHECK-NEXT: call void 
@llvm.riscv.esp.vcmp.gt.s16(i32 5, i32 3, i32 6) -// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.s32(i32 2, i32 4, i32 5) -// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.s8(i32 7, i32 7, i32 4) -// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.u16(i32 2, i32 7, i32 7) -// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.u32(i32 6, i32 4, i32 2) -// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.u8(i32 0, i32 4, i32 4) -// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.s16(i32 4, i32 6, i32 5) -// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.s32(i32 2, i32 4, i32 1) -// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.s8(i32 3, i32 0, i32 2) -// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.u16(i32 2, i32 4, i32 1) -// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.u32(i32 2, i32 0, i32 5) -// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.u8(i32 0, i32 2, i32 5) -// CHECK-NEXT: call void @llvm.riscv.esp.mov.s16.qacc(i32 4) -// CHECK-NEXT: call void @llvm.riscv.esp.mov.s8.qacc(i32 5) -// CHECK-NEXT: call void @llvm.riscv.esp.mov.u16.qacc(i32 5) -// CHECK-NEXT: call void @llvm.riscv.esp.mov.u8.qacc(i32 5) -// CHECK-NEXT: [[TMP185:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.movi.16.a(i32 2, i32 8, i32 [[TMP185]]) +// CHECK-NEXT: [[TMP185:%.*]] = call i32 @llvm.riscv.esp.vmax.u32.ld.incp(i32 1, i32 4, i32 [[TMP184]], i32 5, i32 5) // CHECK-NEXT: [[TMP186:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.movi.16.q(i32 [[TMP186]], i32 12, i32 1) -// CHECK-NEXT: [[TMP187:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.movi.32.a(i32 4, i32 2, i32 [[TMP187]]) +// CHECK-NEXT: [[TMP187:%.*]] = call i32 @llvm.riscv.esp.vmax.u32.st.incp(i32 4, i32 2, i32 1, i32 [[TMP186]], i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u8(i32 7, i32 0, i32 4) // CHECK-NEXT: [[TMP188:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.movi.32.q(i32 [[TMP188]], 
i32 1, i32 0) -// CHECK-NEXT: [[TMP189:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.movi.8.a(i32 0, i32 13, i32 [[TMP189]]) +// CHECK-NEXT: [[TMP189:%.*]] = call i32 @llvm.riscv.esp.vmax.u8.ld.incp(i32 1, i32 5, i32 [[TMP188]], i32 4, i32 7) // CHECK-NEXT: [[TMP190:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.movi.8.q(i32 [[TMP190]], i32 14, i32 3) -// CHECK-NEXT: [[TMP191:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.cfg(i32 [[TMP191]]) +// CHECK-NEXT: [[TMP191:%.*]] = call i32 @llvm.riscv.esp.vmax.u8.st.incp(i32 1, i32 2, i32 5, i32 [[TMP190]], i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s16(i32 5, i32 1, i32 7) // CHECK-NEXT: [[TMP192:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.fft.bit.width(i32 [[TMP192]]) -// CHECK-NEXT: [[TMP193:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP193:%.*]] = call i32 @llvm.riscv.esp.vmin.s16.ld.incp(i32 7, i32 6, i32 [[TMP192]], i32 6, i32 4) // CHECK-NEXT: [[TMP194:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.perf(i32 [[TMP193]], i32 [[TMP194]]) -// CHECK-NEXT: [[TMP195:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.sar(i32 [[TMP195]]) +// CHECK-NEXT: [[TMP195:%.*]] = call i32 @llvm.riscv.esp.vmin.s16.st.incp(i32 7, i32 0, i32 6, i32 [[TMP194]], i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s32(i32 7, i32 4, i32 7) // CHECK-NEXT: [[TMP196:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.sar.bytes(i32 [[TMP196]]) -// CHECK-NEXT: [[TMP197:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.xacc.h(i32 [[TMP197]]) +// CHECK-NEXT: [[TMP197:%.*]] = call i32 @llvm.riscv.esp.vmin.s32.ld.incp(i32 0, i32 1, i32 [[TMP196]], i32 5, i32 4) // CHECK-NEXT: [[TMP198:%.*]] = load i32, ptr 
[[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.xacc.l(i32 [[TMP198]]) -// CHECK-NEXT: [[TMP199:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.cfg(i32 [[TMP199]]) +// CHECK-NEXT: [[TMP199:%.*]] = call i32 @llvm.riscv.esp.vmin.s32.st.incp(i32 1, i32 6, i32 7, i32 [[TMP198]], i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s8(i32 5, i32 6, i32 4) // CHECK-NEXT: [[TMP200:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.fft.bit.width(i32 [[TMP200]]) -// CHECK-NEXT: [[TMP201:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.perf(i32 [[TMP201]]) +// CHECK-NEXT: [[TMP201:%.*]] = call i32 @llvm.riscv.esp.vmin.s8.ld.incp(i32 1, i32 6, i32 [[TMP200]], i32 6, i32 5) // CHECK-NEXT: [[TMP202:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.sar(i32 [[TMP202]]) -// CHECK-NEXT: [[TMP203:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.sar.bytes(i32 [[TMP203]]) +// CHECK-NEXT: [[TMP203:%.*]] = call i32 @llvm.riscv.esp.vmin.s8.st.incp(i32 7, i32 7, i32 6, i32 [[TMP202]], i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u16(i32 7, i32 1, i32 1) // CHECK-NEXT: [[TMP204:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.xacc.h(i32 [[TMP204]]) -// CHECK-NEXT: [[TMP205:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.xacc.l(i32 [[TMP205]]) -// CHECK-NEXT: call void @llvm.riscv.esp.vext.s16(i32 0, i32 4, i32 6) -// CHECK-NEXT: call void @llvm.riscv.esp.vext.s8(i32 0, i32 7, i32 1) -// CHECK-NEXT: call void @llvm.riscv.esp.vext.u16(i32 1, i32 0, i32 6) -// CHECK-NEXT: call void @llvm.riscv.esp.vext.u8(i32 4, i32 1, i32 6) -// CHECK-NEXT: call void @llvm.riscv.esp.vunzip.16(i32 3, i32 2) -// CHECK-NEXT: call void @llvm.riscv.esp.vunzip.32(i32 6, i32 1) -// CHECK-NEXT: call void 
@llvm.riscv.esp.vunzip.8(i32 3, i32 5) -// CHECK-NEXT: call void @llvm.riscv.esp.vunzipt.16(i32 1, i32 5, i32 4) -// CHECK-NEXT: call void @llvm.riscv.esp.vunzipt.8(i32 7, i32 5, i32 7) -// CHECK-NEXT: call void @llvm.riscv.esp.vzip.16(i32 2, i32 2) -// CHECK-NEXT: call void @llvm.riscv.esp.vzip.32(i32 0, i32 7) -// CHECK-NEXT: call void @llvm.riscv.esp.vzip.8(i32 6, i32 4) -// CHECK-NEXT: call void @llvm.riscv.esp.vzipt.16(i32 6, i32 3, i32 0) -// CHECK-NEXT: call void @llvm.riscv.esp.vzipt.8(i32 7, i32 0, i32 1) -// CHECK-NEXT: call void @llvm.riscv.esp.zero.q(i32 3) -// CHECK-NEXT: call void @llvm.riscv.esp.zero.qacc() -// CHECK-NEXT: call void @llvm.riscv.esp.zero.xacc() +// CHECK-NEXT: [[TMP205:%.*]] = call i32 @llvm.riscv.esp.vmin.u16.ld.incp(i32 6, i32 0, i32 [[TMP204]], i32 3, i32 0) // CHECK-NEXT: [[TMP206:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.fft.ams.s16.ld.incp(i32 1, i32 1, i32 3, i32 [[TMP206]], i32 0, i32 6, i32 0, i32 3) -// CHECK-NEXT: [[TMP207:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.fft.ams.s16.ld.incp.uaup(i32 3, i32 0, i32 1, i32 [[TMP207]], i32 0, i32 3, i32 3, i32 1) +// CHECK-NEXT: [[TMP207:%.*]] = call i32 @llvm.riscv.esp.vmin.u16.st.incp(i32 0, i32 7, i32 5, i32 [[TMP206]], i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u32(i32 6, i32 5, i32 0) // CHECK-NEXT: [[TMP208:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.fft.ams.s16.ld.r32.decp(i32 2, i32 3, i32 7, i32 [[TMP208]], i32 0, i32 1, i32 1, i32 4) -// CHECK-NEXT: [[TMP209:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP209:%.*]] = call i32 @llvm.riscv.esp.vmin.u32.ld.incp(i32 3, i32 7, i32 [[TMP208]], i32 1, i32 4) // CHECK-NEXT: [[TMP210:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.fft.ams.s16.st.incp(i32 4, i32 4, i32 0, i32 5, i32 [[TMP209]], i32 [[TMP210]], i32 1, i32 1) -// CHECK-NEXT: [[TMP211:%.*]] = load 
i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.fft.bitrev(i32 [[TMP211]], i32 6) +// CHECK-NEXT: [[TMP211:%.*]] = call i32 @llvm.riscv.esp.vmin.u32.st.incp(i32 1, i32 0, i32 2, i32 [[TMP210]], i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u8(i32 2, i32 0, i32 7) // CHECK-NEXT: [[TMP212:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP213:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.fft.cmul.s16.ld.xp(i32 [[TMP212]], i32 7, i32 0, i32 [[TMP213]], i32 2, i32 1, i32 2) +// CHECK-NEXT: [[TMP213:%.*]] = call i32 @llvm.riscv.esp.vmin.u8.ld.incp(i32 4, i32 2, i32 [[TMP212]], i32 4, i32 3) // CHECK-NEXT: [[TMP214:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP215:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.fft.cmul.s16.st.xp(i32 [[TMP214]], i32 6, i32 0, i32 7, i32 [[TMP215]], i32 0, i32 1, i32 0) -// CHECK-NEXT: call void @llvm.riscv.esp.fft.r2bf.s16(i32 2, i32 5, i32 0, i32 7, i32 5) +// CHECK-NEXT: [[TMP215:%.*]] = call i32 @llvm.riscv.esp.vmin.u8.st.incp(i32 1, i32 7, i32 4, i32 [[TMP214]], i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s16(i32 7, i32 5, i32 3) // CHECK-NEXT: [[TMP216:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.fft.r2bf.s16.st.incp(i32 1, i32 7, i32 [[TMP216]], i32 1, i32 6) -// CHECK-NEXT: [[TMP217:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.fft.vst.r32.decp(i32 2, i32 [[TMP217]], i32 1) +// CHECK-NEXT: [[TMP217:%.*]] = call i32 @llvm.riscv.esp.vmul.s16.ld.incp(i32 5, i32 4, i32 [[TMP216]], i32 1, i32 6) +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s16.s8xs8(i32 7, i32 6, i32 4, i32 4) // CHECK-NEXT: [[TMP218:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.ld.128.usar.ip(i32 [[TMP218]], i32 -464, i32 7) -// CHECK-NEXT: [[TMP219:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP219:%.*]] = call 
i32 @llvm.riscv.esp.vmul.s16.st.incp(i32 0, i32 1, i32 5, i32 [[TMP218]], i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s32.s16xs16(i32 5, i32 3, i32 1, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s8(i32 1, i32 6, i32 0) // CHECK-NEXT: [[TMP220:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.ld.128.usar.xp(i32 [[TMP219]], i32 [[TMP220]], i32 0) -// CHECK-NEXT: [[TMP221:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.ld.xacc.ip(i32 [[TMP221]], i32 -224) +// CHECK-NEXT: [[TMP221:%.*]] = call i32 @llvm.riscv.esp.vmul.s8.ld.incp(i32 2, i32 1, i32 [[TMP220]], i32 6, i32 5) // CHECK-NEXT: [[TMP222:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.s16.128.ip(i32 [[TMP222]], i32 288) -// CHECK-NEXT: [[TMP223:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP223:%.*]] = call i32 @llvm.riscv.esp.vmul.s8.st.incp(i32 5, i32 2, i32 1, i32 [[TMP222]], i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.u16(i32 7, i32 3, i32 6) // CHECK-NEXT: [[TMP224:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.s16.128.xp(i32 [[TMP223]], i32 [[TMP224]]) -// CHECK-NEXT: [[TMP225:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.s8.128.ip(i32 [[TMP225]], i32 -1408) +// CHECK-NEXT: [[TMP225:%.*]] = call i32 @llvm.riscv.esp.vmul.u16.ld.incp(i32 3, i32 3, i32 [[TMP224]], i32 2, i32 0) // CHECK-NEXT: [[TMP226:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP227:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.s8.128.xp(i32 [[TMP226]], i32 [[TMP227]]) +// CHECK-NEXT: [[TMP227:%.*]] = call i32 @llvm.riscv.esp.vmul.u16.st.incp(i32 6, i32 5, i32 0, i32 [[TMP226]], i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.u8(i32 2, i32 2, i32 7) // CHECK-NEXT: [[TMP228:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void 
@llvm.riscv.esp.ldqa.u16.128.ip(i32 [[TMP228]], i32 -1440) -// CHECK-NEXT: [[TMP229:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP229:%.*]] = call i32 @llvm.riscv.esp.vmul.u8.ld.incp(i32 1, i32 1, i32 [[TMP228]], i32 6, i32 7) // CHECK-NEXT: [[TMP230:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.u16.128.xp(i32 [[TMP229]], i32 [[TMP230]]) -// CHECK-NEXT: [[TMP231:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.u8.128.ip(i32 [[TMP231]], i32 -816) +// CHECK-NEXT: [[TMP231:%.*]] = call i32 @llvm.riscv.esp.vmul.u8.st.incp(i32 5, i32 0, i32 6, i32 [[TMP230]], i32 2) // CHECK-NEXT: [[TMP232:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vprelu.s16(i32 [[TMP232]], i32 0, i32 7, i32 3) // CHECK-NEXT: [[TMP233:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.u8.128.xp(i32 [[TMP232]], i32 [[TMP233]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vprelu.s8(i32 [[TMP233]], i32 6, i32 6, i32 6) // CHECK-NEXT: [[TMP234:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vldbc.16.ip(i32 [[TMP234]], i32 380, i32 2) // CHECK-NEXT: [[TMP235:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vrelu.s16(i32 [[TMP234]], i32 [[TMP235]], i32 3) // CHECK-NEXT: [[TMP236:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vldbc.16.xp(i32 [[TMP235]], i32 [[TMP236]], i32 3) // CHECK-NEXT: [[TMP237:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vldbc.32.ip(i32 [[TMP237]], i32 -292, i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vrelu.s8(i32 [[TMP236]], i32 [[TMP237]], i32 7) // CHECK-NEXT: [[TMP238:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsadds.s16(i32 [[TMP238]], i32 5, i32 4) // CHECK-NEXT: [[TMP239:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void 
@llvm.riscv.esp.vldbc.32.xp(i32 [[TMP238]], i32 [[TMP239]], i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vsadds.s8(i32 [[TMP239]], i32 6, i32 6) // CHECK-NEXT: [[TMP240:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vldbc.8.ip(i32 [[TMP240]], i32 -416, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vsadds.u16(i32 [[TMP240]], i32 7, i32 2) // CHECK-NEXT: [[TMP241:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsadds.u8(i32 [[TMP241]], i32 2, i32 0) // CHECK-NEXT: [[TMP242:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vldbc.8.xp(i32 [[TMP241]], i32 [[TMP242]], i32 7) // CHECK-NEXT: [[TMP243:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vldext.s16.ip(i32 [[TMP243]], i32 -80, i32 0, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vsat.s16(i32 [[TMP242]], i32 [[TMP243]], i32 7, i32 5) // CHECK-NEXT: [[TMP244:%.*]] = load i32, ptr [[DATA]], align 4 // CHECK-NEXT: [[TMP245:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vldext.s16.xp(i32 [[TMP244]], i32 [[TMP245]], i32 2, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vsat.s32(i32 [[TMP244]], i32 [[TMP245]], i32 2, i32 5) // CHECK-NEXT: [[TMP246:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vldext.s8.ip(i32 [[TMP246]], i32 0, i32 2, i32 7) // CHECK-NEXT: [[TMP247:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsat.s8(i32 [[TMP246]], i32 [[TMP247]], i32 2, i32 5) // CHECK-NEXT: [[TMP248:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vldext.s8.xp(i32 [[TMP247]], i32 [[TMP248]], i32 7, i32 5) // CHECK-NEXT: [[TMP249:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vldext.u16.ip(i32 [[TMP249]], i32 32, i32 0, i32 6) +// CHECK-NEXT: call void @llvm.riscv.esp.vsat.u16(i32 [[TMP248]], i32 [[TMP249]], i32 0, i32 
2) // CHECK-NEXT: [[TMP250:%.*]] = load i32, ptr [[DATA]], align 4 // CHECK-NEXT: [[TMP251:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vldext.u16.xp(i32 [[TMP250]], i32 [[TMP251]], i32 7, i32 6) +// CHECK-NEXT: call void @llvm.riscv.esp.vsat.u32(i32 [[TMP250]], i32 [[TMP251]], i32 4, i32 2) // CHECK-NEXT: [[TMP252:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vldext.u8.ip(i32 [[TMP252]], i32 -16, i32 3, i32 1) // CHECK-NEXT: [[TMP253:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsat.u8(i32 [[TMP252]], i32 [[TMP253]], i32 0, i32 2) // CHECK-NEXT: [[TMP254:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vldext.u8.xp(i32 [[TMP253]], i32 [[TMP254]], i32 5, i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vssubs.s16(i32 [[TMP254]], i32 3, i32 6) // CHECK-NEXT: [[TMP255:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vldhbc.16.incp(i32 [[TMP255]], i32 2, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vssubs.s8(i32 [[TMP255]], i32 5, i32 5) // CHECK-NEXT: [[TMP256:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.ld.qacc.h.h.128.ip(i32 [[TMP256]], i32 -240) +// CHECK-NEXT: call void @llvm.riscv.esp.vssubs.u16(i32 [[TMP256]], i32 6, i32 3) // CHECK-NEXT: [[TMP257:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.ld.qacc.h.l.128.ip(i32 [[TMP257]], i32 -32) +// CHECK-NEXT: call void @llvm.riscv.esp.vssubs.u8(i32 [[TMP257]], i32 0, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s16(i32 0, i32 5, i32 3) // CHECK-NEXT: [[TMP258:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.ld.qacc.l.h.128.ip(i32 [[TMP258]], i32 -64) -// CHECK-NEXT: [[TMP259:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.ld.qacc.l.l.128.ip(i32 [[TMP259]], i32 -80) +// CHECK-NEXT: [[TMP259:%.*]] 
= call i32 @llvm.riscv.esp.vsub.s16.ld.incp(i32 0, i32 1, i32 [[TMP258]], i32 5, i32 3) // CHECK-NEXT: [[TMP260:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.ld.ua.state.ip(i32 [[TMP260]], i32 1504) -// CHECK-NEXT: [[TMP261:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.ldxq.32(i32 [[TMP261]], i32 6, i32 1, i32 7, i32 1) +// CHECK-NEXT: [[TMP261:%.*]] = call i32 @llvm.riscv.esp.vsub.s16.st.incp(i32 5, i32 7, i32 7, i32 [[TMP260]], i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s32(i32 3, i32 0, i32 3) // CHECK-NEXT: [[TMP262:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.st.qacc.h.h.128.ip(i32 [[TMP262]], i32 -480) -// CHECK-NEXT: [[TMP263:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.st.qacc.h.l.128.ip(i32 [[TMP263]], i32 -1712) +// CHECK-NEXT: [[TMP263:%.*]] = call i32 @llvm.riscv.esp.vsub.s32.ld.incp(i32 1, i32 2, i32 [[TMP262]], i32 0, i32 2) // CHECK-NEXT: [[TMP264:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.st.qacc.l.h.128.ip(i32 [[TMP264]], i32 960) -// CHECK-NEXT: [[TMP265:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.st.qacc.l.l.128.ip(i32 [[TMP265]], i32 1920) +// CHECK-NEXT: [[TMP265:%.*]] = call i32 @llvm.riscv.esp.vsub.s32.st.incp(i32 4, i32 0, i32 0, i32 [[TMP264]], i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s8(i32 4, i32 1, i32 3) // CHECK-NEXT: [[TMP266:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.st.ua.state.ip(i32 [[TMP266]], i32 -1360) -// CHECK-NEXT: [[TMP267:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.stxq.32(i32 [[TMP267]], i32 6, i32 2, i32 3, i32 0) +// CHECK-NEXT: [[TMP267:%.*]] = call i32 @llvm.riscv.esp.vsub.s8.ld.incp(i32 3, i32 7, i32 [[TMP266]], i32 3, i32 5) // CHECK-NEXT: [[TMP268:%.*]] = load i32, ptr [[DATA]], align 4 -// 
CHECK-NEXT: call void @llvm.riscv.esp.vld.128.ip(i32 [[TMP268]], i32 -1136, i32 0) -// CHECK-NEXT: [[TMP269:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP269:%.*]] = call i32 @llvm.riscv.esp.vsub.s8.st.incp(i32 5, i32 7, i32 3, i32 [[TMP268]], i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u16(i32 4, i32 6, i32 5) // CHECK-NEXT: [[TMP270:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vld.128.xp(i32 [[TMP269]], i32 [[TMP270]], i32 5) -// CHECK-NEXT: [[TMP271:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vld.h.64.ip(i32 [[TMP271]], i32 1008, i32 4) +// CHECK-NEXT: [[TMP271:%.*]] = call i32 @llvm.riscv.esp.vsub.u16.ld.incp(i32 4, i32 7, i32 [[TMP270]], i32 0, i32 5) // CHECK-NEXT: [[TMP272:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP273:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vld.h.64.xp(i32 [[TMP272]], i32 [[TMP273]], i32 2) +// CHECK-NEXT: [[TMP273:%.*]] = call i32 @llvm.riscv.esp.vsub.u16.st.incp(i32 2, i32 2, i32 7, i32 [[TMP272]], i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u32(i32 0, i32 1, i32 2) // CHECK-NEXT: [[TMP274:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vld.l.64.ip(i32 [[TMP274]], i32 -304, i32 6) -// CHECK-NEXT: [[TMP275:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP275:%.*]] = call i32 @llvm.riscv.esp.vsub.u32.ld.incp(i32 5, i32 6, i32 [[TMP274]], i32 3, i32 5) // CHECK-NEXT: [[TMP276:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vld.l.64.xp(i32 [[TMP275]], i32 [[TMP276]], i32 6) -// CHECK-NEXT: [[TMP277:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vst.128.ip(i32 0, i32 [[TMP277]], i32 -1216) +// CHECK-NEXT: [[TMP277:%.*]] = call i32 @llvm.riscv.esp.vsub.u32.st.incp(i32 0, i32 1, i32 4, i32 [[TMP276]], i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u8(i32 4, 
i32 2, i32 7) // CHECK-NEXT: [[TMP278:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: [[TMP279:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vst.128.xp(i32 [[TMP278]], i32 6, i32 [[TMP279]]) +// CHECK-NEXT: [[TMP279:%.*]] = call i32 @llvm.riscv.esp.vsub.u8.ld.incp(i32 2, i32 7, i32 [[TMP278]], i32 3, i32 4) // CHECK-NEXT: [[TMP280:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vst.h.64.ip(i32 1, i32 [[TMP280]], i32 -456) -// CHECK-NEXT: [[TMP281:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP281:%.*]] = call i32 @llvm.riscv.esp.vsub.u8.st.incp(i32 6, i32 4, i32 7, i32 [[TMP280]], i32 7) // CHECK-NEXT: [[TMP282:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vst.h.64.xp(i32 [[TMP281]], i32 2, i32 [[TMP282]]) // CHECK-NEXT: [[TMP283:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vst.l.64.ip(i32 6, i32 [[TMP283]], i32 664) // CHECK-NEXT: [[TMP284:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.addx2(i32 [[TMP282]], i32 [[TMP283]], i32 [[TMP284]]) // CHECK-NEXT: [[TMP285:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.vst.l.64.xp(i32 [[TMP284]], i32 4, i32 [[TMP285]]) -// CHECK-NEXT: call void @llvm.riscv.esp.slci.2q(i32 2, i32 0, i32 14) // CHECK-NEXT: [[TMP286:%.*]] = load i32, ptr [[DATA]], align 4 // CHECK-NEXT: [[TMP287:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.slcxxp.2q(i32 [[TMP286]], i32 [[TMP287]], i32 0, i32 1) -// CHECK-NEXT: call void @llvm.riscv.esp.src.q(i32 7, i32 3, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.addx4(i32 [[TMP285]], i32 [[TMP286]], i32 [[TMP287]]) // CHECK-NEXT: [[TMP288:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.src.q.ld.ip(i32 1, i32 [[TMP288]], i32 4, i32 1168, i32 4) // CHECK-NEXT: [[TMP289:%.*]] = load i32, ptr [[DATA]], 
align 4 // CHECK-NEXT: [[TMP290:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.src.q.ld.xp(i32 [[TMP289]], i32 0, i32 [[TMP290]], i32 1, i32 0) -// CHECK-NEXT: call void @llvm.riscv.esp.src.q.qup(i32 3, i32 3, i32 0) -// CHECK-NEXT: call void @llvm.riscv.esp.srci.2q(i32 7, i32 4, i32 1) -// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.s16.q.qacc(i32 2, i32 1, i32 5) -// CHECK-NEXT: [[TMP291:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.s16.qacc(i32 [[TMP291]], i32 0, i32 7) -// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.s8.q.qacc(i32 7, i32 0, i32 3) +// CHECK-NEXT: [[TMP291:%.*]] = call i32 @llvm.riscv.esp.sat(i32 [[TMP288]], i32 [[TMP289]], i32 [[TMP290]]) // CHECK-NEXT: [[TMP292:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.s8.qacc(i32 [[TMP292]], i32 1, i32 3) -// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.u16.q.qacc(i32 6, i32 1, i32 0) // CHECK-NEXT: [[TMP293:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.u16.qacc(i32 [[TMP293]], i32 0, i32 0) -// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.u8.q.qacc(i32 6, i32 0, i32 7) // CHECK-NEXT: [[TMP294:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.u8.qacc(i32 [[TMP294]], i32 1, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.subx2(i32 [[TMP292]], i32 [[TMP293]], i32 [[TMP294]]) // CHECK-NEXT: [[TMP295:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.srcq.128.st.incp(i32 0, i32 5, i32 [[TMP295]]) // CHECK-NEXT: [[TMP296:%.*]] = load i32, ptr [[DATA]], align 4 // CHECK-NEXT: [[TMP297:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.srcxxp.2q(i32 [[TMP296]], i32 [[TMP297]], i32 7, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.subx4(i32 [[TMP295]], i32 [[TMP296]], i32 [[TMP297]]) +// CHECK-NEXT: call void @llvm.riscv.esp.andq(i32 0, i32 1, i32 4) +// 
CHECK-NEXT: call void @llvm.riscv.esp.notq(i32 0, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.orq(i32 0, i32 6, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.xorq(i32 7, i32 4, i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.s16(i32 6, i32 6, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.s32(i32 6, i32 2, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.s8(i32 7, i32 6, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.u16(i32 0, i32 2, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.u32(i32 6, i32 4, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.u8(i32 6, i32 4, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.s16(i32 5, i32 3, i32 6) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.s32(i32 2, i32 4, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.s8(i32 7, i32 7, i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.u16(i32 2, i32 7, i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.u32(i32 6, i32 4, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.u8(i32 0, i32 4, i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.s16(i32 4, i32 6, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.s32(i32 2, i32 4, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.s8(i32 3, i32 0, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.u16(i32 2, i32 4, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.u32(i32 2, i32 0, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.u8(i32 0, i32 2, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.mov.s16.qacc(i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.mov.s8.qacc(i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.mov.u16.qacc(i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.mov.u8.qacc(i32 5) // CHECK-NEXT: [[TMP298:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movi.16.a(i32 2, i32 8, i32 [[TMP298]]) // CHECK-NEXT: [[TMP299:%.*]] = load i32, ptr [[DATA]], align 4 -// 
CHECK-NEXT: call void @llvm.riscv.esp.srs.s.xacc(i32 [[TMP298]], i32 [[TMP299]]) +// CHECK-NEXT: call void @llvm.riscv.esp.movi.16.q(i32 [[TMP299]], i32 12, i32 1) // CHECK-NEXT: [[TMP300:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movi.32.a(i32 4, i32 2, i32 [[TMP300]]) // CHECK-NEXT: [[TMP301:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.srs.u.xacc(i32 [[TMP300]], i32 [[TMP301]]) +// CHECK-NEXT: call void @llvm.riscv.esp.movi.32.q(i32 [[TMP301]], i32 1, i32 0) +// CHECK-NEXT: [[TMP302:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movi.8.a(i32 0, i32 13, i32 [[TMP302]]) +// CHECK-NEXT: [[TMP303:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movi.8.q(i32 [[TMP303]], i32 14, i32 3) +// CHECK-NEXT: [[TMP304:%.*]] = call i32 @llvm.riscv.esp.movx.r.cfg() +// CHECK-NEXT: [[TMP305:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.fft.bit.width(i32 [[TMP305]]) +// CHECK-NEXT: [[TMP306:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP307:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.perf(i32 [[TMP306]], i32 [[TMP307]]) +// CHECK-NEXT: [[TMP308:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.sar(i32 [[TMP308]]) +// CHECK-NEXT: [[TMP309:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.sar.bytes(i32 [[TMP309]]) +// CHECK-NEXT: [[TMP310:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.xacc.h(i32 [[TMP310]]) +// CHECK-NEXT: [[TMP311:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.xacc.l(i32 [[TMP311]]) +// CHECK-NEXT: [[TMP312:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.cfg(i32 [[TMP312]]) +// CHECK-NEXT: [[TMP313:%.*]] = load i32, ptr 
[[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.fft.bit.width(i32 [[TMP313]]) +// CHECK-NEXT: [[TMP314:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.perf(i32 [[TMP314]]) +// CHECK-NEXT: [[TMP315:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.sar(i32 [[TMP315]]) +// CHECK-NEXT: [[TMP316:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.sar.bytes(i32 [[TMP316]]) +// CHECK-NEXT: [[TMP317:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.xacc.h(i32 [[TMP317]]) +// CHECK-NEXT: [[TMP318:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.xacc.l(i32 [[TMP318]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vext.s16(i32 0, i32 4, i32 6) +// CHECK-NEXT: call void @llvm.riscv.esp.vext.s8(i32 0, i32 7, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vext.u16(i32 1, i32 0, i32 6) +// CHECK-NEXT: call void @llvm.riscv.esp.vext.u8(i32 4, i32 1, i32 6) +// CHECK-NEXT: call void @llvm.riscv.esp.vunzip.16(i32 3, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vunzip.32(i32 6, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vunzip.8(i32 3, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vunzipt.16(i32 1, i32 5, i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vunzipt.8(i32 7, i32 5, i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vzip.16(i32 2, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vzip.32(i32 0, i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vzip.8(i32 6, i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vzipt.16(i32 6, i32 3, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.vzipt.8(i32 7, i32 0, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.zero.q(i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.zero.qacc() +// CHECK-NEXT: call void @llvm.riscv.esp.zero.xacc() +// CHECK-NEXT: [[TMP319:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: 
[[TMP320:%.*]] = call i32 @llvm.riscv.esp.fft.ams.s16.ld.incp(i32 1, i32 1, i32 3, i32 [[TMP319]], i32 0, i32 6, i32 0, i32 3) +// CHECK-NEXT: [[TMP321:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP322:%.*]] = call i32 @llvm.riscv.esp.fft.ams.s16.ld.incp.uaup(i32 3, i32 0, i32 1, i32 [[TMP321]], i32 0, i32 3, i32 3, i32 1) +// CHECK-NEXT: [[TMP323:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP324:%.*]] = call i32 @llvm.riscv.esp.fft.ams.s16.ld.r32.decp(i32 2, i32 3, i32 7, i32 [[TMP323]], i32 0, i32 1, i32 1, i32 4) +// CHECK-NEXT: [[TMP325:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP326:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.fft.ams.s16.st.incp(i32 4, i32 4, i32 0, i32 5, i32 [[TMP325]], i32 [[TMP326]], i32 1, i32 1) +// CHECK-NEXT: [[TMP327:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP328:%.*]] = call i32 @llvm.riscv.esp.fft.bitrev(i32 [[TMP327]], i32 6) +// CHECK-NEXT: [[TMP329:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP330:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP331:%.*]] = call i32 @llvm.riscv.esp.fft.cmul.s16.ld.xp(i32 [[TMP329]], i32 7, i32 0, i32 [[TMP330]], i32 2, i32 1, i32 2) +// CHECK-NEXT: [[TMP332:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP333:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP334:%.*]] = call i32 @llvm.riscv.esp.fft.cmul.s16.st.xp(i32 [[TMP332]], i32 6, i32 0, i32 7, i32 [[TMP333]], i32 0, i32 1, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.fft.r2bf.s16(i32 2, i32 5, i32 0, i32 7, i32 5) +// CHECK-NEXT: [[TMP335:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP336:%.*]] = call i32 @llvm.riscv.esp.fft.r2bf.s16.st.incp(i32 1, i32 7, i32 [[TMP335]], i32 1, i32 6) +// CHECK-NEXT: [[TMP337:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP338:%.*]] = call i32 @llvm.riscv.esp.fft.vst.r32.decp(i32 2, i32 [[TMP337]], i32 1) +// CHECK-NEXT: 
[[TMP339:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP340:%.*]] = call i32 @llvm.riscv.esp.ld.128.usar.ip(i32 [[TMP339]], i32 -464, i32 7) +// CHECK-NEXT: [[TMP341:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP342:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP343:%.*]] = call i32 @llvm.riscv.esp.ld.128.usar.xp(i32 [[TMP341]], i32 [[TMP342]], i32 0) +// CHECK-NEXT: [[TMP344:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP345:%.*]] = call i32 @llvm.riscv.esp.ld.xacc.ip(i32 [[TMP344]], i32 -224) +// CHECK-NEXT: [[TMP346:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP347:%.*]] = call i32 @llvm.riscv.esp.ldqa.s16.128.ip(i32 [[TMP346]], i32 288) +// CHECK-NEXT: [[TMP348:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP349:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP350:%.*]] = call i32 @llvm.riscv.esp.ldqa.s16.128.xp(i32 [[TMP348]], i32 [[TMP349]]) +// CHECK-NEXT: [[TMP351:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP352:%.*]] = call i32 @llvm.riscv.esp.ldqa.s8.128.ip(i32 [[TMP351]], i32 -1408) +// CHECK-NEXT: [[TMP353:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP354:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP355:%.*]] = call i32 @llvm.riscv.esp.ldqa.s8.128.xp(i32 [[TMP353]], i32 [[TMP354]]) +// CHECK-NEXT: [[TMP356:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP357:%.*]] = call i32 @llvm.riscv.esp.ldqa.u16.128.ip(i32 [[TMP356]], i32 -1440) +// CHECK-NEXT: [[TMP358:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP359:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP360:%.*]] = call i32 @llvm.riscv.esp.ldqa.u16.128.xp(i32 [[TMP358]], i32 [[TMP359]]) +// CHECK-NEXT: [[TMP361:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP362:%.*]] = call i32 @llvm.riscv.esp.ldqa.u8.128.ip(i32 [[TMP361]], i32 -816) +// CHECK-NEXT: [[TMP363:%.*]] = load i32, ptr [[DATA]], align 4 +// 
CHECK-NEXT: [[TMP364:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP365:%.*]] = call i32 @llvm.riscv.esp.ldqa.u8.128.xp(i32 [[TMP363]], i32 [[TMP364]]) +// CHECK-NEXT: [[TMP366:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP367:%.*]] = call i32 @llvm.riscv.esp.vldbc.16.ip(i32 [[TMP366]], i32 380, i32 2) +// CHECK-NEXT: [[TMP368:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP369:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP370:%.*]] = call i32 @llvm.riscv.esp.vldbc.16.xp(i32 [[TMP368]], i32 [[TMP369]], i32 3) +// CHECK-NEXT: [[TMP371:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP372:%.*]] = call i32 @llvm.riscv.esp.vldbc.32.ip(i32 [[TMP371]], i32 -292, i32 7) +// CHECK-NEXT: [[TMP373:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP374:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP375:%.*]] = call i32 @llvm.riscv.esp.vldbc.32.xp(i32 [[TMP373]], i32 [[TMP374]], i32 1) +// CHECK-NEXT: [[TMP376:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP377:%.*]] = call i32 @llvm.riscv.esp.vldbc.8.ip(i32 [[TMP376]], i32 -416, i32 5) +// CHECK-NEXT: [[TMP378:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP379:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP380:%.*]] = call i32 @llvm.riscv.esp.vldbc.8.xp(i32 [[TMP378]], i32 [[TMP379]], i32 7) +// CHECK-NEXT: [[TMP381:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP382:%.*]] = call i32 @llvm.riscv.esp.vldext.s16.ip(i32 [[TMP381]], i32 -80, i32 0, i32 3) +// CHECK-NEXT: [[TMP383:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP384:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP385:%.*]] = call i32 @llvm.riscv.esp.vldext.s16.xp(i32 [[TMP383]], i32 [[TMP384]], i32 2, i32 5) +// CHECK-NEXT: [[TMP386:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP387:%.*]] = call i32 @llvm.riscv.esp.vldext.s8.ip(i32 [[TMP386]], i32 0, i32 2, i32 7) +// CHECK-NEXT: 
[[TMP388:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP389:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP390:%.*]] = call i32 @llvm.riscv.esp.vldext.s8.xp(i32 [[TMP388]], i32 [[TMP389]], i32 7, i32 5) +// CHECK-NEXT: [[TMP391:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP392:%.*]] = call i32 @llvm.riscv.esp.vldext.u16.ip(i32 [[TMP391]], i32 32, i32 0, i32 6) +// CHECK-NEXT: [[TMP393:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP394:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP395:%.*]] = call i32 @llvm.riscv.esp.vldext.u16.xp(i32 [[TMP393]], i32 [[TMP394]], i32 7, i32 6) +// CHECK-NEXT: [[TMP396:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP397:%.*]] = call i32 @llvm.riscv.esp.vldext.u8.ip(i32 [[TMP396]], i32 -16, i32 3, i32 1) +// CHECK-NEXT: [[TMP398:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP399:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP400:%.*]] = call i32 @llvm.riscv.esp.vldext.u8.xp(i32 [[TMP398]], i32 [[TMP399]], i32 5, i32 4) +// CHECK-NEXT: [[TMP401:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP402:%.*]] = call i32 @llvm.riscv.esp.vldhbc.16.incp(i32 [[TMP401]], i32 2, i32 3) +// CHECK-NEXT: [[TMP403:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP404:%.*]] = call i32 @llvm.riscv.esp.ld.qacc.h.h.128.ip(i32 [[TMP403]], i32 -240) +// CHECK-NEXT: [[TMP405:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP406:%.*]] = call i32 @llvm.riscv.esp.ld.qacc.h.l.128.ip(i32 [[TMP405]], i32 -32) +// CHECK-NEXT: [[TMP407:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP408:%.*]] = call i32 @llvm.riscv.esp.ld.qacc.l.h.128.ip(i32 [[TMP407]], i32 -64) +// CHECK-NEXT: [[TMP409:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP410:%.*]] = call i32 @llvm.riscv.esp.ld.qacc.l.l.128.ip(i32 [[TMP409]], i32 -80) +// CHECK-NEXT: [[TMP411:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: 
[[TMP412:%.*]] = call i32 @llvm.riscv.esp.ld.ua.state.ip(i32 [[TMP411]], i32 1504) +// CHECK-NEXT: [[TMP413:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ldxq.32(i32 [[TMP413]], i32 6, i32 1, i32 7, i32 1) +// CHECK-NEXT: [[TMP414:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP415:%.*]] = call i32 @llvm.riscv.esp.st.qacc.h.h.128.ip(i32 [[TMP414]], i32 -480) +// CHECK-NEXT: [[TMP416:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP417:%.*]] = call i32 @llvm.riscv.esp.st.qacc.h.l.128.ip(i32 [[TMP416]], i32 -1712) +// CHECK-NEXT: [[TMP418:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP419:%.*]] = call i32 @llvm.riscv.esp.st.qacc.l.h.128.ip(i32 [[TMP418]], i32 960) +// CHECK-NEXT: [[TMP420:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP421:%.*]] = call i32 @llvm.riscv.esp.st.qacc.l.l.128.ip(i32 [[TMP420]], i32 1920) +// CHECK-NEXT: [[TMP422:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP423:%.*]] = call i32 @llvm.riscv.esp.st.ua.state.ip(i32 [[TMP422]], i32 -1360) +// CHECK-NEXT: [[TMP424:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.stxq.32(i32 [[TMP424]], i32 6, i32 2, i32 3, i32 0) +// CHECK-NEXT: [[TMP425:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP426:%.*]] = call i32 @llvm.riscv.esp.vld.128.ip(i32 [[TMP425]], i32 -1136, i32 0) +// CHECK-NEXT: [[TMP427:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP428:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP429:%.*]] = call i32 @llvm.riscv.esp.vld.128.xp(i32 [[TMP427]], i32 [[TMP428]], i32 5) +// CHECK-NEXT: [[TMP430:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP431:%.*]] = call i32 @llvm.riscv.esp.vld.h.64.ip(i32 [[TMP430]], i32 1008, i32 4) +// CHECK-NEXT: [[TMP432:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP433:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP434:%.*]] = call i32 
@llvm.riscv.esp.vld.h.64.xp(i32 [[TMP432]], i32 [[TMP433]], i32 2) +// CHECK-NEXT: [[TMP435:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP436:%.*]] = call i32 @llvm.riscv.esp.vld.l.64.ip(i32 [[TMP435]], i32 -304, i32 6) +// CHECK-NEXT: [[TMP437:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP438:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP439:%.*]] = call i32 @llvm.riscv.esp.vld.l.64.xp(i32 [[TMP437]], i32 [[TMP438]], i32 6) +// CHECK-NEXT: [[TMP440:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP441:%.*]] = call i32 @llvm.riscv.esp.vst.128.ip(i32 0, i32 [[TMP440]], i32 -1216) +// CHECK-NEXT: [[TMP442:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP443:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP444:%.*]] = call i32 @llvm.riscv.esp.vst.128.xp(i32 [[TMP442]], i32 6, i32 [[TMP443]]) +// CHECK-NEXT: [[TMP445:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP446:%.*]] = call i32 @llvm.riscv.esp.vst.h.64.ip(i32 1, i32 [[TMP445]], i32 -456) +// CHECK-NEXT: [[TMP447:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP448:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP449:%.*]] = call i32 @llvm.riscv.esp.vst.h.64.xp(i32 [[TMP447]], i32 2, i32 [[TMP448]]) +// CHECK-NEXT: [[TMP450:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP451:%.*]] = call i32 @llvm.riscv.esp.vst.l.64.ip(i32 6, i32 [[TMP450]], i32 664) +// CHECK-NEXT: [[TMP452:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP453:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP454:%.*]] = call i32 @llvm.riscv.esp.vst.l.64.xp(i32 [[TMP452]], i32 4, i32 [[TMP453]]) +// CHECK-NEXT: call void @llvm.riscv.esp.slci.2q(i32 2, i32 0, i32 14) +// CHECK-NEXT: [[TMP455:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP456:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.slcxxp.2q(i32 [[TMP455]], i32 [[TMP456]], i32 0, i32 1) +// 
CHECK-NEXT: call void @llvm.riscv.esp.src.q(i32 7, i32 3, i32 2) +// CHECK-NEXT: [[TMP457:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP458:%.*]] = call i32 @llvm.riscv.esp.src.q.ld.ip(i32 1, i32 [[TMP457]], i32 4, i32 1168, i32 4) +// CHECK-NEXT: [[TMP459:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP460:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP461:%.*]] = call i32 @llvm.riscv.esp.src.q.ld.xp(i32 [[TMP459]], i32 0, i32 [[TMP460]], i32 1, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.src.q.qup(i32 3, i32 3, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.srci.2q(i32 7, i32 4, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.s16.q.qacc(i32 2, i32 1, i32 5) +// CHECK-NEXT: [[TMP462:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.s16.qacc(i32 [[TMP462]], i32 0, i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.s8.q.qacc(i32 7, i32 0, i32 3) +// CHECK-NEXT: [[TMP463:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.s8.qacc(i32 [[TMP463]], i32 1, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.u16.q.qacc(i32 6, i32 1, i32 0) +// CHECK-NEXT: [[TMP464:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.u16.qacc(i32 [[TMP464]], i32 0, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.u8.q.qacc(i32 6, i32 0, i32 7) +// CHECK-NEXT: [[TMP465:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.u8.qacc(i32 [[TMP465]], i32 1, i32 2) +// CHECK-NEXT: [[TMP466:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP467:%.*]] = call i32 @llvm.riscv.esp.srcq.128.st.incp(i32 0, i32 5, i32 [[TMP466]]) +// CHECK-NEXT: [[TMP468:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP469:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srcxxp.2q(i32 [[TMP468]], i32 [[TMP469]], i32 7, i32 5) +// CHECK-NEXT: [[TMP470:%.*]] = 
load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP471:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srs.s.xacc(i32 [[TMP470]], i32 [[TMP471]]) +// CHECK-NEXT: [[TMP472:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP473:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srs.u.xacc(i32 [[TMP472]], i32 [[TMP473]]) // CHECK-NEXT: call void @llvm.riscv.esp.vsl.32(i32 0, i32 3) // CHECK-NEXT: call void @llvm.riscv.esp.vsld.16(i32 6, i32 4, i32 4) // CHECK-NEXT: call void @llvm.riscv.esp.vsld.32(i32 2, i32 7, i32 5) @@ -662,10 +661,10 @@ // CHECK-NEXT: call void @llvm.riscv.esp.vsrd.16(i32 6, i32 2, i32 1) // CHECK-NEXT: call void @llvm.riscv.esp.vsrd.32(i32 7, i32 5, i32 4) // CHECK-NEXT: call void @llvm.riscv.esp.vsrd.8(i32 2, i32 1, i32 4) -// CHECK-NEXT: [[TMP302:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.st.s.xacc.ip(i32 [[TMP302]], i32 912) -// CHECK-NEXT: [[TMP303:%.*]] = load i32, ptr [[DATA]], align 4 -// CHECK-NEXT: call void @llvm.riscv.esp.st.u.xacc.ip(i32 [[TMP303]], i32 -112) +// CHECK-NEXT: [[TMP474:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP475:%.*]] = call i32 @llvm.riscv.esp.st.s.xacc.ip(i32 [[TMP474]], i32 912) +// CHECK-NEXT: [[TMP476:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP477:%.*]] = call i32 @llvm.riscv.esp.st.u.xacc.ip(i32 [[TMP476]], i32 -112) // CHECK-NEXT: ret void // void test() { @@ -903,7 +902,7 @@ __builtin_riscv_esp_movi_32_a(4, 2, data); __builtin_riscv_esp_movi_32_q(data, 1, 0); __builtin_riscv_esp_movi_8_a(0, 13, data); __builtin_riscv_esp_movi_8_q(data, 14, 3); -__builtin_riscv_esp_movx_r_cfg(data); +__builtin_riscv_esp_movx_r_cfg(); __builtin_riscv_esp_movx_r_fft_bit_width(data); __builtin_riscv_esp_movx_r_perf(data, data); __builtin_riscv_esp_movx_r_sar(data); diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVESP32P4.td b/llvm/include/llvm/IR/IntrinsicsRISCVESP32P4.td index 
c1a11f90f12dd..6b3162d95ea04 100644 --- a/llvm/include/llvm/IR/IntrinsicsRISCVESP32P4.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCVESP32P4.td @@ -3,229 +3,229 @@ def int_riscv_esp_vcmulas_s16_qacc_h: ClangBuiltin<"__builtin_riscv_esp_vcmulas_ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vcmulas_s16_qacc_h_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s16_qacc_h_ld_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vcmulas_s16_qacc_h_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s16_qacc_h_ld_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vcmulas_s16_qacc_l: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s16_qacc_l">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vcmulas_s16_qacc_l_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s16_qacc_l_ld_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vcmulas_s16_qacc_l_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s16_qacc_l_ld_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vcmulas_s8_qacc_h: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s8_qacc_h">, Intrinsic<[], [llvm_i32_ty, 
llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vcmulas_s8_qacc_h_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s8_qacc_h_ld_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vcmulas_s8_qacc_h_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s8_qacc_h_ld_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vcmulas_s8_qacc_l: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s8_qacc_l">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vcmulas_s8_qacc_l_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s8_qacc_l_ld_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vcmulas_s8_qacc_l_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s8_qacc_l_ld_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s16_qacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_qacc">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s16_qacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_qacc_ld_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], 
[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s16_qacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_qacc_ld_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s16_qacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_qacc_st_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s16_qacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_qacc_st_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s16_xacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_xacc">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s16_xacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_xacc_ld_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s16_xacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_xacc_ld_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def 
int_riscv_esp_vmulas_s16_xacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_xacc_st_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s16_xacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_xacc_st_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s8_qacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_qacc">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s8_qacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_qacc_ld_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s8_qacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_qacc_ld_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s8_qacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_qacc_st_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s8_qacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_qacc_st_xp">, - Intrinsic<[], 
[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s8_xacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_xacc">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s8_xacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_xacc_ld_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s8_xacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_xacc_ld_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s8_xacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_xacc_st_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s8_xacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_xacc_st_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u16_qacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_qacc">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u16_qacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_qacc_ld_ip">, - 
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u16_qacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_qacc_ld_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u16_qacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_qacc_st_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u16_qacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_qacc_st_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u16_xacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_xacc">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u16_xacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_xacc_ld_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u16_xacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_xacc_ld_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, 
ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u16_xacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_xacc_st_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u16_xacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_xacc_st_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u8_qacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_qacc">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u8_qacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_qacc_ld_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u8_qacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_qacc_ld_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u8_qacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_qacc_st_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, 
ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u8_qacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_qacc_st_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u8_xacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_xacc">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u8_xacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_xacc_ld_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u8_xacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_xacc_ld_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u8_xacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_xacc_st_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u8_xacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_xacc_st_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s16_qacc_ldbc_incp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_qacc_ldbc_incp">, - 
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_s8_qacc_ldbc_incp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_qacc_ldbc_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u16_qacc_ldbc_incp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_qacc_ldbc_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmulas_u8_qacc_ldbc_incp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_qacc_ldbc_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsmulas_s16_qacc: ClangBuiltin<"__builtin_riscv_esp_vsmulas_s16_qacc">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsmulas_s16_qacc_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsmulas_s16_qacc_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsmulas_s8_qacc: ClangBuiltin<"__builtin_riscv_esp_vsmulas_s8_qacc">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsmulas_s8_qacc_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsmulas_s8_qacc_ld_incp">, - Intrinsic<[], 
[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsmulas_u16_qacc: ClangBuiltin<"__builtin_riscv_esp_vsmulas_u16_qacc">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsmulas_u16_qacc_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsmulas_u16_qacc_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsmulas_u8_qacc: ClangBuiltin<"__builtin_riscv_esp_vsmulas_u8_qacc">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsmulas_u8_qacc_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsmulas_u8_qacc_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_cmul_s16: ClangBuiltin<"__builtin_riscv_esp_cmul_s16">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_cmul_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_s16_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_cmul_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_s16_st_incp">, - Intrinsic<[], [llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_cmul_s8: ClangBuiltin<"__builtin_riscv_esp_cmul_s8">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_cmul_s8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_s8_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_cmul_s8_st_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_s8_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_cmul_u16: ClangBuiltin<"__builtin_riscv_esp_cmul_u16">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_cmul_u16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_u16_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_cmul_u16_st_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_u16_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, 
ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_cmul_u8: ClangBuiltin<"__builtin_riscv_esp_cmul_u8">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_cmul_u8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_u8_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_cmul_u8_st_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_u8_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_max_s16_a: ClangBuiltin<"__builtin_riscv_esp_max_s16_a">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; @@ -276,55 +276,55 @@ def int_riscv_esp_vadd_s16: ClangBuiltin<"__builtin_riscv_esp_vadd_s16">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vadd_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_s16_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vadd_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_s16_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, 
ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vadd_s32: ClangBuiltin<"__builtin_riscv_esp_vadd_s32">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vadd_s32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_s32_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vadd_s32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_s32_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vadd_s8: ClangBuiltin<"__builtin_riscv_esp_vadd_s8">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vadd_s8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_s8_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vadd_s8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_s8_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vadd_u16: ClangBuiltin<"__builtin_riscv_esp_vadd_u16">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def 
int_riscv_esp_vadd_u16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_u16_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vadd_u16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_u16_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vadd_u32: ClangBuiltin<"__builtin_riscv_esp_vadd_u32">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vadd_u32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_u32_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vadd_u32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_u32_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vadd_u8: ClangBuiltin<"__builtin_riscv_esp_vadd_u8">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vadd_u8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_u8_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], 
[ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vadd_u8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_u8_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vclamp_s16: ClangBuiltin<"__builtin_riscv_esp_vclamp_s16">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; @@ -333,121 +333,121 @@ def int_riscv_esp_vmax_s16: ClangBuiltin<"__builtin_riscv_esp_vmax_s16">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmax_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_s16_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmax_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_s16_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmax_s32: ClangBuiltin<"__builtin_riscv_esp_vmax_s32">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmax_s32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_s32_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmax_s32_st_incp: 
ClangBuiltin<"__builtin_riscv_esp_vmax_s32_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmax_s8: ClangBuiltin<"__builtin_riscv_esp_vmax_s8">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmax_s8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_s8_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmax_s8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_s8_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmax_u16: ClangBuiltin<"__builtin_riscv_esp_vmax_u16">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmax_u16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_u16_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmax_u16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_u16_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, 
ImmArg>]>; def int_riscv_esp_vmax_u32: ClangBuiltin<"__builtin_riscv_esp_vmax_u32">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmax_u32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_u32_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmax_u32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_u32_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmax_u8: ClangBuiltin<"__builtin_riscv_esp_vmax_u8">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmax_u8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_u8_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmax_u8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_u8_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmin_s16: ClangBuiltin<"__builtin_riscv_esp_vmin_s16">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmin_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_s16_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmin_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_s16_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmin_s32: ClangBuiltin<"__builtin_riscv_esp_vmin_s32">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmin_s32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_s32_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmin_s32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_s32_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmin_s8: ClangBuiltin<"__builtin_riscv_esp_vmin_s8">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmin_s8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_s8_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmin_s8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_s8_st_incp">, - 
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmin_u16: ClangBuiltin<"__builtin_riscv_esp_vmin_u16">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmin_u16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_u16_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmin_u16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_u16_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmin_u32: ClangBuiltin<"__builtin_riscv_esp_vmin_u32">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmin_u32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_u32_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmin_u32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_u32_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmin_u8: 
ClangBuiltin<"__builtin_riscv_esp_vmin_u8">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmin_u8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_u8_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmin_u8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_u8_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmul_s16: ClangBuiltin<"__builtin_riscv_esp_vmul_s16">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmul_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_s16_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmul_s16_s8xs8: ClangBuiltin<"__builtin_riscv_esp_vmul_s16_s8xs8">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmul_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_s16_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmul_s32_s16xs16: ClangBuiltin<"__builtin_riscv_esp_vmul_s32_s16xs16">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; @@ -456,28 +456,28 @@ def int_riscv_esp_vmul_s8: ClangBuiltin<"__builtin_riscv_esp_vmul_s8">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmul_s8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_s8_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmul_s8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_s8_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmul_u16: ClangBuiltin<"__builtin_riscv_esp_vmul_u16">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmul_u16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_u16_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmul_u16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_u16_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmul_u8: ClangBuiltin<"__builtin_riscv_esp_vmul_u8">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmul_u8_ld_incp: 
ClangBuiltin<"__builtin_riscv_esp_vmul_u8_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vmul_u8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_u8_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vprelu_s16: ClangBuiltin<"__builtin_riscv_esp_vprelu_s16">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; @@ -537,55 +537,55 @@ def int_riscv_esp_vsub_s16: ClangBuiltin<"__builtin_riscv_esp_vsub_s16">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsub_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_s16_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsub_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_s16_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsub_s32: ClangBuiltin<"__builtin_riscv_esp_vsub_s32">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsub_s32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_s32_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsub_s32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_s32_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsub_s8: ClangBuiltin<"__builtin_riscv_esp_vsub_s8">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsub_s8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_s8_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsub_s8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_s8_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsub_u16: ClangBuiltin<"__builtin_riscv_esp_vsub_u16">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsub_u16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_u16_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsub_u16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_u16_st_incp">, - 
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsub_u32: ClangBuiltin<"__builtin_riscv_esp_vsub_u32">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsub_u32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_u32_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsub_u32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_u32_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsub_u8: ClangBuiltin<"__builtin_riscv_esp_vsub_u8">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsub_u8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_u8_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vsub_u8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_u8_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_addx2: 
ClangBuiltin<"__builtin_riscv_esp_addx2">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; @@ -594,7 +594,7 @@ def int_riscv_esp_addx4: ClangBuiltin<"__builtin_riscv_esp_addx4">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; def int_riscv_esp_sat: ClangBuiltin<"__builtin_riscv_esp_sat">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; def int_riscv_esp_subx2: ClangBuiltin<"__builtin_riscv_esp_subx2">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; @@ -699,7 +699,7 @@ def int_riscv_esp_movi_8_q: ClangBuiltin<"__builtin_riscv_esp_movi_8_q">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_movx_r_cfg: ClangBuiltin<"__builtin_riscv_esp_movx_r_cfg">, - Intrinsic<[], [llvm_i32_ty], []>; + Intrinsic<[llvm_i32_ty], [], []>; def int_riscv_esp_movx_r_fft_bit_width: ClangBuiltin<"__builtin_riscv_esp_movx_r_fft_bit_width">, Intrinsic<[], [llvm_i32_ty], []>; @@ -792,184 +792,184 @@ def int_riscv_esp_zero_xacc: ClangBuiltin<"__builtin_riscv_esp_zero_xacc">, Intrinsic<[], [], []>; def int_riscv_esp_fft_ams_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_fft_ams_s16_ld_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_fft_ams_s16_ld_incp_uaup: ClangBuiltin<"__builtin_riscv_esp_fft_ams_s16_ld_incp_uaup">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_fft_ams_s16_ld_r32_decp: ClangBuiltin<"__builtin_riscv_esp_fft_ams_s16_ld_r32_decp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_fft_ams_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_fft_ams_s16_st_incp">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_fft_bitrev: ClangBuiltin<"__builtin_riscv_esp_fft_bitrev">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_fft_cmul_s16_ld_xp: ClangBuiltin<"__builtin_riscv_esp_fft_cmul_s16_ld_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_fft_cmul_s16_st_xp: ClangBuiltin<"__builtin_riscv_esp_fft_cmul_s16_st_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def 
int_riscv_esp_fft_r2bf_s16: ClangBuiltin<"__builtin_riscv_esp_fft_r2bf_s16">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_fft_r2bf_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_fft_r2bf_s16_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_fft_vst_r32_decp: ClangBuiltin<"__builtin_riscv_esp_fft_vst_r32_decp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_ld_128_usar_ip: ClangBuiltin<"__builtin_riscv_esp_ld_128_usar_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_ld_128_usar_xp: ClangBuiltin<"__builtin_riscv_esp_ld_128_usar_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_ld_xacc_ip: ClangBuiltin<"__builtin_riscv_esp_ld_xacc_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_ldqa_s16_128_ip: ClangBuiltin<"__builtin_riscv_esp_ldqa_s16_128_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_ldqa_s16_128_xp: ClangBuiltin<"__builtin_riscv_esp_ldqa_s16_128_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>; def int_riscv_esp_ldqa_s8_128_ip: ClangBuiltin<"__builtin_riscv_esp_ldqa_s8_128_ip">, 
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_ldqa_s8_128_xp: ClangBuiltin<"__builtin_riscv_esp_ldqa_s8_128_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>; def int_riscv_esp_ldqa_u16_128_ip: ClangBuiltin<"__builtin_riscv_esp_ldqa_u16_128_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_ldqa_u16_128_xp: ClangBuiltin<"__builtin_riscv_esp_ldqa_u16_128_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>; def int_riscv_esp_ldqa_u8_128_ip: ClangBuiltin<"__builtin_riscv_esp_ldqa_u8_128_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_ldqa_u8_128_xp: ClangBuiltin<"__builtin_riscv_esp_ldqa_u8_128_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>; def int_riscv_esp_vldbc_16_ip: ClangBuiltin<"__builtin_riscv_esp_vldbc_16_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vldbc_16_xp: ClangBuiltin<"__builtin_riscv_esp_vldbc_16_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_vldbc_32_ip: ClangBuiltin<"__builtin_riscv_esp_vldbc_32_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vldbc_32_xp: ClangBuiltin<"__builtin_riscv_esp_vldbc_32_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], 
[ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_vldbc_8_ip: ClangBuiltin<"__builtin_riscv_esp_vldbc_8_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vldbc_8_xp: ClangBuiltin<"__builtin_riscv_esp_vldbc_8_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_vldext_s16_ip: ClangBuiltin<"__builtin_riscv_esp_vldext_s16_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vldext_s16_xp: ClangBuiltin<"__builtin_riscv_esp_vldext_s16_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vldext_s8_ip: ClangBuiltin<"__builtin_riscv_esp_vldext_s8_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vldext_s8_xp: ClangBuiltin<"__builtin_riscv_esp_vldext_s8_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vldext_u16_ip: ClangBuiltin<"__builtin_riscv_esp_vldext_u16_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; 
def int_riscv_esp_vldext_u16_xp: ClangBuiltin<"__builtin_riscv_esp_vldext_u16_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vldext_u8_ip: ClangBuiltin<"__builtin_riscv_esp_vldext_u8_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vldext_u8_xp: ClangBuiltin<"__builtin_riscv_esp_vldext_u8_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vldhbc_16_incp: ClangBuiltin<"__builtin_riscv_esp_vldhbc_16_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_ld_qacc_h_h_128_ip: ClangBuiltin<"__builtin_riscv_esp_ld_qacc_h_h_128_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_ld_qacc_h_l_128_ip: ClangBuiltin<"__builtin_riscv_esp_ld_qacc_h_l_128_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_ld_qacc_l_h_128_ip: ClangBuiltin<"__builtin_riscv_esp_ld_qacc_l_h_128_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_ld_qacc_l_l_128_ip: ClangBuiltin<"__builtin_riscv_esp_ld_qacc_l_l_128_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_ld_ua_state_ip: 
ClangBuiltin<"__builtin_riscv_esp_ld_ua_state_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_ldxq_32: ClangBuiltin<"__builtin_riscv_esp_ldxq_32">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_st_qacc_h_h_128_ip: ClangBuiltin<"__builtin_riscv_esp_st_qacc_h_h_128_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_st_qacc_h_l_128_ip: ClangBuiltin<"__builtin_riscv_esp_st_qacc_h_l_128_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_st_qacc_l_h_128_ip: ClangBuiltin<"__builtin_riscv_esp_st_qacc_l_h_128_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_st_qacc_l_l_128_ip: ClangBuiltin<"__builtin_riscv_esp_st_qacc_l_l_128_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_st_ua_state_ip: ClangBuiltin<"__builtin_riscv_esp_st_ua_state_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_stxq_32: ClangBuiltin<"__builtin_riscv_esp_stxq_32">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_vld_128_ip: ClangBuiltin<"__builtin_riscv_esp_vld_128_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vld_128_xp: ClangBuiltin<"__builtin_riscv_esp_vld_128_xp">, - Intrinsic<[], [llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_vld_h_64_ip: ClangBuiltin<"__builtin_riscv_esp_vld_h_64_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vld_h_64_xp: ClangBuiltin<"__builtin_riscv_esp_vld_h_64_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_vld_l_64_ip: ClangBuiltin<"__builtin_riscv_esp_vld_l_64_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vld_l_64_xp: ClangBuiltin<"__builtin_riscv_esp_vld_l_64_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_vst_128_ip: ClangBuiltin<"__builtin_riscv_esp_vst_128_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vst_128_xp: ClangBuiltin<"__builtin_riscv_esp_vst_128_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_vst_h_64_ip: ClangBuiltin<"__builtin_riscv_esp_vst_h_64_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vst_h_64_xp: ClangBuiltin<"__builtin_riscv_esp_vst_h_64_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, 
llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_vst_l_64_ip: ClangBuiltin<"__builtin_riscv_esp_vst_l_64_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_vst_l_64_xp: ClangBuiltin<"__builtin_riscv_esp_vst_l_64_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_slci_2q: ClangBuiltin<"__builtin_riscv_esp_slci_2q">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; @@ -981,10 +981,10 @@ def int_riscv_esp_src_q: ClangBuiltin<"__builtin_riscv_esp_src_q">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_src_q_ld_ip: ClangBuiltin<"__builtin_riscv_esp_src_q_ld_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_src_q_ld_xp: ClangBuiltin<"__builtin_riscv_esp_src_q_ld_xp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_src_q_qup: ClangBuiltin<"__builtin_riscv_esp_src_q_qup">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; @@ -1017,7 +1017,7 @@ def int_riscv_esp_srcmb_u8_qacc: ClangBuiltin<"__builtin_riscv_esp_srcmb_u8_qacc Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_srcq_128_st_incp: ClangBuiltin<"__builtin_riscv_esp_srcq_128_st_incp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + 
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; def int_riscv_esp_srcxxp_2q: ClangBuiltin<"__builtin_riscv_esp_srcxxp_2q">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; @@ -1056,10 +1056,10 @@ def int_riscv_esp_vsrd_8: ClangBuiltin<"__builtin_riscv_esp_vsrd_8">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; def int_riscv_esp_st_s_xacc_ip: ClangBuiltin<"__builtin_riscv_esp_st_s_xacc_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; def int_riscv_esp_st_u_xacc_ip: ClangBuiltin<"__builtin_riscv_esp_st_u_xacc_ip">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; } diff --git a/llvm/lib/Target/RISCV/RISCVESP32P4ISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVESP32P4ISelLowering.cpp index 0ec3aaf3ad1dc..481cf134d5064 100644 --- a/llvm/lib/Target/RISCV/RISCVESP32P4ISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVESP32P4ISelLowering.cpp @@ -46,25 +46,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VCMULAS_S16_QACC_H_LD_IP_P: { unsigned Opc = RISCV::ESP_VCMULAS_S16_QACC_H_LD_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h_ld_ip " "first argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h_ld_ip " "first argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &OFFSET_16_16 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand 
&OFFSET_16_16 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h_ld_ip " "first argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -76,25 +74,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VCMULAS_S16_QACC_H_LD_XP_P: { unsigned Opc = RISCV::ESP_VCMULAS_S16_QACC_H_LD_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h_ld_xp " "first argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h_ld_xp " "first argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h_ld_xp " "first argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) 
.addReg(RISCV::Q0 + QYVal) @@ -124,25 +120,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VCMULAS_S16_QACC_L_LD_IP_P: { unsigned Opc = RISCV::ESP_VCMULAS_S16_QACC_L_LD_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_l_ld_ip " "first argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_l_ld_ip " "first argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &OFFSET_16_16 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_l_ld_ip " "first argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -154,25 +148,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VCMULAS_S16_QACC_L_LD_XP_P: { unsigned Opc = RISCV::ESP_VCMULAS_S16_QACC_L_LD_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_l_ld_xp " "first 
argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_l_ld_xp " "first argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_l_ld_xp " "first argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -202,25 +194,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VCMULAS_S8_QACC_H_LD_IP_P: { unsigned Opc = RISCV::ESP_VCMULAS_S8_QACC_H_LD_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_h_ld_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_h_ld_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &OFFSET_16_16 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_h_ld_ip first " 
"argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -232,25 +222,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VCMULAS_S8_QACC_H_LD_XP_P: { unsigned Opc = RISCV::ESP_VCMULAS_S8_QACC_H_LD_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_h_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_h_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_h_ld_xp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -280,25 +268,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VCMULAS_S8_QACC_L_LD_IP_P: { unsigned Opc = 
RISCV::ESP_VCMULAS_S8_QACC_L_LD_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_l_ld_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_l_ld_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &OFFSET_16_16 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_l_ld_ip first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -310,25 +296,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VCMULAS_S8_QACC_L_LD_XP_P: { unsigned Opc = RISCV::ESP_VCMULAS_S8_QACC_L_LD_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_l_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of 
esp_vcmulas_s8_qacc_l_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_l_ld_xp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -358,25 +342,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_S16_QACC_LD_IP_P: { unsigned Opc = RISCV::ESP_VMULAS_S16_QACC_LD_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ld_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ld_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &OFFSET_16_16 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ld_ip first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, 
RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -388,25 +370,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_S16_QACC_LD_XP_P: { unsigned Opc = RISCV::ESP_VMULAS_S16_QACC_LD_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ld_xp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -418,24 +398,22 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_S16_QACC_ST_IP_P: { unsigned Opc = RISCV::ESP_VMULAS_S16_QACC_ST_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of 
esp_vmulas_s16_qacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &OFFSET_16_16 = MI.getOperand(4); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &OFFSET_16_16 = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -448,24 +426,22 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_S16_QACC_ST_XP_P: { unsigned Opc = RISCV::ESP_VMULAS_S16_QACC_ST_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_st_xp first " "argument, it must bi in 
range [0,7]"); - MachineOperand &RS1 = MI.getOperand(4); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -496,25 +472,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_S16_XACC_LD_IP_P: { unsigned Opc = RISCV::ESP_VMULAS_S16_XACC_LD_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_ld_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_ld_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &OFFSET_16_16 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_ld_ip first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -526,25 +500,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_S16_XACC_LD_XP_P: { unsigned Opc = 
RISCV::ESP_VMULAS_S16_XACC_LD_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_ld_xp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -556,24 +528,22 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_S16_XACC_ST_IP_P: { unsigned Opc = RISCV::ESP_VMULAS_S16_XACC_ST_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + 
MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &OFFSET_16_16 = MI.getOperand(4); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &OFFSET_16_16 = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -586,24 +556,22 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_S16_XACC_ST_XP_P: { unsigned Opc = RISCV::ESP_VMULAS_S16_XACC_ST_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(4); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) 
.addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -634,25 +602,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_S8_QACC_LD_IP_P: { unsigned Opc = RISCV::ESP_VMULAS_S8_QACC_LD_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ld_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ld_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &OFFSET_16_16 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ld_ip first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -664,25 +630,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_S8_QACC_LD_XP_P: { unsigned Opc = RISCV::ESP_VMULAS_S8_QACC_LD_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of 
esp_vmulas_s8_qacc_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ld_xp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -694,24 +658,22 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_S8_QACC_ST_IP_P: { unsigned Opc = RISCV::ESP_VMULAS_S8_QACC_ST_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &OFFSET_16_16 = MI.getOperand(4); - const TargetRegisterClass *RC = 
&RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &OFFSET_16_16 = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -724,24 +686,22 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_S8_QACC_ST_XP_P: { unsigned Opc = RISCV::ESP_VMULAS_S8_QACC_ST_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(4); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -772,25 +732,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_S8_XACC_LD_IP_P: { unsigned Opc = RISCV::ESP_VMULAS_S8_XACC_LD_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = 
MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_ld_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_ld_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &OFFSET_16_16 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_ld_ip first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -802,25 +760,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_S8_XACC_LD_XP_P: { unsigned Opc = RISCV::ESP_VMULAS_S8_XACC_LD_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - 
MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_ld_xp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -832,24 +788,22 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_S8_XACC_ST_IP_P: { unsigned Opc = RISCV::ESP_VMULAS_S8_XACC_ST_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &OFFSET_16_16 = MI.getOperand(4); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &OFFSET_16_16 = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) 
.addReg(RISCV::Q0 + QUVal) @@ -862,24 +816,22 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_S8_XACC_ST_XP_P: { unsigned Opc = RISCV::ESP_VMULAS_S8_XACC_ST_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(4); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -910,25 +862,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_U16_QACC_LD_IP_P: { unsigned Opc = RISCV::ESP_VMULAS_U16_QACC_LD_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ld_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected 
value of esp_vmulas_u16_qacc_ld_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &OFFSET_16_16 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ld_ip first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -940,25 +890,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_U16_QACC_LD_XP_P: { unsigned Opc = RISCV::ESP_VMULAS_U16_QACC_LD_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ld_xp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = 
MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -970,24 +918,22 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_U16_QACC_ST_IP_P: { unsigned Opc = RISCV::ESP_VMULAS_U16_QACC_ST_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &OFFSET_16_16 = MI.getOperand(4); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &OFFSET_16_16 = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -1000,24 +946,22 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_U16_QACC_ST_XP_P: { unsigned Opc = RISCV::ESP_VMULAS_U16_QACC_ST_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + 
MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(4); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -1048,25 +992,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_U16_XACC_LD_IP_P: { unsigned Opc = RISCV::ESP_VMULAS_U16_XACC_LD_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_ld_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_ld_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &OFFSET_16_16 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + MachineOperand &QU = 
MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_ld_ip first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -1078,25 +1020,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_U16_XACC_LD_XP_P: { unsigned Opc = RISCV::ESP_VMULAS_U16_XACC_LD_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_ld_xp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -1108,24 +1048,22 @@ MachineBasicBlock 
*RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_U16_XACC_ST_IP_P: { unsigned Opc = RISCV::ESP_VMULAS_U16_XACC_ST_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &OFFSET_16_16 = MI.getOperand(4); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &OFFSET_16_16 = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -1138,24 +1076,22 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_U16_XACC_ST_XP_P: { unsigned Opc = RISCV::ESP_VMULAS_U16_XACC_ST_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal 
= QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(4); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -1186,25 +1122,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_U8_QACC_LD_IP_P: { unsigned Opc = RISCV::ESP_VMULAS_U8_QACC_LD_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ld_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ld_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &OFFSET_16_16 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ld_ip first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + 
QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -1216,25 +1150,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_U8_QACC_LD_XP_P: { unsigned Opc = RISCV::ESP_VMULAS_U8_QACC_LD_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ld_xp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -1246,24 +1178,22 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_U8_QACC_ST_IP_P: { unsigned Opc = RISCV::ESP_VMULAS_U8_QACC_ST_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of 
esp_vmulas_u8_qacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &OFFSET_16_16 = MI.getOperand(4); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &OFFSET_16_16 = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -1276,24 +1206,22 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_U8_QACC_ST_XP_P: { unsigned Opc = RISCV::ESP_VMULAS_U8_QACC_ST_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_st_xp first " "argument, it must bi in range 
[0,7]"); - MachineOperand &RS1 = MI.getOperand(4); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -1324,25 +1252,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_U8_XACC_LD_IP_P: { unsigned Opc = RISCV::ESP_VMULAS_U8_XACC_LD_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_ld_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_ld_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &OFFSET_16_16 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_ld_ip first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -1354,25 +1280,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_U8_XACC_LD_XP_P: { unsigned Opc = 
RISCV::ESP_VMULAS_U8_XACC_LD_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_ld_xp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -1384,24 +1308,22 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_U8_XACC_ST_IP_P: { unsigned Opc = RISCV::ESP_VMULAS_U8_XACC_ST_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + 
MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_st_ip first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &OFFSET_16_16 = MI.getOperand(4); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &OFFSET_16_16 = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -1414,24 +1336,22 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_U8_XACC_ST_XP_P: { unsigned Opc = RISCV::ESP_VMULAS_U8_XACC_ST_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(4); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(5); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) 
.addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -1444,24 +1364,22 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_S16_QACC_LDBC_INCP_P: { unsigned Opc = RISCV::ESP_VMULAS_S16_QACC_LDBC_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ldbc_incp " "first argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ldbc_incp " "first argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QU = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ldbc_incp " "first argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -1472,24 +1390,22 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_S8_QACC_LDBC_INCP_P: { unsigned Opc = RISCV::ESP_VMULAS_S8_QACC_LDBC_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ldbc_incp " "first argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned 
QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ldbc_incp " "first argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QU = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ldbc_incp " "first argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -1500,24 +1416,22 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_U16_QACC_LDBC_INCP_P: { unsigned Opc = RISCV::ESP_VMULAS_U16_QACC_LDBC_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ldbc_incp " "first argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ldbc_incp " "first argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QU = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ldbc_incp " "first argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, 
RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -1528,24 +1442,22 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMULAS_U8_QACC_LDBC_INCP_P: { unsigned Opc = RISCV::ESP_VMULAS_U8_QACC_LDBC_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ldbc_incp " "first argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ldbc_incp " "first argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QU = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ldbc_incp " "first argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -1576,25 +1488,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VSMULAS_S16_QACC_LD_INCP_P: { unsigned Opc = RISCV::ESP_VSMULAS_S16_QACC_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vsmulas_s16_qacc_ld_incp " "first argument, it must bi in range 
[0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vsmulas_s16_qacc_ld_incp " "first argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &SELECT_16 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_16 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vsmulas_s16_qacc_ld_incp " "first argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -1626,25 +1536,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VSMULAS_S8_QACC_LD_INCP_P: { unsigned Opc = RISCV::ESP_VSMULAS_S8_QACC_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vsmulas_s8_qacc_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vsmulas_s8_qacc_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &SELECT_16 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_16 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 
&& "Unexpected value of esp_vsmulas_s8_qacc_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -1676,25 +1584,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VSMULAS_U16_QACC_LD_INCP_P: { unsigned Opc = RISCV::ESP_VSMULAS_U16_QACC_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vsmulas_u16_qacc_ld_incp " "first argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vsmulas_u16_qacc_ld_incp " "first argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &SELECT_16 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_16 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vsmulas_u16_qacc_ld_incp " "first argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -1726,25 +1632,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( 
case RISCV::ESP_VSMULAS_U8_QACC_LD_INCP_P: { unsigned Opc = RISCV::ESP_VSMULAS_U8_QACC_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vsmulas_u8_qacc_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vsmulas_u8_qacc_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &SELECT_16 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_16 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vsmulas_u8_qacc_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -1781,30 +1685,28 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_CMUL_S16_LD_INCP_P: { unsigned Opc = RISCV::ESP_CMUL_S16_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_cmul_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_cmul_s16_ld_incp first " "argument, it must bi 
in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &SELECT_4 = MI.getOperand(3); - MachineOperand &QZ = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_4 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_cmul_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(5); + MachineOperand &QU = MI.getOperand(6); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_cmul_s16_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -1816,29 +1718,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_CMUL_S16_ST_INCP_P: { unsigned Opc = RISCV::ESP_CMUL_S16_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_cmul_s16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_cmul_s16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_cmul_s16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &SELECT_4 = 
MI.getOperand(4); - MachineOperand &QZ = MI.getOperand(5); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &SELECT_4 = MI.getOperand(5); + MachineOperand &QZ = MI.getOperand(6); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_cmul_s16_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -1876,30 +1776,28 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_CMUL_S8_LD_INCP_P: { unsigned Opc = RISCV::ESP_CMUL_S8_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_cmul_s8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_cmul_s8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &SELECT_4 = MI.getOperand(3); - MachineOperand &QZ = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_4 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_cmul_s8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(5); + MachineOperand &QU = MI.getOperand(6); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_cmul_s8_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = 
&RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -1911,29 +1809,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_CMUL_S8_ST_INCP_P: { unsigned Opc = RISCV::ESP_CMUL_S8_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_cmul_s8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_cmul_s8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_cmul_s8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &SELECT_4 = MI.getOperand(4); - MachineOperand &QZ = MI.getOperand(5); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &SELECT_4 = MI.getOperand(5); + MachineOperand &QZ = MI.getOperand(6); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_cmul_s8_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ 
-1971,30 +1867,28 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_CMUL_U16_LD_INCP_P: { unsigned Opc = RISCV::ESP_CMUL_U16_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_cmul_u16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); - assert(QYVal < 8 && "Unexpected value of esp_cmul_u16_ld_incp first " - "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &SELECT_4 = MI.getOperand(3); - MachineOperand &QZ = MI.getOperand(4); + assert(QYVal < 8 && "Unexpected value of esp_cmul_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_4 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_cmul_u16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(5); + MachineOperand &QU = MI.getOperand(6); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_cmul_u16_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -2006,29 +1900,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_CMUL_U16_ST_INCP_P: { unsigned Opc = RISCV::ESP_CMUL_U16_ST_INCP; MachineBasicBlock *MBB = 
MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_cmul_u16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_cmul_u16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_cmul_u16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &SELECT_4 = MI.getOperand(4); - MachineOperand &QZ = MI.getOperand(5); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &SELECT_4 = MI.getOperand(5); + MachineOperand &QZ = MI.getOperand(6); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_cmul_u16_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -2066,30 +1958,28 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_CMUL_U8_LD_INCP_P: { unsigned Opc = RISCV::ESP_CMUL_U8_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_cmul_u8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal 
< 8 && "Unexpected value of esp_cmul_u8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &SELECT_4 = MI.getOperand(3); - MachineOperand &QZ = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_4 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_cmul_u8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(5); + MachineOperand &QU = MI.getOperand(6); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_cmul_u8_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -2101,29 +1991,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_CMUL_U8_ST_INCP_P: { unsigned Opc = RISCV::ESP_CMUL_U8_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_cmul_u8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_cmul_u8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_cmul_u8_st_incp first " "argument, it must bi in range [0,7]"); - 
MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &SELECT_4 = MI.getOperand(4); - MachineOperand &QZ = MI.getOperand(5); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &SELECT_4 = MI.getOperand(5); + MachineOperand &QZ = MI.getOperand(6); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_cmul_u8_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -2405,29 +2293,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VADD_S16_LD_INCP_P: { unsigned Opc = RISCV::ESP_VADD_S16_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vadd_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vadd_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QV = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vadd_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vadd_s16_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; 
- unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -2438,28 +2324,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VADD_S16_ST_INCP_P: { unsigned Opc = RISCV::ESP_VADD_S16_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vadd_s16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vadd_s16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vadd_s16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QV = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QV = MI.getOperand(5); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vadd_s16_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -2494,29 +2378,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case 
RISCV::ESP_VADD_S32_LD_INCP_P: { unsigned Opc = RISCV::ESP_VADD_S32_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vadd_s32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vadd_s32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QV = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vadd_s32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vadd_s32_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -2527,28 +2409,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VADD_S32_ST_INCP_P: { unsigned Opc = RISCV::ESP_VADD_S32_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vadd_s32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = 
MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vadd_s32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vadd_s32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QV = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QV = MI.getOperand(5); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vadd_s32_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -2583,29 +2463,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VADD_S8_LD_INCP_P: { unsigned Opc = RISCV::ESP_VADD_S8_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vadd_s8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vadd_s8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QV = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vadd_s8_ld_incp first " "argument, it must bi in range 
[0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vadd_s8_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -2616,28 +2494,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VADD_S8_ST_INCP_P: { unsigned Opc = RISCV::ESP_VADD_S8_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vadd_s8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vadd_s8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vadd_s8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QV = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QV = MI.getOperand(5); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vadd_s8_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) - 
.addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -2672,29 +2548,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VADD_U16_LD_INCP_P: { unsigned Opc = RISCV::ESP_VADD_U16_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vadd_u16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vadd_u16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QV = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vadd_u16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vadd_u16_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -2705,28 +2579,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VADD_U16_ST_INCP_P: { unsigned Opc = RISCV::ESP_VADD_U16_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = 
MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vadd_u16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vadd_u16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vadd_u16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QV = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QV = MI.getOperand(5); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vadd_u16_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -2761,29 +2633,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VADD_U32_LD_INCP_P: { unsigned Opc = RISCV::ESP_VADD_U32_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vadd_u32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vadd_u32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = 
MI.getOperand(2); - MachineOperand &QV = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vadd_u32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vadd_u32_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -2794,28 +2664,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VADD_U32_ST_INCP_P: { unsigned Opc = RISCV::ESP_VADD_U32_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vadd_u32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vadd_u32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vadd_u32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QV = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QV = MI.getOperand(5); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && 
"Unexpected value of esp_vadd_u32_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -2850,29 +2718,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VADD_U8_LD_INCP_P: { unsigned Opc = RISCV::ESP_VADD_U8_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vadd_u8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vadd_u8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QV = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vadd_u8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vadd_u8_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + 
QYVal) .addReg(RS1.getReg()); @@ -2883,28 +2749,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VADD_U8_ST_INCP_P: { unsigned Opc = RISCV::ESP_VADD_U8_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vadd_u8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vadd_u8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vadd_u8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QV = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QV = MI.getOperand(5); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vadd_u8_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -2959,29 +2823,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMAX_S16_LD_INCP_P: { unsigned Opc = RISCV::ESP_VMAX_S16_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmax_s16_ld_incp first " "argument, it must bi in 
range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmax_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmax_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmax_s16_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -2992,28 +2854,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMAX_S16_ST_INCP_P: { unsigned Opc = RISCV::ESP_VMAX_S16_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmax_s16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmax_s16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmax_s16_st_incp 
first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QZ = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmax_s16_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -3048,29 +2908,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMAX_S32_LD_INCP_P: { unsigned Opc = RISCV::ESP_VMAX_S32_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmax_s32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmax_s32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmax_s32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmax_s32_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = 
MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -3081,28 +2939,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMAX_S32_ST_INCP_P: { unsigned Opc = RISCV::ESP_VMAX_S32_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmax_s32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmax_s32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmax_s32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QZ = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmax_s32_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -3137,29 +2993,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMAX_S8_LD_INCP_P: { 
unsigned Opc = RISCV::ESP_VMAX_S8_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmax_s8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmax_s8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmax_s8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmax_s8_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -3170,28 +3024,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMAX_S8_ST_INCP_P: { unsigned Opc = RISCV::ESP_VMAX_S8_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmax_s8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); 
assert(QYVal < 8 && "Unexpected value of esp_vmax_s8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmax_s8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QZ = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmax_s8_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -3226,29 +3078,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMAX_U16_LD_INCP_P: { unsigned Opc = RISCV::ESP_VMAX_U16_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmax_u16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmax_u16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmax_u16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); 
+ MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmax_u16_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -3259,28 +3109,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMAX_U16_ST_INCP_P: { unsigned Opc = RISCV::ESP_VMAX_U16_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmax_u16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmax_u16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmax_u16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QZ = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmax_u16_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + 
.addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -3315,29 +3163,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMAX_U32_LD_INCP_P: { unsigned Opc = RISCV::ESP_VMAX_U32_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmax_u32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmax_u32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmax_u32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmax_u32_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -3348,28 +3194,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMAX_U32_ST_INCP_P: { unsigned Opc = RISCV::ESP_VMAX_U32_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand 
&QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmax_u32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmax_u32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmax_u32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QZ = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmax_u32_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -3404,29 +3248,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMAX_U8_LD_INCP_P: { unsigned Opc = RISCV::ESP_VMAX_U8_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmax_u8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmax_u8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QZ = 
MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmax_u8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmax_u8_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -3437,28 +3279,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMAX_U8_ST_INCP_P: { unsigned Opc = RISCV::ESP_VMAX_U8_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmax_u8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmax_u8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmax_u8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QZ = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmax_u8_st_incp first 
" "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -3493,29 +3333,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMIN_S16_LD_INCP_P: { unsigned Opc = RISCV::ESP_VMIN_S16_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmin_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmin_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmin_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmin_s16_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -3526,28 
+3364,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMIN_S16_ST_INCP_P: { unsigned Opc = RISCV::ESP_VMIN_S16_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmin_s16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmin_s16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmin_s16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QZ = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmin_s16_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -3582,29 +3418,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMIN_S32_LD_INCP_P: { unsigned Opc = RISCV::ESP_VMIN_S32_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmin_s32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = 
MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmin_s32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmin_s32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmin_s32_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -3615,28 +3449,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMIN_S32_ST_INCP_P: { unsigned Opc = RISCV::ESP_VMIN_S32_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmin_s32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmin_s32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmin_s32_st_incp first " "argument, it must bi in range 
[0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QZ = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmin_s32_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -3671,29 +3503,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMIN_S8_LD_INCP_P: { unsigned Opc = RISCV::ESP_VMIN_S8_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmin_s8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmin_s8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmin_s8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmin_s8_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) 
.addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -3704,28 +3534,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMIN_S8_ST_INCP_P: { unsigned Opc = RISCV::ESP_VMIN_S8_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmin_s8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmin_s8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmin_s8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QZ = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmin_s8_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -3760,29 +3588,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMIN_U16_LD_INCP_P: { unsigned Opc = RISCV::ESP_VMIN_U16_LD_INCP; MachineBasicBlock *MBB = 
MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmin_u16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmin_u16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmin_u16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmin_u16_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -3793,28 +3619,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMIN_U16_ST_INCP_P: { unsigned Opc = RISCV::ESP_VMIN_U16_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmin_u16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmin_u16_st_incp 
first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmin_u16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QZ = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmin_u16_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -3849,29 +3673,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMIN_U32_LD_INCP_P: { unsigned Opc = RISCV::ESP_VMIN_U32_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmin_u32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmin_u32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmin_u32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = 
QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmin_u32_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -3882,28 +3704,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMIN_U32_ST_INCP_P: { unsigned Opc = RISCV::ESP_VMIN_U32_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmin_u32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmin_u32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmin_u32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QZ = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmin_u32_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 
+ QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -3938,29 +3758,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMIN_U8_LD_INCP_P: { unsigned Opc = RISCV::ESP_VMIN_U8_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmin_u8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmin_u8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmin_u8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmin_u8_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -3971,28 +3789,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMIN_U8_ST_INCP_P: { unsigned Opc = RISCV::ESP_VMIN_U8_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && 
"Unexpected value of esp_vmin_u8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmin_u8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmin_u8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QZ = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmin_u8_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -4027,29 +3843,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMUL_S16_LD_INCP_P: { unsigned Opc = RISCV::ESP_VMUL_S16_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmul_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmul_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ 
= MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmul_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmul_s16_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -4088,28 +3902,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMUL_S16_ST_INCP_P: { unsigned Opc = RISCV::ESP_VMUL_S16_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmul_s16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmul_s16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmul_s16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QZ = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmul_s16_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC 
= &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -4172,29 +3984,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMUL_S8_LD_INCP_P: { unsigned Opc = RISCV::ESP_VMUL_S8_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmul_s8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmul_s8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmul_s8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmul_s8_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -4205,28 +4015,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( 
case RISCV::ESP_VMUL_S8_ST_INCP_P: { unsigned Opc = RISCV::ESP_VMUL_S8_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmul_s8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmul_s8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmul_s8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QZ = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmul_s8_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -4261,29 +4069,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMUL_U16_LD_INCP_P: { unsigned Opc = RISCV::ESP_VMUL_U16_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmul_u16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); 
assert(QYVal < 8 && "Unexpected value of esp_vmul_u16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmul_u16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmul_u16_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -4294,28 +4100,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMUL_U16_ST_INCP_P: { unsigned Opc = RISCV::ESP_VMUL_U16_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmul_u16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmul_u16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmul_u16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QZ = 
MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmul_u16_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -4350,29 +4154,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMUL_U8_LD_INCP_P: { unsigned Opc = RISCV::ESP_VMUL_U8_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmul_u8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmul_u8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmul_u8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmul_u8_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, 
RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -4383,28 +4185,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VMUL_U8_ST_INCP_P: { unsigned Opc = RISCV::ESP_VMUL_U8_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vmul_u8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vmul_u8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vmul_u8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QZ = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vmul_u8_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -4817,29 +4617,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VSUB_S16_LD_INCP_P: { unsigned Opc = RISCV::ESP_VSUB_S16_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand 
&QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vsub_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vsub_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QV = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vsub_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vsub_s16_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -4850,28 +4648,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VSUB_S16_ST_INCP_P: { unsigned Opc = RISCV::ESP_VSUB_S16_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vsub_s16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vsub_s16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = 
MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vsub_s16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QV = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QV = MI.getOperand(5); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vsub_s16_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -4906,29 +4702,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VSUB_S32_LD_INCP_P: { unsigned Opc = RISCV::ESP_VSUB_S32_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vsub_s32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vsub_s32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QV = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vsub_s32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of 
esp_vsub_s32_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -4939,28 +4733,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VSUB_S32_ST_INCP_P: { unsigned Opc = RISCV::ESP_VSUB_S32_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vsub_s32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vsub_s32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vsub_s32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QV = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QV = MI.getOperand(5); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vsub_s32_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 
+ QUVal) @@ -4995,29 +4787,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VSUB_S8_LD_INCP_P: { unsigned Opc = RISCV::ESP_VSUB_S8_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vsub_s8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vsub_s8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QV = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vsub_s8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vsub_s8_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -5028,28 +4818,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VSUB_S8_ST_INCP_P: { unsigned Opc = RISCV::ESP_VSUB_S8_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vsub_s8_st_incp first " "argument, 
it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vsub_s8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vsub_s8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QV = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QV = MI.getOperand(5); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vsub_s8_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -5084,29 +4872,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VSUB_U16_LD_INCP_P: { unsigned Opc = RISCV::ESP_VSUB_U16_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vsub_u16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vsub_u16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QV = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); unsigned QVVal = QV.getImm(); 
assert(QVVal < 8 && "Unexpected value of esp_vsub_u16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vsub_u16_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -5117,28 +4903,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VSUB_U16_ST_INCP_P: { unsigned Opc = RISCV::ESP_VSUB_U16_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vsub_u16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vsub_u16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vsub_u16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QV = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QV = MI.getOperand(5); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vsub_u16_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = 
MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -5173,29 +4957,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VSUB_U32_LD_INCP_P: { unsigned Opc = RISCV::ESP_VSUB_U32_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vsub_u32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vsub_u32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QV = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vsub_u32_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vsub_u32_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -5206,28 +4988,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VSUB_U32_ST_INCP_P: { 
unsigned Opc = RISCV::ESP_VSUB_U32_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vsub_u32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vsub_u32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vsub_u32_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QV = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &QV = MI.getOperand(5); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vsub_u32_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -5262,29 +5042,27 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VSUB_U8_LD_INCP_P: { unsigned Opc = RISCV::ESP_VSUB_U8_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vsub_u8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value 
of esp_vsub_u8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QV = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vsub_u8_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vsub_u8_ld_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()); @@ -5295,28 +5073,26 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VSUB_U8_ST_INCP_P: { unsigned Opc = RISCV::ESP_VSUB_U8_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_vsub_u8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_vsub_u8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vsub_u8_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &QV = MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(4); + 
MachineOperand &QV = MI.getOperand(5); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_vsub_u8_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QVVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QUVal) @@ -5358,13 +5134,11 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_SAT_P: { unsigned Opc = RISCV::ESP_SAT; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS0 = MI.getOperand(0); - MachineOperand &RS1 = MI.getOperand(1); - MachineOperand &RSD = MI.getOperand(2); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS0 = MI.getOperand(1); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &RSD = MI.getOperand(3); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS0.getReg()) .addReg(RS1.getReg()) .addReg(RSD.getReg()); @@ -6059,9 +5833,8 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_MOVX_R_CFG_P: { unsigned Opc = RISCV::ESP_MOVX_R_CFG; MachineBasicBlock *MBB = MI.getParent(); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); - BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(R1, RegState::Define); + MachineOperand &RS1 = MI.getOperand(0); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RS1.getReg(), RegState::Define); MI.eraseFromParent(); return MBB; @@ -6079,8 +5852,8 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_MOVX_R_PERF_P: { unsigned Opc = RISCV::ESP_MOVX_R_PERF; MachineBasicBlock *MBB = 
MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - MachineOperand &RS1 = MI.getOperand(1); unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(R1, RegState::Define) @@ -6540,39 +6313,37 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_FFT_AMS_S16_LD_INCP_P: { unsigned Opc = RISCV::ESP_FFT_AMS_S16_LD_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QW = MI.getOperand(2); + MachineOperand &QW = MI.getOperand(3); unsigned QWVal = QW.getImm(); assert(QWVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &SELECT_2 = MI.getOperand(4); - MachineOperand &QU = MI.getOperand(5); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &SELECT_2 = MI.getOperand(5); + MachineOperand &QU = MI.getOperand(6); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QZ = MI.getOperand(6); + MachineOperand &QZ = MI.getOperand(7); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QV = MI.getOperand(7); + MachineOperand &QV = MI.getOperand(8); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp first " "argument, it must 
bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QVVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QWVal) @@ -6585,39 +6356,37 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_FFT_AMS_S16_LD_INCP_UAUP_P: { unsigned Opc = RISCV::ESP_FFT_AMS_S16_LD_INCP_UAUP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp_uaup " "first argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp_uaup " "first argument, it must bi in range [0,7]"); - MachineOperand &QW = MI.getOperand(2); + MachineOperand &QW = MI.getOperand(3); unsigned QWVal = QW.getImm(); assert(QWVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp_uaup " "first argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &SELECT_2 = MI.getOperand(4); - MachineOperand &QU = MI.getOperand(5); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &SELECT_2 = MI.getOperand(5); + MachineOperand &QU = MI.getOperand(6); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp_uaup " "first argument, it must bi in range [0,7]"); - MachineOperand &QZ = MI.getOperand(6); + MachineOperand &QZ = MI.getOperand(7); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp_uaup " "first argument, 
it must bi in range [0,7]"); - MachineOperand &QV = MI.getOperand(7); + MachineOperand &QV = MI.getOperand(8); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp_uaup " "first argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QVVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QWVal) @@ -6630,39 +6399,37 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_FFT_AMS_S16_LD_R32_DECP_P: { unsigned Opc = RISCV::ESP_FFT_AMS_S16_LD_R32_DECP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_r32_decp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_r32_decp first " "argument, it must bi in range [0,7]"); - MachineOperand &QW = MI.getOperand(2); + MachineOperand &QW = MI.getOperand(3); unsigned QWVal = QW.getImm(); assert(QWVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_r32_decp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &SELECT_2 = MI.getOperand(4); - MachineOperand &QU = MI.getOperand(5); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &SELECT_2 = MI.getOperand(5); + MachineOperand &QU = MI.getOperand(6); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_r32_decp first " 
"argument, it must bi in range [0,7]"); - MachineOperand &QZ = MI.getOperand(6); + MachineOperand &QZ = MI.getOperand(7); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_r32_decp first " "argument, it must bi in range [0,7]"); - MachineOperand &QV = MI.getOperand(7); + MachineOperand &QV = MI.getOperand(8); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_r32_decp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QVVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QWVal) @@ -6719,15 +6486,13 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_FFT_BITREV_P: { unsigned Opc = RISCV::ESP_FFT_BITREV; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &QV = MI.getOperand(1); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QV = MI.getOperand(2); unsigned QVVal = QV.getImm(); assert(QVVal < 8 && "Unexpected value of esp_fft_bitrev first argument, it " "must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QVVal, RegState::Define) .addReg(RS1.getReg()) .addReg(RISCV::Q0 + QVVal); @@ -6738,31 +6503,29 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_FFT_CMUL_S16_LD_XP_P: { unsigned Opc = RISCV::ESP_FFT_CMUL_S16_LD_XP; MachineBasicBlock *MBB = MI.getParent(); - 
MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_fft_cmul_s16_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_fft_cmul_s16_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(3); - MachineOperand &SELECT_8 = MI.getOperand(4); - MachineOperand &QZ = MI.getOperand(5); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &SELECT_8 = MI.getOperand(5); + MachineOperand &QZ = MI.getOperand(6); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_fft_cmul_s16_ld_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(6); + MachineOperand &QU = MI.getOperand(7); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_fft_cmul_s16_ld_xp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -6775,27 +6538,25 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_FFT_CMUL_S16_ST_XP_P: { unsigned Opc = RISCV::ESP_FFT_CMUL_S16_ST_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QX = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && 
"Unexpected value of esp_fft_cmul_s16_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(2); + MachineOperand &QY = MI.getOperand(3); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_fft_cmul_s16_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &QU = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_fft_cmul_s16_st_xp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(4); - MachineOperand &SELECT_4 = MI.getOperand(5); - MachineOperand &UPD_4 = MI.getOperand(6); - MachineOperand &SELECT_8 = MI.getOperand(7); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(5); + MachineOperand &SELECT_4 = MI.getOperand(6); + MachineOperand &UPD_4 = MI.getOperand(7); + MachineOperand &SELECT_8 = MI.getOperand(8); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) @@ -6841,25 +6602,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_FFT_R2BF_S16_ST_INCP_P: { unsigned Opc = RISCV::ESP_FFT_R2BF_S16_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QX = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); unsigned QXVal = QX.getImm(); assert(QXVal < 8 && "Unexpected value of esp_fft_r2bf_s16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_fft_r2bf_s16_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &SELECT_4 = MI.getOperand(3); - MachineOperand &QZ = 
MI.getOperand(4); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_4 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_fft_r2bf_s16_st_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QXVal) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -6871,16 +6630,14 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_FFT_VST_R32_DECP_P: { unsigned Opc = RISCV::ESP_FFT_VST_R32_DECP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QU = MI.getOperand(0); + MachineOperand &QU = MI.getOperand(1); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_fft_vst_r32_decp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(1); - MachineOperand &SELECT_2 = MI.getOperand(2); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &SELECT_2 = MI.getOperand(3); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QUVal) .addReg(RS1.getReg()) .addImm(SELECT_2.getImm()); @@ -6891,17 +6648,15 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_LD_128_USAR_IP_P: { unsigned Opc = RISCV::ESP_LD_128_USAR_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_16 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_16 = 
MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_ld_128_usar_ip first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_16.getImm()); @@ -6911,17 +6666,15 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_LD_128_USAR_XP_P: { unsigned Opc = RISCV::ESP_LD_128_USAR_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &RS1 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_ld_128_usar_xp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RS1.getReg()); @@ -6931,12 +6684,10 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_LD_XACC_IP_P: { unsigned Opc = RISCV::ESP_LD_XACC_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_8 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_8 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, 
RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_8.getImm()); @@ -6946,12 +6697,10 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_LDQA_S16_128_IP_P: { unsigned Opc = RISCV::ESP_LDQA_S16_128_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_16 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_16 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_16.getImm()); @@ -6961,12 +6710,10 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_LDQA_S16_128_XP_P: { unsigned Opc = RISCV::ESP_LDQA_S16_128_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &RS1 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &RS1 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RS1.getReg()); @@ -6976,12 +6723,10 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_LDQA_S8_128_IP_P: { unsigned Opc = RISCV::ESP_LDQA_S8_128_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_16 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_16 = MI.getOperand(2); BuildMI(*MBB, MI, 
DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_16.getImm()); @@ -6991,12 +6736,10 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_LDQA_S8_128_XP_P: { unsigned Opc = RISCV::ESP_LDQA_S8_128_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &RS1 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &RS1 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RS1.getReg()); @@ -7006,12 +6749,10 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_LDQA_U16_128_IP_P: { unsigned Opc = RISCV::ESP_LDQA_U16_128_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_16 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_16 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_16.getImm()); @@ -7021,12 +6762,10 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_LDQA_U16_128_XP_P: { unsigned Opc = RISCV::ESP_LDQA_U16_128_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &RS1 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &RS1 = MI.getOperand(2); 
BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RS1.getReg()); @@ -7036,12 +6775,10 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_LDQA_U8_128_IP_P: { unsigned Opc = RISCV::ESP_LDQA_U8_128_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_16 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_16 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_16.getImm()); @@ -7051,12 +6788,10 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_LDQA_U8_128_XP_P: { unsigned Opc = RISCV::ESP_LDQA_U8_128_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &RS1 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &RS1 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RS1.getReg()); @@ -7066,17 +6801,15 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VLDBC_16_IP_P: { unsigned Opc = RISCV::ESP_VLDBC_16_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_4 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_4 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned 
QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vldbc_16_ip first argument, " "it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_4.getImm()); @@ -7086,17 +6819,15 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VLDBC_16_XP_P: { unsigned Opc = RISCV::ESP_VLDBC_16_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &RS1 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vldbc_16_xp first argument, " "it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RS1.getReg()); @@ -7106,17 +6837,15 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VLDBC_32_IP_P: { unsigned Opc = RISCV::ESP_VLDBC_32_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_4 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_4 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vldbc_32_ip first argument, " "it must bi in range [0,7]"); - const 
TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_4.getImm()); @@ -7126,17 +6855,15 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VLDBC_32_XP_P: { unsigned Opc = RISCV::ESP_VLDBC_32_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &RS1 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vldbc_32_xp first argument, " "it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RS1.getReg()); @@ -7146,17 +6873,15 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VLDBC_8_IP_P: { unsigned Opc = RISCV::ESP_VLDBC_8_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_4 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_4 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vldbc_8_ip first argument, it " "must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 
+ QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_4.getImm()); @@ -7166,17 +6891,15 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VLDBC_8_XP_P: { unsigned Opc = RISCV::ESP_VLDBC_8_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &RS1 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vldbc_8_xp first argument, it " "must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RS1.getReg()); @@ -7186,22 +6909,20 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VLDEXT_S16_IP_P: { unsigned Opc = RISCV::ESP_VLDEXT_S16_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_16_16 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_16_16 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vldext_s16_ip first argument, " "it must bi in range [0,7]"); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vldext_s16_ip first argument, " "it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = 
MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_16_16.getImm()); @@ -7211,22 +6932,20 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VLDEXT_S16_XP_P: { unsigned Opc = RISCV::ESP_VLDEXT_S16_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &RS1 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vldext_s16_xp first argument, " "it must bi in range [0,7]"); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vldext_s16_xp first argument, " "it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RS1.getReg()); @@ -7236,22 +6955,20 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VLDEXT_S8_IP_P: { unsigned Opc = RISCV::ESP_VLDEXT_S8_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_16_16 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_16_16 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); 
assert(QUVal < 8 && "Unexpected value of esp_vldext_s8_ip first argument, " "it must bi in range [0,7]"); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vldext_s8_ip first argument, " "it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_16_16.getImm()); @@ -7261,22 +6978,20 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VLDEXT_S8_XP_P: { unsigned Opc = RISCV::ESP_VLDEXT_S8_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &RS1 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vldext_s8_xp first argument, " "it must bi in range [0,7]"); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vldext_s8_xp first argument, " "it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RS1.getReg()); @@ -7286,22 +7001,20 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case 
RISCV::ESP_VLDEXT_U16_IP_P: { unsigned Opc = RISCV::ESP_VLDEXT_U16_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_16_16 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_16_16 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vldext_u16_ip first argument, " "it must bi in range [0,7]"); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vldext_u16_ip first argument, " "it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_16_16.getImm()); @@ -7311,22 +7024,20 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VLDEXT_U16_XP_P: { unsigned Opc = RISCV::ESP_VLDEXT_U16_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &RS1 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vldext_u16_xp first argument, " "it must bi in range [0,7]"); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vldext_u16_xp first argument, " "it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 
= MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RS1.getReg()); @@ -7336,22 +7047,20 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VLDEXT_U8_IP_P: { unsigned Opc = RISCV::ESP_VLDEXT_U8_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_16_16 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_16_16 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vldext_u8_ip first argument, " "it must bi in range [0,7]"); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vldext_u8_ip first argument, " "it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_16_16.getImm()); @@ -7361,22 +7070,20 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VLDEXT_U8_XP_P: { unsigned Opc = RISCV::ESP_VLDEXT_U8_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &RS1 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal 
< 8 && "Unexpected value of esp_vldext_u8_xp first argument, " "it must bi in range [0,7]"); - MachineOperand &QZ = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vldext_u8_xp first argument, " "it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RS1.getReg()); @@ -7386,21 +7093,19 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VLDHBC_16_INCP_P: { unsigned Opc = RISCV::ESP_VLDHBC_16_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &QU = MI.getOperand(1); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vldhbc_16_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QZ = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); unsigned QZVal = QZ.getImm(); assert(QZVal < 8 && "Unexpected value of esp_vldhbc_16_incp first " "argument, it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) .addReg(RISCV::Q0 + QZVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()); MI.eraseFromParent(); @@ -7409,12 +7114,10 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_LD_QACC_H_H_128_IP_P: { unsigned Opc = RISCV::ESP_LD_QACC_H_H_128_IP; MachineBasicBlock *MBB = MI.getParent(); 
- MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_16 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_16 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_16.getImm()); @@ -7424,12 +7127,10 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_LD_QACC_H_L_128_IP_P: { unsigned Opc = RISCV::ESP_LD_QACC_H_L_128_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_16 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_16 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_16.getImm()); @@ -7439,12 +7140,10 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_LD_QACC_L_H_128_IP_P: { unsigned Opc = RISCV::ESP_LD_QACC_L_H_128_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_16 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_16 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_16.getImm()); @@ -7454,12 +7153,10 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_LD_QACC_L_L_128_IP_P: { 
unsigned Opc = RISCV::ESP_LD_QACC_L_L_128_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_16 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_16 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_16.getImm()); @@ -7469,12 +7166,10 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_LD_UA_STATE_IP_P: { unsigned Opc = RISCV::ESP_LD_UA_STATE_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_16 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_16 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_16.getImm()); @@ -7508,12 +7203,10 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_ST_QACC_H_H_128_IP_P: { unsigned Opc = RISCV::ESP_ST_QACC_H_H_128_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_16 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_16 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_16.getImm()); @@ -7523,12 +7216,10 @@ MachineBasicBlock 
*RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_ST_QACC_H_L_128_IP_P: { unsigned Opc = RISCV::ESP_ST_QACC_H_L_128_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_16 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_16 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_16.getImm()); @@ -7538,12 +7229,10 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_ST_QACC_L_H_128_IP_P: { unsigned Opc = RISCV::ESP_ST_QACC_L_H_128_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_16 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_16 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_16.getImm()); @@ -7553,12 +7242,10 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_ST_QACC_L_L_128_IP_P: { unsigned Opc = RISCV::ESP_ST_QACC_L_L_128_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_16 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_16 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) 
.addReg(RS1.getReg()) .addImm(OFFSET_256_16.getImm()); @@ -7568,12 +7255,10 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_ST_UA_STATE_IP_P: { unsigned Opc = RISCV::ESP_ST_UA_STATE_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_16 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_16 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_16.getImm()); @@ -7607,17 +7292,15 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VLD_128_IP_P: { unsigned Opc = RISCV::ESP_VLD_128_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_16 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_16 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vld_128_ip first argument, it " "must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_16.getImm()); @@ -7627,17 +7310,15 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VLD_128_XP_P: { unsigned Opc = RISCV::ESP_VLD_128_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &RS1 = MI.getOperand(1); - MachineOperand &QU = 
MI.getOperand(2); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vld_128_xp first argument, it " "must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RS1.getReg()); @@ -7647,17 +7328,15 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VLD_H_64_IP_P: { unsigned Opc = RISCV::ESP_VLD_H_64_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_8 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_8 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vld_h_64_ip first argument, " "it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_8.getImm()); @@ -7667,17 +7346,15 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VLD_H_64_XP_P: { unsigned Opc = RISCV::ESP_VLD_H_64_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &RS1 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); 
unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vld_h_64_xp first argument, " "it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RS1.getReg()); @@ -7687,17 +7364,15 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VLD_L_64_IP_P: { unsigned Opc = RISCV::ESP_VLD_L_64_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_8 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_8 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vld_l_64_ip first argument, " "it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_8.getImm()); @@ -7707,17 +7382,15 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VLD_L_64_XP_P: { unsigned Opc = RISCV::ESP_VLD_L_64_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &RS1 = MI.getOperand(1); - MachineOperand &QU = MI.getOperand(2); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vld_l_64_xp first argument, " "it must bi in range [0,7]"); - const 
TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RS1.getReg()); @@ -7727,16 +7400,14 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VST_128_IP_P: { unsigned Opc = RISCV::ESP_VST_128_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QU = MI.getOperand(0); + MachineOperand &QU = MI.getOperand(1); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vst_128_ip first argument, it " "must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(1); - MachineOperand &OFFSET_256_16 = MI.getOperand(2); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_256_16 = MI.getOperand(3); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QUVal) .addReg(RS1.getReg()) .addImm(OFFSET_256_16.getImm()); @@ -7747,16 +7418,14 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VST_128_XP_P: { unsigned Opc = RISCV::ESP_VST_128_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QU = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vst_128_xp first argument, it " "must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(3); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + 
.addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QUVal) .addReg(RS1.getReg()); @@ -7767,16 +7436,14 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VST_H_64_IP_P: { unsigned Opc = RISCV::ESP_VST_H_64_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QU = MI.getOperand(0); + MachineOperand &QU = MI.getOperand(1); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vst_h_64_ip first argument, " "it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(1); - MachineOperand &OFFSET_256_8 = MI.getOperand(2); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_256_8 = MI.getOperand(3); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QUVal) .addReg(RS1.getReg()) .addImm(OFFSET_256_8.getImm()); @@ -7787,16 +7454,14 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VST_H_64_XP_P: { unsigned Opc = RISCV::ESP_VST_H_64_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QU = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vst_h_64_xp first argument, " "it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(3); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QUVal) .addReg(RS1.getReg()); @@ -7807,16 +7472,14 @@ MachineBasicBlock 
*RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VST_L_64_IP_P: { unsigned Opc = RISCV::ESP_VST_L_64_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QU = MI.getOperand(0); + MachineOperand &QU = MI.getOperand(1); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vst_l_64_ip first argument, " "it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(1); - MachineOperand &OFFSET_256_8 = MI.getOperand(2); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_256_8 = MI.getOperand(3); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QUVal) .addReg(RS1.getReg()) .addImm(OFFSET_256_8.getImm()); @@ -7827,16 +7490,14 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_VST_L_64_XP_P: { unsigned Opc = RISCV::ESP_VST_L_64_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QU = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_vst_l_64_xp first argument, " "it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(3); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QUVal) .addReg(RS1.getReg()); @@ -7916,25 +7577,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_SRC_Q_LD_IP_P: { unsigned Opc = RISCV::ESP_SRC_Q_LD_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand 
&QY = MI.getOperand(0); + MachineOperand &QY = MI.getOperand(1); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_src_q_ld_ip first argument, " "it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(1); - MachineOperand &QW = MI.getOperand(2); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QW = MI.getOperand(3); unsigned QWVal = QW.getImm(); assert(QWVal < 8 && "Unexpected value of esp_src_q_ld_ip first argument, " "it must bi in range [0,7]"); - MachineOperand &OFFSET_256_16 = MI.getOperand(3); - MachineOperand &QU = MI.getOperand(4); + MachineOperand &OFFSET_256_16 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_src_q_ld_ip first argument, " "it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QWVal, RegState::Define) .addReg(RISCV::Q0 + QYVal) .addReg(RS1.getReg()) @@ -7947,25 +7606,23 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_SRC_Q_LD_XP_P: { unsigned Opc = RISCV::ESP_SRC_Q_LD_XP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS2 = MI.getOperand(0); - MachineOperand &QY = MI.getOperand(1); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_src_q_ld_xp first argument, " "it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - MachineOperand &QW = MI.getOperand(3); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QW = MI.getOperand(4); unsigned QWVal = QW.getImm(); assert(QWVal < 8 && "Unexpected value of esp_src_q_ld_xp first argument, " "it must bi in range [0,7]"); 
- MachineOperand &QU = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); unsigned QUVal = QU.getImm(); assert(QUVal < 8 && "Unexpected value of esp_src_q_ld_xp first argument, " "it must bi in range [0,7]"); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); BuildMI(*MBB, MI, DL, TII.get(Opc)) .addReg(RISCV::Q0 + QUVal, RegState::Define) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QWVal, RegState::Define) .addReg(RS2.getReg()) .addReg(RISCV::Q0 + QYVal) @@ -8172,19 +7829,17 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_SRCQ_128_ST_INCP_P: { unsigned Opc = RISCV::ESP_SRCQ_128_ST_INCP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &QY = MI.getOperand(0); + MachineOperand &QY = MI.getOperand(1); unsigned QYVal = QY.getImm(); assert(QYVal < 8 && "Unexpected value of esp_srcq_128_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &QW = MI.getOperand(1); + MachineOperand &QW = MI.getOperand(2); unsigned QWVal = QW.getImm(); assert(QWVal < 8 && "Unexpected value of esp_srcq_128_st_incp first " "argument, it must bi in range [0,7]"); - MachineOperand &RS1 = MI.getOperand(2); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(3); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RISCV::Q0 + QYVal) .addReg(RISCV::Q0 + QWVal) .addReg(RS1.getReg()); @@ -8437,12 +8092,10 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_ST_S_XACC_IP_P: { unsigned Opc = RISCV::ESP_ST_S_XACC_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_8 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - 
unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_8 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_8.getImm()); @@ -8452,12 +8105,10 @@ MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( case RISCV::ESP_ST_U_XACC_IP_P: { unsigned Opc = RISCV::ESP_ST_U_XACC_IP; MachineBasicBlock *MBB = MI.getParent(); - MachineOperand &RS1 = MI.getOperand(0); - MachineOperand &OFFSET_256_8 = MI.getOperand(1); - const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; - unsigned R1 = MRI.createVirtualRegister(RC); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_8 = MI.getOperand(2); BuildMI(*MBB, MI, DL, TII.get(Opc)) - .addReg(R1, RegState::Define) + .addReg(MI.getOperand(0).getReg(), RegState::Define) .addReg(RS1.getReg()) .addImm(OFFSET_256_8.getImm()); diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoESP32P4.td b/llvm/lib/Target/RISCV/RISCVInstrInfoESP32P4.td index bd97a927ea98f..e89c6a304b2d2 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoESP32P4.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoESP32P4.td @@ -103,10 +103,10 @@ def ESP_VCMULAS_S16_QACC_H_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VCMULAS_S16_QACC_H_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VCMULAS_S16_QACC_H_LD_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), "!esp_vcmulas_s16_qacc_h_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", - [(int_riscv_esp_vcmulas_s16_qacc_h_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vcmulas_s16_qacc_h_ld_ip timm:$qx, timm:$qy, 
GPRPIE:$rs1, timm:$off1616, timm:$qu))]>; def ESP_VCMULAS_S16_QACC_H_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), "esp.vcmulas.s16.qacc.h.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> @@ -147,10 +147,10 @@ def ESP_VCMULAS_S16_QACC_H_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VCMULAS_S16_QACC_H_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VCMULAS_S16_QACC_H_LD_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), "!esp_vcmulas_s16_qacc_h_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", - [(int_riscv_esp_vcmulas_s16_qacc_h_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vcmulas_s16_qacc_h_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu))]>; def ESP_VCMULAS_S16_QACC_L: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), "esp.vcmulas.s16.qacc.l\t $qx, $qy", []> @@ -234,10 +234,10 @@ def ESP_VCMULAS_S16_QACC_L_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VCMULAS_S16_QACC_L_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VCMULAS_S16_QACC_L_LD_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), "!esp_vcmulas_s16_qacc_l_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", - [(int_riscv_esp_vcmulas_s16_qacc_l_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vcmulas_s16_qacc_l_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu))]>; def ESP_VCMULAS_S16_QACC_L_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, 
GPRPIE:$rs1), "esp.vcmulas.s16.qacc.l.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> @@ -278,10 +278,10 @@ def ESP_VCMULAS_S16_QACC_L_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VCMULAS_S16_QACC_L_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VCMULAS_S16_QACC_L_LD_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), "!esp_vcmulas_s16_qacc_l_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", - [(int_riscv_esp_vcmulas_s16_qacc_l_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vcmulas_s16_qacc_l_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu))]>; def ESP_VCMULAS_S8_QACC_H: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), "esp.vcmulas.s8.qacc.h\t $qx, $qy", []> @@ -365,10 +365,10 @@ def ESP_VCMULAS_S8_QACC_H_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VCMULAS_S8_QACC_H_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VCMULAS_S8_QACC_H_LD_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), "!esp_vcmulas_s8_qacc_h_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", - [(int_riscv_esp_vcmulas_s8_qacc_h_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vcmulas_s8_qacc_h_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu))]>; def ESP_VCMULAS_S8_QACC_H_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), "esp.vcmulas.s8.qacc.h.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> @@ -409,10 +409,10 @@ def ESP_VCMULAS_S8_QACC_H_LD_XP: Esp32P4Inst<(outs QR:$qu, 
GPRPIE:$rs1r), (ins G let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VCMULAS_S8_QACC_H_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VCMULAS_S8_QACC_H_LD_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), "!esp_vcmulas_s8_qacc_h_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", - [(int_riscv_esp_vcmulas_s8_qacc_h_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vcmulas_s8_qacc_h_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu))]>; def ESP_VCMULAS_S8_QACC_L: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), "esp.vcmulas.s8.qacc.l\t $qx, $qy", []> @@ -496,10 +496,10 @@ def ESP_VCMULAS_S8_QACC_L_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VCMULAS_S8_QACC_L_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VCMULAS_S8_QACC_L_LD_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), "!esp_vcmulas_s8_qacc_l_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", - [(int_riscv_esp_vcmulas_s8_qacc_l_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vcmulas_s8_qacc_l_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu))]>; def ESP_VCMULAS_S8_QACC_L_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), "esp.vcmulas.s8.qacc.l.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> @@ -540,10 +540,10 @@ def ESP_VCMULAS_S8_QACC_L_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins G let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VCMULAS_S8_QACC_L_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, 
GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VCMULAS_S8_QACC_L_LD_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), "!esp_vcmulas_s8_qacc_l_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", - [(int_riscv_esp_vcmulas_s8_qacc_l_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vcmulas_s8_qacc_l_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu))]>; def ESP_VMULAS_S16_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), "esp.vmulas.s16.qacc\t $qx, $qy", []> @@ -628,10 +628,10 @@ def ESP_VMULAS_S16_QACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR: let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_S16_QACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_S16_QACC_LD_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), "!esp_vmulas_s16_qacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", - [(int_riscv_esp_vmulas_s16_qacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_s16_qacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu))]>; def ESP_VMULAS_S16_QACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), "esp.vmulas.s16.qacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> @@ -672,10 +672,10 @@ def ESP_VMULAS_S16_QACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPR let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_S16_QACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_S16_QACC_LD_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, 
GPRPIE:$rs1, imm8:$qu), "!esp_vmulas_s16_qacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", - [(int_riscv_esp_vmulas_s16_qacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_s16_qacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu))]>; def ESP_VMULAS_S16_QACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), "esp.vmulas.s16.qacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> @@ -716,10 +716,10 @@ def ESP_VMULAS_S16_QACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR: let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_S16_QACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_S16_QACC_ST_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), "!esp_vmulas_s16_qacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", - [(int_riscv_esp_vmulas_s16_qacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_s16_qacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616))]>; def ESP_VMULAS_S16_QACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmulas.s16.qacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> @@ -760,10 +760,10 @@ def ESP_VMULAS_S16_QACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2 let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_S16_QACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_S16_QACC_ST_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), "!esp_vmulas_s16_qacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", - 
[(int_riscv_esp_vmulas_s16_qacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_s16_qacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1))]>; def ESP_VMULAS_S16_XACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), "esp.vmulas.s16.xacc\t $qx, $qy", []> @@ -848,10 +848,10 @@ def ESP_VMULAS_S16_XACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR: let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_S16_XACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_S16_XACC_LD_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), "!esp_vmulas_s16_xacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", - [(int_riscv_esp_vmulas_s16_xacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_s16_xacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu))]>; def ESP_VMULAS_S16_XACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), "esp.vmulas.s16.xacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> @@ -892,10 +892,10 @@ def ESP_VMULAS_S16_XACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPR let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_S16_XACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_S16_XACC_LD_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), "!esp_vmulas_s16_xacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", - [(int_riscv_esp_vmulas_s16_xacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_s16_xacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, 
timm:$qu))]>; def ESP_VMULAS_S16_XACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), "esp.vmulas.s16.xacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> @@ -936,10 +936,10 @@ def ESP_VMULAS_S16_XACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR: let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_S16_XACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_S16_XACC_ST_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), "!esp_vmulas_s16_xacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", - [(int_riscv_esp_vmulas_s16_xacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_s16_xacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616))]>; def ESP_VMULAS_S16_XACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmulas.s16.xacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> @@ -980,10 +980,10 @@ def ESP_VMULAS_S16_XACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2 let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_S16_XACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_S16_XACC_ST_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), "!esp_vmulas_s16_xacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", - [(int_riscv_esp_vmulas_s16_xacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_s16_xacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1))]>; def ESP_VMULAS_S8_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), "esp.vmulas.s8.qacc\t $qx, $qy", 
[]> @@ -1068,10 +1068,10 @@ def ESP_VMULAS_S8_QACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$ let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_S8_QACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_S8_QACC_LD_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), "!esp_vmulas_s8_qacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", - [(int_riscv_esp_vmulas_s8_qacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_s8_qacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu))]>; def ESP_VMULAS_S8_QACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), "esp.vmulas.s8.qacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> @@ -1112,10 +1112,10 @@ def ESP_VMULAS_S8_QACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRP let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_S8_QACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_S8_QACC_LD_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), "!esp_vmulas_s8_qacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", - [(int_riscv_esp_vmulas_s8_qacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_s8_qacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu))]>; def ESP_VMULAS_S8_QACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), "esp.vmulas.s8.qacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> @@ -1156,10 +1156,10 @@ def ESP_VMULAS_S8_QACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$ let Inst{0} = 
1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_S8_QACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_S8_QACC_ST_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), "!esp_vmulas_s8_qacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", - [(int_riscv_esp_vmulas_s8_qacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_s8_qacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616))]>; def ESP_VMULAS_S8_QACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmulas.s8.qacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> @@ -1200,10 +1200,10 @@ def ESP_VMULAS_S8_QACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_S8_QACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_S8_QACC_ST_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), "!esp_vmulas_s8_qacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", - [(int_riscv_esp_vmulas_s8_qacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_s8_qacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1))]>; def ESP_VMULAS_S8_XACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), "esp.vmulas.s8.xacc\t $qx, $qy", []> @@ -1288,10 +1288,10 @@ def ESP_VMULAS_S8_XACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$ let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_S8_XACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r 
= $rs1" in +def ESP_VMULAS_S8_XACC_LD_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), "!esp_vmulas_s8_xacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", - [(int_riscv_esp_vmulas_s8_xacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_s8_xacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu))]>; def ESP_VMULAS_S8_XACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), "esp.vmulas.s8.xacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> @@ -1332,10 +1332,10 @@ def ESP_VMULAS_S8_XACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRP let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_S8_XACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_S8_XACC_LD_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), "!esp_vmulas_s8_xacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", - [(int_riscv_esp_vmulas_s8_xacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_s8_xacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu))]>; def ESP_VMULAS_S8_XACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), "esp.vmulas.s8.xacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> @@ -1376,10 +1376,10 @@ def ESP_VMULAS_S8_XACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$ let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_S8_XACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_S8_XACC_ST_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, 
GPRPIE:$rs1, offset_16_16:$off1616), "!esp_vmulas_s8_xacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", - [(int_riscv_esp_vmulas_s8_xacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_s8_xacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616))]>; def ESP_VMULAS_S8_XACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmulas.s8.xacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> @@ -1420,10 +1420,10 @@ def ESP_VMULAS_S8_XACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_S8_XACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_S8_XACC_ST_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), "!esp_vmulas_s8_xacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", - [(int_riscv_esp_vmulas_s8_xacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_s8_xacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1))]>; def ESP_VMULAS_U16_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), "esp.vmulas.u16.qacc\t $qx, $qy", []> @@ -1508,10 +1508,10 @@ def ESP_VMULAS_U16_QACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR: let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_U16_QACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_U16_QACC_LD_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), "!esp_vmulas_u16_qacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", - [(int_riscv_esp_vmulas_u16_qacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + [(set 
GPRPIE:$rs1r, (int_riscv_esp_vmulas_u16_qacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu))]>; def ESP_VMULAS_U16_QACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), "esp.vmulas.u16.qacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> @@ -1552,10 +1552,10 @@ def ESP_VMULAS_U16_QACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPR let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_U16_QACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_U16_QACC_LD_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), "!esp_vmulas_u16_qacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", - [(int_riscv_esp_vmulas_u16_qacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_u16_qacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu))]>; def ESP_VMULAS_U16_QACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), "esp.vmulas.u16.qacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> @@ -1596,10 +1596,10 @@ def ESP_VMULAS_U16_QACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR: let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_U16_QACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_U16_QACC_ST_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), "!esp_vmulas_u16_qacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", - [(int_riscv_esp_vmulas_u16_qacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_u16_qacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, 
timm:$off1616))]>; def ESP_VMULAS_U16_QACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmulas.u16.qacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> @@ -1640,10 +1640,10 @@ def ESP_VMULAS_U16_QACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2 let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_U16_QACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_U16_QACC_ST_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), "!esp_vmulas_u16_qacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", - [(int_riscv_esp_vmulas_u16_qacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_u16_qacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1))]>; def ESP_VMULAS_U16_XACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), "esp.vmulas.u16.xacc\t $qx, $qy", []> @@ -1728,10 +1728,10 @@ def ESP_VMULAS_U16_XACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR: let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_U16_XACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_U16_XACC_LD_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), "!esp_vmulas_u16_xacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", - [(int_riscv_esp_vmulas_u16_xacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_u16_xacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu))]>; def ESP_VMULAS_U16_XACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), "esp.vmulas.u16.xacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> 
@@ -1772,10 +1772,10 @@ def ESP_VMULAS_U16_XACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPR let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_U16_XACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_U16_XACC_LD_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), "!esp_vmulas_u16_xacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", - [(int_riscv_esp_vmulas_u16_xacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_u16_xacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu))]>; def ESP_VMULAS_U16_XACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), "esp.vmulas.u16.xacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> @@ -1816,10 +1816,10 @@ def ESP_VMULAS_U16_XACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR: let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_U16_XACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_U16_XACC_ST_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), "!esp_vmulas_u16_xacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", - [(int_riscv_esp_vmulas_u16_xacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_u16_xacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616))]>; def ESP_VMULAS_U16_XACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmulas.u16.xacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> @@ -1860,10 +1860,10 @@ def ESP_VMULAS_U16_XACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2 let 
Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_U16_XACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_U16_XACC_ST_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), "!esp_vmulas_u16_xacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", - [(int_riscv_esp_vmulas_u16_xacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_u16_xacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1))]>; def ESP_VMULAS_U8_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), "esp.vmulas.u8.qacc\t $qx, $qy", []> @@ -1948,10 +1948,10 @@ def ESP_VMULAS_U8_QACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$ let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_U8_QACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_U8_QACC_LD_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), "!esp_vmulas_u8_qacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", - [(int_riscv_esp_vmulas_u8_qacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_u8_qacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu))]>; def ESP_VMULAS_U8_QACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), "esp.vmulas.u8.qacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> @@ -1992,10 +1992,10 @@ def ESP_VMULAS_U8_QACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRP let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_U8_QACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = 
"$rs1r = $rs1" in +def ESP_VMULAS_U8_QACC_LD_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), "!esp_vmulas_u8_qacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", - [(int_riscv_esp_vmulas_u8_qacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_u8_qacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu))]>; def ESP_VMULAS_U8_QACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), "esp.vmulas.u8.qacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> @@ -2036,10 +2036,10 @@ def ESP_VMULAS_U8_QACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$ let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_U8_QACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_U8_QACC_ST_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), "!esp_vmulas_u8_qacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", - [(int_riscv_esp_vmulas_u8_qacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_u8_qacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616))]>; def ESP_VMULAS_U8_QACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmulas.u8.qacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> @@ -2080,10 +2080,10 @@ def ESP_VMULAS_U8_QACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_U8_QACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_U8_QACC_ST_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, 
imm8:$qy, imm8:$qu, GPRPIE:$rs1), "!esp_vmulas_u8_qacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", - [(int_riscv_esp_vmulas_u8_qacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_u8_qacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1))]>; def ESP_VMULAS_U8_XACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), "esp.vmulas.u8.xacc\t $qx, $qy", []> @@ -2168,10 +2168,10 @@ def ESP_VMULAS_U8_XACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$ let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_U8_XACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_U8_XACC_LD_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), "!esp_vmulas_u8_xacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", - [(int_riscv_esp_vmulas_u8_xacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_u8_xacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu))]>; def ESP_VMULAS_U8_XACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), "esp.vmulas.u8.xacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> @@ -2212,10 +2212,10 @@ def ESP_VMULAS_U8_XACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRP let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_U8_XACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_U8_XACC_LD_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), "!esp_vmulas_u8_xacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", - [(int_riscv_esp_vmulas_u8_xacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, 
(int_riscv_esp_vmulas_u8_xacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu))]>; def ESP_VMULAS_U8_XACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), "esp.vmulas.u8.xacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> @@ -2256,10 +2256,10 @@ def ESP_VMULAS_U8_XACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$ let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_U8_XACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_U8_XACC_ST_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), "!esp_vmulas_u8_xacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", - [(int_riscv_esp_vmulas_u8_xacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_u8_xacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616))]>; def ESP_VMULAS_U8_XACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmulas.u8.xacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> @@ -2300,10 +2300,10 @@ def ESP_VMULAS_U8_XACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_U8_XACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_U8_XACC_ST_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), "!esp_vmulas_u8_xacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", - [(int_riscv_esp_vmulas_u8_xacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_u8_xacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1))]>; def 
ESP_VMULAS_S16_QACC_LDBC_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), "esp.vmulas.s16.qacc.ldbc.incp\t $qu, $rs1, $qx, $qy", []> @@ -2345,10 +2345,10 @@ def ESP_VMULAS_S16_QACC_LDBC_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_S16_QACC_LDBC_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_S16_QACC_LDBC_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), "!esp_vmulas_s16_qacc_ldbc_incp_p $qu, $rs1, $qx, $qy", - [(int_riscv_esp_vmulas_s16_qacc_ldbc_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_s16_qacc_ldbc_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu))]>; def ESP_VMULAS_S8_QACC_LDBC_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), "esp.vmulas.s8.qacc.ldbc.incp\t $qu, $rs1, $qx, $qy", []> @@ -2390,10 +2390,10 @@ def ESP_VMULAS_S8_QACC_LDBC_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_S8_QACC_LDBC_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_S8_QACC_LDBC_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), "!esp_vmulas_s8_qacc_ldbc_incp_p $qu, $rs1, $qx, $qy", - [(int_riscv_esp_vmulas_s8_qacc_ldbc_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_s8_qacc_ldbc_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu))]>; def ESP_VMULAS_U16_QACC_LDBC_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), "esp.vmulas.u16.qacc.ldbc.incp\t $qu, $rs1, $qx, $qy", []> @@ -2435,10 +2435,10 @@ def ESP_VMULAS_U16_QACC_LDBC_INCP: Esp32P4Inst<(outs QR:$qu, 
GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_U16_QACC_LDBC_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_U16_QACC_LDBC_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), "!esp_vmulas_u16_qacc_ldbc_incp_p $qu, $rs1, $qx, $qy", - [(int_riscv_esp_vmulas_u16_qacc_ldbc_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_u16_qacc_ldbc_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu))]>; def ESP_VMULAS_U8_QACC_LDBC_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), "esp.vmulas.u8.qacc.ldbc.incp\t $qu, $rs1, $qx, $qy", []> @@ -2480,10 +2480,10 @@ def ESP_VMULAS_U8_QACC_LDBC_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMULAS_U8_QACC_LDBC_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMULAS_U8_QACC_LDBC_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), "!esp_vmulas_u8_qacc_ldbc_incp_p $qu, $rs1, $qx, $qy", - [(int_riscv_esp_vmulas_u8_qacc_ldbc_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmulas_u8_qacc_ldbc_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu))]>; def ESP_VSMULAS_S16_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy, select_16:$sel16), "esp.vsmulas.s16.qacc\t $qx, $qy, $sel16", []> @@ -2565,10 +2565,10 @@ def ESP_VSMULAS_S16_QACC_LD_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VSMULAS_S16_QACC_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_16:$sel16, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VSMULAS_S16_QACC_LD_INCP_P : 
PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_16:$sel16, imm8:$qu), "!esp_vsmulas_s16_qacc_ld_incp_p $qu, $rs1, $qx, $qy, $sel16", - [(int_riscv_esp_vsmulas_s16_qacc_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel16, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vsmulas_s16_qacc_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel16, timm:$qu))]>; def ESP_VSMULAS_S8_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy, select_16:$sel16), "esp.vsmulas.s8.qacc\t $qx, $qy, $sel16", []> @@ -2650,10 +2650,10 @@ def ESP_VSMULAS_S8_QACC_LD_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VSMULAS_S8_QACC_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_16:$sel16, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VSMULAS_S8_QACC_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_16:$sel16, imm8:$qu), "!esp_vsmulas_s8_qacc_ld_incp_p $qu, $rs1, $qx, $qy, $sel16", - [(int_riscv_esp_vsmulas_s8_qacc_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel16, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vsmulas_s8_qacc_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel16, timm:$qu))]>; def ESP_VSMULAS_U16_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy, select_16:$sel16), "esp.vsmulas.u16.qacc\t $qx, $qy, $sel16", []> @@ -2735,10 +2735,10 @@ def ESP_VSMULAS_U16_QACC_LD_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VSMULAS_U16_QACC_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_16:$sel16, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VSMULAS_U16_QACC_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_16:$sel16, imm8:$qu), "!esp_vsmulas_u16_qacc_ld_incp_p $qu, $rs1, $qx, $qy, $sel16", - 
[(int_riscv_esp_vsmulas_u16_qacc_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel16, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vsmulas_u16_qacc_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel16, timm:$qu))]>; def ESP_VSMULAS_U8_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy, select_16:$sel16), "esp.vsmulas.u8.qacc\t $qx, $qy, $sel16", []> @@ -2820,10 +2820,10 @@ def ESP_VSMULAS_U8_QACC_LD_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VSMULAS_U8_QACC_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_16:$sel16, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VSMULAS_U8_QACC_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_16:$sel16, imm8:$qu), "!esp_vsmulas_u8_qacc_ld_incp_p $qu, $rs1, $qx, $qy, $sel16", - [(int_riscv_esp_vsmulas_u8_qacc_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel16, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vsmulas_u8_qacc_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel16, timm:$qu))]>; def ESP_CMUL_S16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy, select_4:$sel4), "esp.cmul.s16\t $qz, $qx, $qy, $sel4", []> @@ -2907,10 +2907,10 @@ def ESP_CMUL_S16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_CMUL_S16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_4:$sel4, imm8:$qz, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_CMUL_S16_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_4:$sel4, imm8:$qz, imm8:$qu), "!esp_cmul_s16_ld_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", - [(int_riscv_esp_cmul_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel4, timm:$qz, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_cmul_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel4, timm:$qz, 
timm:$qu))]>; def ESP_CMUL_S16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, select_4:$sel4), "esp.cmul.s16.st.incp\t $qu, $rs1, $qz, $qx, $qy, $sel4", []> @@ -2951,10 +2951,10 @@ def ESP_CMUL_S16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_CMUL_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, select_4:$sel4, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_CMUL_S16_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, select_4:$sel4, imm8:$qz), "!esp_cmul_s16_st_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", - [(int_riscv_esp_cmul_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$sel4, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_cmul_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$sel4, timm:$qz))]>; def ESP_CMUL_S8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy, select_4:$sel4), "esp.cmul.s8\t $qz, $qx, $qy, $sel4", []> @@ -3038,10 +3038,10 @@ def ESP_CMUL_S8_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_CMUL_S8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_4:$sel4, imm8:$qz, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_CMUL_S8_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_4:$sel4, imm8:$qz, imm8:$qu), "!esp_cmul_s8_ld_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", - [(int_riscv_esp_cmul_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel4, timm:$qz, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_cmul_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel4, timm:$qz, timm:$qu))]>; def ESP_CMUL_S8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, select_4:$sel4), 
"esp.cmul.s8.st.incp\t $qu, $rs1, $qz, $qx, $qy, $sel4", []> @@ -3082,10 +3082,10 @@ def ESP_CMUL_S8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_CMUL_S8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, select_4:$sel4, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_CMUL_S8_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, select_4:$sel4, imm8:$qz), "!esp_cmul_s8_st_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", - [(int_riscv_esp_cmul_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$sel4, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_cmul_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$sel4, timm:$qz))]>; def ESP_CMUL_U16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy, select_4:$sel4), "esp.cmul.u16\t $qz, $qx, $qy, $sel4", []> @@ -3169,10 +3169,10 @@ def ESP_CMUL_U16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_CMUL_U16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_4:$sel4, imm8:$qz, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_CMUL_U16_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_4:$sel4, imm8:$qz, imm8:$qu), "!esp_cmul_u16_ld_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", - [(int_riscv_esp_cmul_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel4, timm:$qz, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_cmul_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel4, timm:$qz, timm:$qu))]>; def ESP_CMUL_U16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, select_4:$sel4), "esp.cmul.u16.st.incp\t $qu, $rs1, $qz, $qx, $qy, $sel4", []> @@ -3213,10 +3213,10 @@ def ESP_CMUL_U16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), 
(ins QR:$qx, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_CMUL_U16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, select_4:$sel4, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_CMUL_U16_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, select_4:$sel4, imm8:$qz), "!esp_cmul_u16_st_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", - [(int_riscv_esp_cmul_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$sel4, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_cmul_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$sel4, timm:$qz))]>; def ESP_CMUL_U8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy, select_4:$sel4), "esp.cmul.u8\t $qz, $qx, $qy, $sel4", []> @@ -3300,10 +3300,10 @@ def ESP_CMUL_U8_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_CMUL_U8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_4:$sel4, imm8:$qz, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_CMUL_U8_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_4:$sel4, imm8:$qz, imm8:$qu), "!esp_cmul_u8_ld_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", - [(int_riscv_esp_cmul_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel4, timm:$qz, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_cmul_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel4, timm:$qz, timm:$qu))]>; def ESP_CMUL_U8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, select_4:$sel4), "esp.cmul.u8.st.incp\t $qu, $rs1, $qz, $qx, $qy, $sel4", []> @@ -3344,10 +3344,10 @@ def ESP_CMUL_U8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_CMUL_U8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, 
GPRPIE:$rs1, select_4:$sel4, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_CMUL_U8_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, select_4:$sel4, imm8:$qz), "!esp_cmul_u8_st_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", - [(int_riscv_esp_cmul_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$sel4, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_cmul_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$sel4, timm:$qz))]>; def ESP_MAX_S16_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), "esp.max.s16.a\t $qw, $rd", []> @@ -4103,10 +4103,10 @@ def ESP_VADD_S16_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VADD_S16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VADD_S16_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), "!esp_vadd_s16_ld_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vadd_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vadd_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu))]>; def ESP_VADD_S16_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vadd.s16.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> @@ -4147,10 +4147,10 @@ def ESP_VADD_S16_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VADD_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VADD_S16_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), "!esp_vadd_s16_st_incp_p $qu, $rs1, $qv, $qx, $qy", - 
[(int_riscv_esp_vadd_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vadd_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv))]>; def ESP_VADD_S32: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), "esp.vadd.s32\t $qv, $qx, $qy", []> @@ -4234,10 +4234,10 @@ def ESP_VADD_S32_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VADD_S32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VADD_S32_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), "!esp_vadd_s32_ld_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vadd_s32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vadd_s32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu))]>; def ESP_VADD_S32_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vadd.s32.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> @@ -4278,10 +4278,10 @@ def ESP_VADD_S32_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VADD_S32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VADD_S32_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), "!esp_vadd_s32_st_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vadd_s32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vadd_s32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv))]>; def ESP_VADD_S8: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), "esp.vadd.s8\t $qv, $qx, $qy", []> @@ -4365,10 +4365,10 
@@ def ESP_VADD_S8_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VADD_S8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VADD_S8_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), "!esp_vadd_s8_ld_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vadd_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vadd_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu))]>; def ESP_VADD_S8_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vadd.s8.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> @@ -4409,10 +4409,10 @@ def ESP_VADD_S8_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VADD_S8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VADD_S8_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), "!esp_vadd_s8_st_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vadd_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vadd_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv))]>; def ESP_VADD_U16: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), "esp.vadd.u16\t $qv, $qx, $qy", []> @@ -4496,10 +4496,10 @@ def ESP_VADD_U16_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VADD_U16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VADD_U16_LD_INCP_P 
: PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), "!esp_vadd_u16_ld_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vadd_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vadd_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu))]>; def ESP_VADD_U16_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vadd.u16.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> @@ -4540,10 +4540,10 @@ def ESP_VADD_U16_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VADD_U16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VADD_U16_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), "!esp_vadd_u16_st_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vadd_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vadd_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv))]>; def ESP_VADD_U32: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), "esp.vadd.u32\t $qv, $qx, $qy", []> @@ -4627,10 +4627,10 @@ def ESP_VADD_U32_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VADD_U32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VADD_U32_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), "!esp_vadd_u32_ld_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vadd_u32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vadd_u32_ld_incp timm:$qx, timm:$qy, 
GPRPIE:$rs1, timm:$qv, timm:$qu))]>; def ESP_VADD_U32_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vadd.u32.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> @@ -4671,10 +4671,10 @@ def ESP_VADD_U32_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VADD_U32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VADD_U32_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), "!esp_vadd_u32_st_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vadd_u32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vadd_u32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv))]>; def ESP_VADD_U8: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), "esp.vadd.u8\t $qv, $qx, $qy", []> @@ -4758,10 +4758,10 @@ def ESP_VADD_U8_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VADD_U8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VADD_U8_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), "!esp_vadd_u8_ld_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vadd_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vadd_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu))]>; def ESP_VADD_U8_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vadd.u8.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> @@ -4802,10 +4802,10 @@ def ESP_VADD_U8_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, Q let Inst{0} = 1; } -let 
usesCustomInserter = 1 in -def ESP_VADD_U8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VADD_U8_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), "!esp_vadd_u8_st_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vadd_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vadd_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv))]>; def ESP_VCLAMP_S16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, select_16:$sel16), "esp.vclamp.s16\t $qz, $qx, $sel16", []> @@ -4932,10 +4932,10 @@ def ESP_VMAX_S16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMAX_S16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMAX_S16_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), "!esp_vmax_s16_ld_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmax_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmax_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu))]>; def ESP_VMAX_S16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmax.s16.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> @@ -4976,10 +4976,10 @@ def ESP_VMAX_S16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMAX_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMAX_S16_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, 
imm8:$qz), "!esp_vmax_s16_st_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmax_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmax_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz))]>; def ESP_VMAX_S32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), "esp.vmax.s32\t $qz, $qx, $qy", []> @@ -5063,10 +5063,10 @@ def ESP_VMAX_S32_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMAX_S32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMAX_S32_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), "!esp_vmax_s32_ld_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmax_s32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmax_s32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu))]>; def ESP_VMAX_S32_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmax.s32.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> @@ -5107,10 +5107,10 @@ def ESP_VMAX_S32_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMAX_S32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMAX_S32_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), "!esp_vmax_s32_st_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmax_s32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmax_s32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz))]>; def ESP_VMAX_S8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, 
QR:$qy), "esp.vmax.s8\t $qz, $qx, $qy", []> @@ -5194,10 +5194,10 @@ def ESP_VMAX_S8_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMAX_S8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMAX_S8_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), "!esp_vmax_s8_ld_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmax_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmax_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu))]>; def ESP_VMAX_S8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmax.s8.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> @@ -5238,10 +5238,10 @@ def ESP_VMAX_S8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMAX_S8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMAX_S8_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), "!esp_vmax_s8_st_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmax_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmax_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz))]>; def ESP_VMAX_U16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), "esp.vmax.u16\t $qz, $qx, $qy", []> @@ -5325,10 +5325,10 @@ def ESP_VMAX_U16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMAX_U16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), +let usesCustomInserter 
= 1, Constraints = "$rs1r = $rs1" in +def ESP_VMAX_U16_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), "!esp_vmax_u16_ld_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmax_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmax_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu))]>; def ESP_VMAX_U16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmax.u16.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> @@ -5369,10 +5369,10 @@ def ESP_VMAX_U16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMAX_U16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMAX_U16_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), "!esp_vmax_u16_st_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmax_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmax_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz))]>; def ESP_VMAX_U32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), "esp.vmax.u32\t $qz, $qx, $qy", []> @@ -5456,10 +5456,10 @@ def ESP_VMAX_U32_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMAX_U32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMAX_U32_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), "!esp_vmax_u32_ld_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmax_u32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + [(set 
GPRPIE:$rs1r, (int_riscv_esp_vmax_u32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu))]>; def ESP_VMAX_U32_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmax.u32.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> @@ -5500,10 +5500,10 @@ def ESP_VMAX_U32_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMAX_U32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMAX_U32_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), "!esp_vmax_u32_st_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmax_u32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmax_u32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz))]>; def ESP_VMAX_U8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), "esp.vmax.u8\t $qz, $qx, $qy", []> @@ -5587,10 +5587,10 @@ def ESP_VMAX_U8_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMAX_U8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMAX_U8_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), "!esp_vmax_u8_ld_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmax_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmax_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu))]>; def ESP_VMAX_U8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmax.u8.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> @@ -5631,10 +5631,10 @@ def ESP_VMAX_U8_ST_INCP: 
Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMAX_U8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMAX_U8_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), "!esp_vmax_u8_st_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmax_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmax_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz))]>; def ESP_VMIN_S16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), "esp.vmin.s16\t $qz, $qx, $qy", []> @@ -5718,10 +5718,10 @@ def ESP_VMIN_S16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMIN_S16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMIN_S16_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), "!esp_vmin_s16_ld_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmin_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmin_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu))]>; def ESP_VMIN_S16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmin.s16.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> @@ -5762,10 +5762,10 @@ def ESP_VMIN_S16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMIN_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMIN_S16_ST_INCP_P : PseudoESP32P4<(outs 
GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), "!esp_vmin_s16_st_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmin_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmin_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz))]>; def ESP_VMIN_S32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), "esp.vmin.s32\t $qz, $qx, $qy", []> @@ -5849,10 +5849,10 @@ def ESP_VMIN_S32_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMIN_S32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMIN_S32_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), "!esp_vmin_s32_ld_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmin_s32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmin_s32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu))]>; def ESP_VMIN_S32_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmin.s32.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> @@ -5893,10 +5893,10 @@ def ESP_VMIN_S32_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMIN_S32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMIN_S32_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), "!esp_vmin_s32_st_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmin_s32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmin_s32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, 
timm:$qz))]>; def ESP_VMIN_S8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), "esp.vmin.s8\t $qz, $qx, $qy", []> @@ -5980,10 +5980,10 @@ def ESP_VMIN_S8_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMIN_S8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMIN_S8_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), "!esp_vmin_s8_ld_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmin_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmin_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu))]>; def ESP_VMIN_S8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmin.s8.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> @@ -6024,10 +6024,10 @@ def ESP_VMIN_S8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMIN_S8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMIN_S8_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), "!esp_vmin_s8_st_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmin_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmin_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz))]>; def ESP_VMIN_U16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), "esp.vmin.u16\t $qz, $qx, $qy", []> @@ -6111,10 +6111,10 @@ def ESP_VMIN_U16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMIN_U16_LD_INCP_P : PseudoESP32P4<(outs), (ins 
imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMIN_U16_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), "!esp_vmin_u16_ld_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmin_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmin_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu))]>; def ESP_VMIN_U16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmin.u16.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> @@ -6155,10 +6155,10 @@ def ESP_VMIN_U16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMIN_U16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMIN_U16_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), "!esp_vmin_u16_st_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmin_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmin_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz))]>; def ESP_VMIN_U32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), "esp.vmin.u32\t $qz, $qx, $qy", []> @@ -6242,10 +6242,10 @@ def ESP_VMIN_U32_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMIN_U32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMIN_U32_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), "!esp_vmin_u32_ld_incp_p $qu, $rs1, $qz, $qx, $qy", - 
[(int_riscv_esp_vmin_u32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmin_u32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu))]>; def ESP_VMIN_U32_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmin.u32.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> @@ -6286,10 +6286,10 @@ def ESP_VMIN_U32_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMIN_U32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMIN_U32_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), "!esp_vmin_u32_st_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmin_u32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmin_u32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz))]>; def ESP_VMIN_U8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), "esp.vmin.u8\t $qz, $qx, $qy", []> @@ -6373,10 +6373,10 @@ def ESP_VMIN_U8_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMIN_U8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMIN_U8_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), "!esp_vmin_u8_ld_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmin_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmin_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu))]>; def ESP_VMIN_U8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), 
"esp.vmin.u8.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> @@ -6417,10 +6417,10 @@ def ESP_VMIN_U8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMIN_U8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMIN_U8_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), "!esp_vmin_u8_st_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmin_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmin_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz))]>; def ESP_VMUL_S16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), "esp.vmul.s16\t $qz, $qx, $qy", []> @@ -6504,10 +6504,10 @@ def ESP_VMUL_S16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMUL_S16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMUL_S16_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), "!esp_vmul_s16_ld_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmul_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmul_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu))]>; def ESP_VMUL_S16_S8XS8: Esp32P4Inst<(outs QR:$qz, QR:$qv), (ins QR:$qx, QR:$qy), "esp.vmul.s16.s8xs8\t $qz, $qv, $qx, $qy", []> @@ -6590,10 +6590,10 @@ def ESP_VMUL_S16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMUL_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), +let usesCustomInserter = 1, Constraints = 
"$rs1r = $rs1" in +def ESP_VMUL_S16_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), "!esp_vmul_s16_st_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmul_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmul_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz))]>; def ESP_VMUL_S32_S16XS16: Esp32P4Inst<(outs QR:$qz, QR:$qv), (ins QR:$qx, QR:$qy), "esp.vmul.s32.s16xs16\t $qz, $qv, $qx, $qy", []> @@ -6719,10 +6719,10 @@ def ESP_VMUL_S8_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMUL_S8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMUL_S8_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), "!esp_vmul_s8_ld_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmul_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmul_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu))]>; def ESP_VMUL_S8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmul.s8.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> @@ -6763,10 +6763,10 @@ def ESP_VMUL_S8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMUL_S8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMUL_S8_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), "!esp_vmul_s8_st_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmul_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + [(set 
GPRPIE:$rs1r, (int_riscv_esp_vmul_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz))]>; def ESP_VMUL_U16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), "esp.vmul.u16\t $qz, $qx, $qy", []> @@ -6850,10 +6850,10 @@ def ESP_VMUL_U16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMUL_U16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMUL_U16_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), "!esp_vmul_u16_ld_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmul_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmul_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu))]>; def ESP_VMUL_U16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmul.u16.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> @@ -6894,10 +6894,10 @@ def ESP_VMUL_U16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMUL_U16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMUL_U16_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), "!esp_vmul_u16_st_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmul_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmul_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz))]>; def ESP_VMUL_U8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), "esp.vmul.u8\t $qz, $qx, $qy", []> @@ -6981,10 +6981,10 @@ def ESP_VMUL_U8_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins Q let Inst{0} = 
1; } -let usesCustomInserter = 1 in -def ESP_VMUL_U8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMUL_U8_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), "!esp_vmul_u8_ld_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmul_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmul_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu))]>; def ESP_VMUL_U8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vmul.u8.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> @@ -7025,10 +7025,10 @@ def ESP_VMUL_U8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VMUL_U8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VMUL_U8_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), "!esp_vmul_u8_st_incp_p $qu, $rs1, $qz, $qx, $qy", - [(int_riscv_esp_vmul_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vmul_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz))]>; def ESP_VPRELU_S16: Esp32P4Inst<(outs QR:$qz), (ins GPRPIE:$rs1, QR:$qx, QR:$qy), "esp.vprelu.s16\t $qz, $qy, $qx, $rs1", []> @@ -7884,10 +7884,10 @@ def ESP_VSUB_S16_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VSUB_S16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VSUB_S16_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, 
imm8:$qv, imm8:$qu), "!esp_vsub_s16_ld_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vsub_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vsub_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu))]>; def ESP_VSUB_S16_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vsub.s16.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> @@ -7928,10 +7928,10 @@ def ESP_VSUB_S16_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VSUB_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VSUB_S16_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), "!esp_vsub_s16_st_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vsub_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vsub_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv))]>; def ESP_VSUB_S32: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), "esp.vsub.s32\t $qv, $qx, $qy", []> @@ -8015,10 +8015,10 @@ def ESP_VSUB_S32_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VSUB_S32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VSUB_S32_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), "!esp_vsub_s32_ld_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vsub_s32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vsub_s32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu))]>; def ESP_VSUB_S32_ST_INCP: Esp32P4Inst<(outs 
QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vsub.s32.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> @@ -8059,10 +8059,10 @@ def ESP_VSUB_S32_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VSUB_S32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VSUB_S32_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), "!esp_vsub_s32_st_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vsub_s32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vsub_s32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv))]>; def ESP_VSUB_S8: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), "esp.vsub.s8\t $qv, $qx, $qy", []> @@ -8146,10 +8146,10 @@ def ESP_VSUB_S8_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VSUB_S8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VSUB_S8_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), "!esp_vsub_s8_ld_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vsub_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vsub_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu))]>; def ESP_VSUB_S8_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vsub.s8.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> @@ -8190,10 +8190,10 @@ def ESP_VSUB_S8_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VSUB_S8_ST_INCP_P : PseudoESP32P4<(outs), (ins 
imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VSUB_S8_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), "!esp_vsub_s8_st_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vsub_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vsub_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv))]>; def ESP_VSUB_U16: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), "esp.vsub.u16\t $qv, $qx, $qy", []> @@ -8277,10 +8277,10 @@ def ESP_VSUB_U16_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VSUB_U16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VSUB_U16_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), "!esp_vsub_u16_ld_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vsub_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vsub_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu))]>; def ESP_VSUB_U16_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vsub.u16.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> @@ -8321,10 +8321,10 @@ def ESP_VSUB_U16_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VSUB_U16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VSUB_U16_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), "!esp_vsub_u16_st_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vsub_u16_st_incp 
timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vsub_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv))]>; def ESP_VSUB_U32: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), "esp.vsub.u32\t $qv, $qx, $qy", []> @@ -8408,10 +8408,10 @@ def ESP_VSUB_U32_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VSUB_U32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VSUB_U32_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), "!esp_vsub_u32_ld_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vsub_u32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vsub_u32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu))]>; def ESP_VSUB_U32_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vsub.u32.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> @@ -8452,10 +8452,10 @@ def ESP_VSUB_U32_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VSUB_U32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VSUB_U32_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), "!esp_vsub_u32_st_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vsub_u32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vsub_u32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv))]>; def ESP_VSUB_U8: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), "esp.vsub.u8\t $qv, $qx, $qy", []> @@ -8539,10 +8539,10 @@ def ESP_VSUB_U8_LD_INCP: 
Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VSUB_U8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VSUB_U8_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), "!esp_vsub_u8_ld_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vsub_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vsub_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu))]>; def ESP_VSUB_U8_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), "esp.vsub.u8.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> @@ -8583,10 +8583,10 @@ def ESP_VSUB_U8_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VSUB_U8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VSUB_U8_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), "!esp_vsub_u8_st_incp_p $qu, $rs1, $qv, $qx, $qy", - [(int_riscv_esp_vsub_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vsub_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv))]>; def ESP_ADDX2: Esp32P4Inst<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2), "esp.addx2\t $rd, $rs1, $rs2", []> @@ -8697,10 +8697,10 @@ def ESP_SAT: Esp32P4Inst<(outs GPR:$rsdr), (ins GPR:$rs0, GPR:$rs1, GPR:$rsd), let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_SAT_P : PseudoESP32P4<(outs), (ins GPR:$rs0, GPR:$rs1, GPR:$rsd), +let usesCustomInserter = 1, Constraints = "$rsdr = $rsd" in +def ESP_SAT_P : PseudoESP32P4<(outs GPRPIE:$rsdr), (ins GPR:$rs0, GPR:$rs1, GPR:$rsd), 
"!esp_sat_p $rsd, $rs0, $rs1", - [(int_riscv_esp_sat GPR:$rs0, GPR:$rs1, GPR:$rsd)]>; + [(set GPRPIE:$rsdr, (int_riscv_esp_sat GPR:$rs0, GPR:$rs1, GPR:$rsd))]>; def ESP_SUBX2: Esp32P4Inst<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2), "esp.subx2\t $rd, $rs1, $rs2", []> @@ -10201,9 +10201,9 @@ def ESP_MOVX_R_CFG: Esp32P4Inst<(outs GPRPIE:$rd), (ins), } let usesCustomInserter = 1 in -def ESP_MOVX_R_CFG_P : PseudoESP32P4<(outs), (ins GPRPIE:$rd), +def ESP_MOVX_R_CFG_P : PseudoESP32P4<(outs GPRPIE:$rd), (ins), "!esp_movx_r_cfg_p $rd", - [(int_riscv_esp_movx_r_cfg GPRPIE:$rd)]>; + [(set GPRPIE:$rd, (int_riscv_esp_movx_r_cfg))]>; def ESP_MOVX_R_FFT_BIT_WIDTH: Esp32P4Inst<(outs GPRPIE:$rd), (ins), "esp.movx.r.fft.bit.width\t $rd", []> @@ -11625,10 +11625,10 @@ def ESP_FFT_AMS_S16_LD_INCP: Esp32P4Inst<(outs QR:$qu, QR:$qz, QR:$qv, GPRPIE:$r let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_FFT_AMS_S16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qw, GPRPIE:$rs1, select_2:$sel2, imm8:$qu, imm8:$qz, imm8:$qv), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_FFT_AMS_S16_LD_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qw, GPRPIE:$rs1, select_2:$sel2, imm8:$qu, imm8:$qz, imm8:$qv), "!esp_fft_ams_s16_ld_incp_p $qu, $rs1, $qz, $qv, $qx, $qw, $qy, $sel2", - [(int_riscv_esp_fft_ams_s16_ld_incp timm:$qx, timm:$qy, timm:$qw, GPRPIE:$rs1, timm:$sel2, timm:$qu, timm:$qz, timm:$qv)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_fft_ams_s16_ld_incp timm:$qx, timm:$qy, timm:$qw, GPRPIE:$rs1, timm:$sel2, timm:$qu, timm:$qz, timm:$qv))]>; def ESP_FFT_AMS_S16_LD_INCP_UAUP: Esp32P4Inst<(outs QR:$qu, QR:$qz, QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qw, GPRPIE:$rs1, select_2:$sel2), "esp.fft.ams.s16.ld.incp.uaup\t $qu, $rs1, $qz, $qv, $qx, $qw, $qy, $sel2", []> @@ -11669,10 +11669,10 @@ def ESP_FFT_AMS_S16_LD_INCP_UAUP: Esp32P4Inst<(outs QR:$qu, QR:$qz, QR:$qv, GPRP let Inst{0} = 1; } -let usesCustomInserter = 1 in -def 
ESP_FFT_AMS_S16_LD_INCP_UAUP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qw, GPRPIE:$rs1, select_2:$sel2, imm8:$qu, imm8:$qz, imm8:$qv), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_FFT_AMS_S16_LD_INCP_UAUP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qw, GPRPIE:$rs1, select_2:$sel2, imm8:$qu, imm8:$qz, imm8:$qv), "!esp_fft_ams_s16_ld_incp_uaup_p $qu, $rs1, $qz, $qv, $qx, $qw, $qy, $sel2", - [(int_riscv_esp_fft_ams_s16_ld_incp_uaup timm:$qx, timm:$qy, timm:$qw, GPRPIE:$rs1, timm:$sel2, timm:$qu, timm:$qz, timm:$qv)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_fft_ams_s16_ld_incp_uaup timm:$qx, timm:$qy, timm:$qw, GPRPIE:$rs1, timm:$sel2, timm:$qu, timm:$qz, timm:$qv))]>; def ESP_FFT_AMS_S16_LD_R32_DECP: Esp32P4Inst<(outs QR:$qu, QR:$qz, QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qw, GPRPIE:$rs1, select_2:$sel2), "esp.fft.ams.s16.ld.r32.decp\t $qu, $rs1, $qz, $qv, $qx, $qw, $qy, $sel2", []> @@ -11713,10 +11713,10 @@ def ESP_FFT_AMS_S16_LD_R32_DECP: Esp32P4Inst<(outs QR:$qu, QR:$qz, QR:$qv, GPRPI let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_FFT_AMS_S16_LD_R32_DECP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qw, GPRPIE:$rs1, select_2:$sel2, imm8:$qu, imm8:$qz, imm8:$qv), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_FFT_AMS_S16_LD_R32_DECP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, imm8:$qw, GPRPIE:$rs1, select_2:$sel2, imm8:$qu, imm8:$qz, imm8:$qv), "!esp_fft_ams_s16_ld_r32_decp_p $qu, $rs1, $qz, $qv, $qx, $qw, $qy, $sel2", - [(int_riscv_esp_fft_ams_s16_ld_r32_decp timm:$qx, timm:$qy, timm:$qw, GPRPIE:$rs1, timm:$sel2, timm:$qu, timm:$qz, timm:$qv)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_fft_ams_s16_ld_r32_decp timm:$qx, timm:$qy, timm:$qw, GPRPIE:$rs1, timm:$sel2, timm:$qu, timm:$qz, timm:$qv))]>; def ESP_FFT_AMS_S16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r, GPRPIE:$rs2r), (ins QR:$qx, QR:$qy, QR:$qw, QR:$qu, GPRPIE:$rs1, 
GPRPIE:$rs2, select_2:$sel2), "esp.fft.ams.s16.st.incp\t $qu, $qz, $rs2, $rs1, $qx, $qw, $qy, $sel2", []> @@ -11806,10 +11806,10 @@ def ESP_FFT_BITREV: Esp32P4Inst<(outs GPRPIE:$rs1r, QR:$qvr), (ins GPRPIE:$rs1, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_FFT_BITREV_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qv), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_FFT_BITREV_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, imm8:$qv), "!esp_fft_bitrev_p $qv, $rs1", - [(int_riscv_esp_fft_bitrev GPRPIE:$rs1, timm:$qv)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_fft_bitrev GPRPIE:$rs1, timm:$qv))]>; def ESP_FFT_CMUL_S16_LD_XP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1, select_8:$sel8), "esp.fft.cmul.s16.ld.xp\t $qu, $rs1, $rs2, $qz, $qy, $qx, $sel8", []> @@ -11849,10 +11849,10 @@ def ESP_FFT_CMUL_S16_LD_XP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (in let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_FFT_CMUL_S16_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_8:$sel8, imm8:$qz, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_FFT_CMUL_S16_LD_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_8:$sel8, imm8:$qz, imm8:$qu), "!esp_fft_cmul_s16_ld_xp_p $qu, $rs1, $rs2, $qz, $qy, $qx, $sel8", - [(int_riscv_esp_fft_cmul_s16_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel8, timm:$qz, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_fft_cmul_s16_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel8, timm:$qz, timm:$qu))]>; def ESP_FFT_CMUL_S16_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, select_4:$sel4, select_4:$upd4, select_8:$sel8), "esp.fft.cmul.s16.st.xp\t $qy, $qx, $qu, $rs1, $rs2, $sel8, $upd4, $sel4", []> @@ -11893,10 +11893,10 @@ def ESP_FFT_CMUL_S16_ST_XP: 
Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, Q let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_FFT_CMUL_S16_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, select_4:$sel4, select_4:$upd4, select_8:$sel8), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_FFT_CMUL_S16_ST_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, select_4:$sel4, select_4:$upd4, select_8:$sel8), "!esp_fft_cmul_s16_st_xp_p $qy, $qx, $qu, $rs1, $rs2, $sel8, $upd4, $sel4", - [(int_riscv_esp_fft_cmul_s16_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$sel4, timm:$upd4, timm:$sel8)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_fft_cmul_s16_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$sel4, timm:$upd4, timm:$sel8))]>; def ESP_FFT_R2BF_S16: Esp32P4Inst<(outs QR:$qz, QR:$qv), (ins QR:$qx, QR:$qy, select_2:$sel2), "esp.fft.r2bf.s16\t $qz, $qv, $qx, $qy, $sel2", []> @@ -11981,10 +11981,10 @@ def ESP_FFT_R2BF_S16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$ let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_FFT_R2BF_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_4:$sel4, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_FFT_R2BF_S16_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_4:$sel4, imm8:$qz), "!esp_fft_r2bf_s16_st_incp_p $qz, $qx, $qy, $rs1, $sel4", - [(int_riscv_esp_fft_r2bf_s16_st_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel4, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_fft_r2bf_s16_st_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel4, timm:$qz))]>; def ESP_FFT_VST_R32_DECP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qu, GPRPIE:$rs1, select_2:$sel2), "esp.fft.vst.r32.decp\t $qu, $rs1, $sel2", []> @@ -12029,10 +12029,10 @@ def ESP_FFT_VST_R32_DECP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins 
QR:$qu, GPRPIE:$ let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_FFT_VST_R32_DECP_P : PseudoESP32P4<(outs), (ins imm8:$qu, GPRPIE:$rs1, select_2:$sel2), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_FFT_VST_R32_DECP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qu, GPRPIE:$rs1, select_2:$sel2), "!esp_fft_vst_r32_decp_p $qu, $rs1, $sel2", - [(int_riscv_esp_fft_vst_r32_decp timm:$qu, GPRPIE:$rs1, timm:$sel2)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_fft_vst_r32_decp timm:$qu, GPRPIE:$rs1, timm:$sel2))]>; def ESP_LD_128_USAR_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "esp.ld.128.usar.ip\t $qu, $rs1, $off25616", []> @@ -12072,10 +12072,10 @@ def ESP_LD_128_USAR_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_LD_128_USAR_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_LD_128_USAR_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616, imm8:$qu), "!esp_ld_128_usar_ip_p $qu, $rs1, $off25616", - [(int_riscv_esp_ld_128_usar_ip GPRPIE:$rs1, timm:$off25616, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_ld_128_usar_ip GPRPIE:$rs1, timm:$off25616, timm:$qu))]>; def ESP_LD_128_USAR_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), "esp.ld.128.usar.xp\t $qu, $rs1, $rs2", []> @@ -12118,10 +12118,10 @@ def ESP_LD_128_USAR_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_LD_128_USAR_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_LD_128_USAR_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), "!esp_ld_128_usar_xp_p $qu, $rs1, $rs2", - [(int_riscv_esp_ld_128_usar_xp 
GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_ld_128_usar_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu))]>; def ESP_LD_XACC_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_8:$off2568), "esp.ld.xacc.ip\t $rs1, $off2568", []> @@ -12162,10 +12162,10 @@ def ESP_LD_XACC_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_25 let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_LD_XACC_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_8:$off2568), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_LD_XACC_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_8:$off2568), "!esp_ld_xacc_ip_p $rs1, $off2568", - [(int_riscv_esp_ld_xacc_ip GPRPIE:$rs1, timm:$off2568)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_ld_xacc_ip GPRPIE:$rs1, timm:$off2568))]>; def ESP_LDQA_S16_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "esp.ldqa.s16.128.ip\t $rs1, $off25616", []> @@ -12205,10 +12205,10 @@ def ESP_LDQA_S16_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offs let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_LDQA_S16_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_LDQA_S16_128_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "!esp_ldqa_s16_128_ip_p $rs1, $off25616", - [(int_riscv_esp_ldqa_s16_128_ip GPRPIE:$rs1, timm:$off25616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_ldqa_s16_128_ip GPRPIE:$rs1, timm:$off25616))]>; def ESP_LDQA_S16_128_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), "esp.ldqa.s16.128.xp\t $rs1, $rs2", []> @@ -12252,10 +12252,10 @@ def ESP_LDQA_S16_128_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRP let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_LDQA_S16_128_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1), +let 
usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_LDQA_S16_128_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), "!esp_ldqa_s16_128_xp_p $rs1, $rs2", - [(int_riscv_esp_ldqa_s16_128_xp GPRPIE:$rs2, GPRPIE:$rs1)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_ldqa_s16_128_xp GPRPIE:$rs2, GPRPIE:$rs1))]>; def ESP_LDQA_S8_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "esp.ldqa.s8.128.ip\t $rs1, $off25616", []> @@ -12295,10 +12295,10 @@ def ESP_LDQA_S8_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offse let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_LDQA_S8_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_LDQA_S8_128_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "!esp_ldqa_s8_128_ip_p $rs1, $off25616", - [(int_riscv_esp_ldqa_s8_128_ip GPRPIE:$rs1, timm:$off25616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_ldqa_s8_128_ip GPRPIE:$rs1, timm:$off25616))]>; def ESP_LDQA_S8_128_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), "esp.ldqa.s8.128.xp\t $rs1, $rs2", []> @@ -12342,10 +12342,10 @@ def ESP_LDQA_S8_128_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPI let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_LDQA_S8_128_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_LDQA_S8_128_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), "!esp_ldqa_s8_128_xp_p $rs1, $rs2", - [(int_riscv_esp_ldqa_s8_128_xp GPRPIE:$rs2, GPRPIE:$rs1)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_ldqa_s8_128_xp GPRPIE:$rs2, GPRPIE:$rs1))]>; def ESP_LDQA_U16_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "esp.ldqa.u16.128.ip\t $rs1, $off25616", []> @@ -12385,10 +12385,10 @@ def ESP_LDQA_U16_128_IP: 
Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offs let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_LDQA_U16_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_LDQA_U16_128_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "!esp_ldqa_u16_128_ip_p $rs1, $off25616", - [(int_riscv_esp_ldqa_u16_128_ip GPRPIE:$rs1, timm:$off25616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_ldqa_u16_128_ip GPRPIE:$rs1, timm:$off25616))]>; def ESP_LDQA_U16_128_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), "esp.ldqa.u16.128.xp\t $rs1, $rs2", []> @@ -12432,10 +12432,10 @@ def ESP_LDQA_U16_128_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRP let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_LDQA_U16_128_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_LDQA_U16_128_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), "!esp_ldqa_u16_128_xp_p $rs1, $rs2", - [(int_riscv_esp_ldqa_u16_128_xp GPRPIE:$rs2, GPRPIE:$rs1)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_ldqa_u16_128_xp GPRPIE:$rs2, GPRPIE:$rs1))]>; def ESP_LDQA_U8_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "esp.ldqa.u8.128.ip\t $rs1, $off25616", []> @@ -12475,10 +12475,10 @@ def ESP_LDQA_U8_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offse let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_LDQA_U8_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_LDQA_U8_128_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "!esp_ldqa_u8_128_ip_p $rs1, $off25616", - [(int_riscv_esp_ldqa_u8_128_ip GPRPIE:$rs1, timm:$off25616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_ldqa_u8_128_ip 
GPRPIE:$rs1, timm:$off25616))]>; def ESP_LDQA_U8_128_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), "esp.ldqa.u8.128.xp\t $rs1, $rs2", []> @@ -12522,10 +12522,10 @@ def ESP_LDQA_U8_128_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPI let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_LDQA_U8_128_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_LDQA_U8_128_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), "!esp_ldqa_u8_128_xp_p $rs1, $rs2", - [(int_riscv_esp_ldqa_u8_128_xp GPRPIE:$rs2, GPRPIE:$rs1)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_ldqa_u8_128_xp GPRPIE:$rs2, GPRPIE:$rs1))]>; def ESP_VLDBC_16_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_4:$off2564), "esp.vldbc.16.ip\t $qu, $rs1, $off2564", []> @@ -12565,10 +12565,10 @@ def ESP_VLDBC_16_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLDBC_16_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_4:$off2564, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VLDBC_16_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_4:$off2564, imm8:$qu), "!esp_vldbc_16_ip_p $qu, $rs1, $off2564", - [(int_riscv_esp_vldbc_16_ip GPRPIE:$rs1, timm:$off2564, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vldbc_16_ip GPRPIE:$rs1, timm:$off2564, timm:$qu))]>; def ESP_VLDBC_16_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), "esp.vldbc.16.xp\t $qu, $rs1, $rs2", []> @@ -12611,10 +12611,10 @@ def ESP_VLDBC_16_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLDBC_16_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VLDBC_16_XP_P : 
PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), "!esp_vldbc_16_xp_p $qu, $rs1, $rs2", - [(int_riscv_esp_vldbc_16_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vldbc_16_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu))]>; def ESP_VLDBC_32_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_4:$off2564), "esp.vldbc.32.ip\t $qu, $rs1, $off2564", []> @@ -12654,10 +12654,10 @@ def ESP_VLDBC_32_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLDBC_32_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_4:$off2564, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VLDBC_32_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_4:$off2564, imm8:$qu), "!esp_vldbc_32_ip_p $qu, $rs1, $off2564", - [(int_riscv_esp_vldbc_32_ip GPRPIE:$rs1, timm:$off2564, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vldbc_32_ip GPRPIE:$rs1, timm:$off2564, timm:$qu))]>; def ESP_VLDBC_32_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), "esp.vldbc.32.xp\t $qu, $rs1, $rs2", []> @@ -12700,10 +12700,10 @@ def ESP_VLDBC_32_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLDBC_32_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VLDBC_32_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), "!esp_vldbc_32_xp_p $qu, $rs1, $rs2", - [(int_riscv_esp_vldbc_32_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vldbc_32_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu))]>; def ESP_VLDBC_8_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_4:$off2564), "esp.vldbc.8.ip\t $qu, $rs1, $off2564", []> @@ -12743,10 +12743,10 @@ def ESP_VLDBC_8_IP: 
Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, o let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLDBC_8_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_4:$off2564, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VLDBC_8_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_4:$off2564, imm8:$qu), "!esp_vldbc_8_ip_p $qu, $rs1, $off2564", - [(int_riscv_esp_vldbc_8_ip GPRPIE:$rs1, timm:$off2564, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vldbc_8_ip GPRPIE:$rs1, timm:$off2564, timm:$qu))]>; def ESP_VLDBC_8_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), "esp.vldbc.8.xp\t $qu, $rs1, $rs2", []> @@ -12789,10 +12789,10 @@ def ESP_VLDBC_8_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, G let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLDBC_8_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VLDBC_8_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), "!esp_vldbc_8_xp_p $qu, $rs1, $rs2", - [(int_riscv_esp_vldbc_8_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vldbc_8_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu))]>; def ESP_VLDEXT_S16_IP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_16_16:$off1616), "esp.vldext.s16.ip\t $qu, $qz, $rs1, $off1616", []> @@ -12833,10 +12833,10 @@ def ESP_VLDEXT_S16_IP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPR let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLDEXT_S16_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VLDEXT_S16_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu, imm8:$qz), "!esp_vldext_s16_ip_p $qu, $qz, $rs1, $off1616", - 
[(int_riscv_esp_vldext_s16_ip GPRPIE:$rs1, timm:$off1616, timm:$qu, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vldext_s16_ip GPRPIE:$rs1, timm:$off1616, timm:$qu, timm:$qz))]>; def ESP_VLDEXT_S16_XP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), "esp.vldext.s16.xp\t $qu, $qz, $rs1, $rs2", []> @@ -12878,10 +12878,10 @@ def ESP_VLDEXT_S16_XP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPR let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLDEXT_S16_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VLDEXT_S16_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu, imm8:$qz), "!esp_vldext_s16_xp_p $qu, $qz, $rs1, $rs2", - [(int_riscv_esp_vldext_s16_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vldext_s16_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu, timm:$qz))]>; def ESP_VLDEXT_S8_IP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_16_16:$off1616), "esp.vldext.s8.ip\t $qu, $qz, $rs1, $off1616", []> @@ -12922,10 +12922,10 @@ def ESP_VLDEXT_S8_IP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRP let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLDEXT_S8_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VLDEXT_S8_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu, imm8:$qz), "!esp_vldext_s8_ip_p $qu, $qz, $rs1, $off1616", - [(int_riscv_esp_vldext_s8_ip GPRPIE:$rs1, timm:$off1616, timm:$qu, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vldext_s8_ip GPRPIE:$rs1, timm:$off1616, timm:$qu, timm:$qz))]>; def ESP_VLDEXT_S8_XP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), "esp.vldext.s8.xp\t $qu, $qz, $rs1, $rs2", []> @@ 
-12967,10 +12967,10 @@ def ESP_VLDEXT_S8_XP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRP let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLDEXT_S8_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VLDEXT_S8_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu, imm8:$qz), "!esp_vldext_s8_xp_p $qu, $qz, $rs1, $rs2", - [(int_riscv_esp_vldext_s8_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vldext_s8_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu, timm:$qz))]>; def ESP_VLDEXT_U16_IP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_16_16:$off1616), "esp.vldext.u16.ip\t $qu, $qz, $rs1, $off1616", []> @@ -13011,10 +13011,10 @@ def ESP_VLDEXT_U16_IP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPR let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLDEXT_U16_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VLDEXT_U16_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu, imm8:$qz), "!esp_vldext_u16_ip_p $qu, $qz, $rs1, $off1616", - [(int_riscv_esp_vldext_u16_ip GPRPIE:$rs1, timm:$off1616, timm:$qu, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vldext_u16_ip GPRPIE:$rs1, timm:$off1616, timm:$qu, timm:$qz))]>; def ESP_VLDEXT_U16_XP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), "esp.vldext.u16.xp\t $qu, $qz, $rs1, $rs2", []> @@ -13056,10 +13056,10 @@ def ESP_VLDEXT_U16_XP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPR let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLDEXT_U16_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def 
ESP_VLDEXT_U16_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu, imm8:$qz), "!esp_vldext_u16_xp_p $qu, $qz, $rs1, $rs2", - [(int_riscv_esp_vldext_u16_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vldext_u16_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu, timm:$qz))]>; def ESP_VLDEXT_U8_IP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_16_16:$off1616), "esp.vldext.u8.ip\t $qu, $qz, $rs1, $off1616", []> @@ -13100,10 +13100,10 @@ def ESP_VLDEXT_U8_IP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRP let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLDEXT_U8_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VLDEXT_U8_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu, imm8:$qz), "!esp_vldext_u8_ip_p $qu, $qz, $rs1, $off1616", - [(int_riscv_esp_vldext_u8_ip GPRPIE:$rs1, timm:$off1616, timm:$qu, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vldext_u8_ip GPRPIE:$rs1, timm:$off1616, timm:$qu, timm:$qz))]>; def ESP_VLDEXT_U8_XP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), "esp.vldext.u8.xp\t $qu, $qz, $rs1, $rs2", []> @@ -13145,10 +13145,10 @@ def ESP_VLDEXT_U8_XP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRP let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLDEXT_U8_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VLDEXT_U8_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu, imm8:$qz), "!esp_vldext_u8_xp_p $qu, $qz, $rs1, $rs2", - [(int_riscv_esp_vldext_u8_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vldext_u8_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu, timm:$qz))]>; def 
ESP_VLDHBC_16_INCP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs1), "esp.vldhbc.16.incp\t $qu, $qz, $rs1", []> @@ -13191,10 +13191,10 @@ def ESP_VLDHBC_16_INCP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GP let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLDHBC_16_INCP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qu, imm8:$qz), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VLDHBC_16_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, imm8:$qu, imm8:$qz), "!esp_vldhbc_16_incp_p $qu, $qz, $rs1", - [(int_riscv_esp_vldhbc_16_incp GPRPIE:$rs1, timm:$qu, timm:$qz)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vldhbc_16_incp GPRPIE:$rs1, timm:$qu, timm:$qz))]>; def ESP_LD_QACC_H_H_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "esp.ld.qacc.h.h.128.ip\t $rs1, $off25616", []> @@ -13234,10 +13234,10 @@ def ESP_LD_QACC_H_H_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, o let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_LD_QACC_H_H_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_LD_QACC_H_H_128_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "!esp_ld_qacc_h_h_128_ip_p $rs1, $off25616", - [(int_riscv_esp_ld_qacc_h_h_128_ip GPRPIE:$rs1, timm:$off25616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_ld_qacc_h_h_128_ip GPRPIE:$rs1, timm:$off25616))]>; def ESP_LD_QACC_H_L_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "esp.ld.qacc.h.l.128.ip\t $rs1, $off25616", []> @@ -13277,10 +13277,10 @@ def ESP_LD_QACC_H_L_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, o let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_LD_QACC_H_L_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def 
ESP_LD_QACC_H_L_128_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "!esp_ld_qacc_h_l_128_ip_p $rs1, $off25616", - [(int_riscv_esp_ld_qacc_h_l_128_ip GPRPIE:$rs1, timm:$off25616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_ld_qacc_h_l_128_ip GPRPIE:$rs1, timm:$off25616))]>; def ESP_LD_QACC_L_H_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "esp.ld.qacc.l.h.128.ip\t $rs1, $off25616", []> @@ -13320,10 +13320,10 @@ def ESP_LD_QACC_L_H_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, o let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_LD_QACC_L_H_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_LD_QACC_L_H_128_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "!esp_ld_qacc_l_h_128_ip_p $rs1, $off25616", - [(int_riscv_esp_ld_qacc_l_h_128_ip GPRPIE:$rs1, timm:$off25616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_ld_qacc_l_h_128_ip GPRPIE:$rs1, timm:$off25616))]>; def ESP_LD_QACC_L_L_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "esp.ld.qacc.l.l.128.ip\t $rs1, $off25616", []> @@ -13363,10 +13363,10 @@ def ESP_LD_QACC_L_L_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, o let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_LD_QACC_L_L_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_LD_QACC_L_L_128_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "!esp_ld_qacc_l_l_128_ip_p $rs1, $off25616", - [(int_riscv_esp_ld_qacc_l_l_128_ip GPRPIE:$rs1, timm:$off25616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_ld_qacc_l_l_128_ip GPRPIE:$rs1, timm:$off25616))]>; def ESP_LD_UA_STATE_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "esp.ld.ua.state.ip\t 
$rs1, $off25616", []> @@ -13407,10 +13407,10 @@ def ESP_LD_UA_STATE_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offse let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_LD_UA_STATE_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_LD_UA_STATE_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "!esp_ld_ua_state_ip_p $rs1, $off25616", - [(int_riscv_esp_ld_ua_state_ip GPRPIE:$rs1, timm:$off25616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_ld_ua_state_ip GPRPIE:$rs1, timm:$off25616))]>; def ESP_LDXQ_32: Esp32P4Inst<(outs QR:$qu), (ins GPRPIE:$rs1, QR:$qw, select_4:$sel4, select_8:$sel8), "esp.ldxq.32\t $qu, $qw, $rs1, $sel4, $sel8", []> @@ -13493,10 +13493,10 @@ def ESP_ST_QACC_H_H_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, o let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_ST_QACC_H_H_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_ST_QACC_H_H_128_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "!esp_st_qacc_h_h_128_ip_p $rs1, $off25616", - [(int_riscv_esp_st_qacc_h_h_128_ip GPRPIE:$rs1, timm:$off25616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_st_qacc_h_h_128_ip GPRPIE:$rs1, timm:$off25616))]>; def ESP_ST_QACC_H_L_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "esp.st.qacc.h.l.128.ip\t $rs1, $off25616", []> @@ -13536,10 +13536,10 @@ def ESP_ST_QACC_H_L_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, o let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_ST_QACC_H_L_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_ST_QACC_H_L_128_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), 
"!esp_st_qacc_h_l_128_ip_p $rs1, $off25616", - [(int_riscv_esp_st_qacc_h_l_128_ip GPRPIE:$rs1, timm:$off25616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_st_qacc_h_l_128_ip GPRPIE:$rs1, timm:$off25616))]>; def ESP_ST_QACC_L_H_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "esp.st.qacc.l.h.128.ip\t $rs1, $off25616", []> @@ -13579,10 +13579,10 @@ def ESP_ST_QACC_L_H_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, o let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_ST_QACC_L_H_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_ST_QACC_L_H_128_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "!esp_st_qacc_l_h_128_ip_p $rs1, $off25616", - [(int_riscv_esp_st_qacc_l_h_128_ip GPRPIE:$rs1, timm:$off25616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_st_qacc_l_h_128_ip GPRPIE:$rs1, timm:$off25616))]>; def ESP_ST_QACC_L_L_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "esp.st.qacc.l.l.128.ip\t $rs1, $off25616", []> @@ -13622,10 +13622,10 @@ def ESP_ST_QACC_L_L_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, o let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_ST_QACC_L_L_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_ST_QACC_L_L_128_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "!esp_st_qacc_l_l_128_ip_p $rs1, $off25616", - [(int_riscv_esp_st_qacc_l_l_128_ip GPRPIE:$rs1, timm:$off25616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_st_qacc_l_l_128_ip GPRPIE:$rs1, timm:$off25616))]>; def ESP_ST_UA_STATE_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "esp.st.ua.state.ip\t $rs1, $off25616", []> @@ -13666,10 +13666,10 @@ def ESP_ST_UA_STATE_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), 
(ins GPRPIE:$rs1, offse let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_ST_UA_STATE_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_ST_UA_STATE_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), "!esp_st_ua_state_ip_p $rs1, $off25616", - [(int_riscv_esp_st_ua_state_ip GPRPIE:$rs1, timm:$off25616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_st_ua_state_ip GPRPIE:$rs1, timm:$off25616))]>; def ESP_STXQ_32: Esp32P4Inst<(outs), (ins GPRPIE:$rs1, QR:$qw, QR:$qu, select_4:$sel4, select_8:$sel8), "esp.stxq.32\t $qu, $qw, $rs1, $sel4, $sel8", []> @@ -13752,10 +13752,10 @@ def ESP_VLD_128_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, o let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLD_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VLD_128_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616, imm8:$qu), "!esp_vld_128_ip_p $qu, $rs1, $off25616", - [(int_riscv_esp_vld_128_ip GPRPIE:$rs1, timm:$off25616, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vld_128_ip GPRPIE:$rs1, timm:$off25616, timm:$qu))]>; def ESP_VLD_128_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), "esp.vld.128.xp\t $qu, $rs1, $rs2", []> @@ -13798,10 +13798,10 @@ def ESP_VLD_128_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, G let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLD_128_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VLD_128_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), "!esp_vld_128_xp_p $qu, $rs1, $rs2", - [(int_riscv_esp_vld_128_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, 
(int_riscv_esp_vld_128_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu))]>; def ESP_VLD_H_64_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_8:$off2568), "esp.vld.h.64.ip\t $qu, $rs1, $off2568", []> @@ -13841,10 +13841,10 @@ def ESP_VLD_H_64_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLD_H_64_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_8:$off2568, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VLD_H_64_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_8:$off2568, imm8:$qu), "!esp_vld_h_64_ip_p $qu, $rs1, $off2568", - [(int_riscv_esp_vld_h_64_ip GPRPIE:$rs1, timm:$off2568, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vld_h_64_ip GPRPIE:$rs1, timm:$off2568, timm:$qu))]>; def ESP_VLD_H_64_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), "esp.vld.h.64.xp\t $qu, $rs1, $rs2", []> @@ -13887,10 +13887,10 @@ def ESP_VLD_H_64_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLD_H_64_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VLD_H_64_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), "!esp_vld_h_64_xp_p $qu, $rs1, $rs2", - [(int_riscv_esp_vld_h_64_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vld_h_64_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu))]>; def ESP_VLD_L_64_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_8:$off2568), "esp.vld.l.64.ip\t $qu, $rs1, $off2568", []> @@ -13930,10 +13930,10 @@ def ESP_VLD_L_64_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLD_L_64_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_8:$off2568, imm8:$qu), +let 
usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VLD_L_64_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_8:$off2568, imm8:$qu), "!esp_vld_l_64_ip_p $qu, $rs1, $off2568", - [(int_riscv_esp_vld_l_64_ip GPRPIE:$rs1, timm:$off2568, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vld_l_64_ip GPRPIE:$rs1, timm:$off2568, timm:$qu))]>; def ESP_VLD_L_64_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), "esp.vld.l.64.xp\t $qu, $rs1, $rs2", []> @@ -13976,10 +13976,10 @@ def ESP_VLD_L_64_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VLD_L_64_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VLD_L_64_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), "!esp_vld_l_64_xp_p $qu, $rs1, $rs2", - [(int_riscv_esp_vld_l_64_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vld_l_64_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu))]>; def ESP_VST_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qu, GPRPIE:$rs1, offset_256_16:$off25616), "esp.vst.128.ip\t $qu, $rs1, $off25616", []> @@ -14019,10 +14019,10 @@ def ESP_VST_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qu, GPRPIE:$rs1, o let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VST_128_IP_P : PseudoESP32P4<(outs), (ins imm8:$qu, GPRPIE:$rs1, offset_256_16:$off25616), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VST_128_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qu, GPRPIE:$rs1, offset_256_16:$off25616), "!esp_vst_128_ip_p $qu, $rs1, $off25616", - [(int_riscv_esp_vst_128_ip timm:$qu, GPRPIE:$rs1, timm:$off25616)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vst_128_ip timm:$qu, GPRPIE:$rs1, timm:$off25616))]>; def ESP_VST_128_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qu, GPRPIE:$rs1), 
"esp.vst.128.xp\t $qu, $rs1, $rs2", []> @@ -14065,10 +14065,10 @@ def ESP_VST_128_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qu, G let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VST_128_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qu, GPRPIE:$rs1), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VST_128_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qu, GPRPIE:$rs1), "!esp_vst_128_xp_p $qu, $rs1, $rs2", - [(int_riscv_esp_vst_128_xp GPRPIE:$rs2, timm:$qu, GPRPIE:$rs1)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vst_128_xp GPRPIE:$rs2, timm:$qu, GPRPIE:$rs1))]>; def ESP_VST_H_64_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qu, GPRPIE:$rs1, offset_256_8:$off2568), "esp.vst.h.64.ip\t $qu, $rs1, $off2568", []> @@ -14108,10 +14108,10 @@ def ESP_VST_H_64_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qu, GPRPIE:$rs1, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VST_H_64_IP_P : PseudoESP32P4<(outs), (ins imm8:$qu, GPRPIE:$rs1, offset_256_8:$off2568), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VST_H_64_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qu, GPRPIE:$rs1, offset_256_8:$off2568), "!esp_vst_h_64_ip_p $qu, $rs1, $off2568", - [(int_riscv_esp_vst_h_64_ip timm:$qu, GPRPIE:$rs1, timm:$off2568)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vst_h_64_ip timm:$qu, GPRPIE:$rs1, timm:$off2568))]>; def ESP_VST_H_64_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qu, GPRPIE:$rs1), "esp.vst.h.64.xp\t $qu, $rs1, $rs2", []> @@ -14154,10 +14154,10 @@ def ESP_VST_H_64_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qu, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VST_H_64_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qu, GPRPIE:$rs1), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VST_H_64_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qu, GPRPIE:$rs1), "!esp_vst_h_64_xp_p $qu, $rs1, 
$rs2", - [(int_riscv_esp_vst_h_64_xp GPRPIE:$rs2, timm:$qu, GPRPIE:$rs1)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vst_h_64_xp GPRPIE:$rs2, timm:$qu, GPRPIE:$rs1))]>; def ESP_VST_L_64_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qu, GPRPIE:$rs1, offset_256_8:$off2568), "esp.vst.l.64.ip\t $qu, $rs1, $off2568", []> @@ -14197,10 +14197,10 @@ def ESP_VST_L_64_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qu, GPRPIE:$rs1, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VST_L_64_IP_P : PseudoESP32P4<(outs), (ins imm8:$qu, GPRPIE:$rs1, offset_256_8:$off2568), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VST_L_64_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qu, GPRPIE:$rs1, offset_256_8:$off2568), "!esp_vst_l_64_ip_p $qu, $rs1, $off2568", - [(int_riscv_esp_vst_l_64_ip timm:$qu, GPRPIE:$rs1, timm:$off2568)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vst_l_64_ip timm:$qu, GPRPIE:$rs1, timm:$off2568))]>; def ESP_VST_L_64_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qu, GPRPIE:$rs1), "esp.vst.l.64.xp\t $qu, $rs1, $rs2", []> @@ -14243,10 +14243,10 @@ def ESP_VST_L_64_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qu, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_VST_L_64_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qu, GPRPIE:$rs1), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_VST_L_64_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qu, GPRPIE:$rs1), "!esp_vst_l_64_xp_p $qu, $rs1, $rs2", - [(int_riscv_esp_vst_l_64_xp GPRPIE:$rs2, timm:$qu, GPRPIE:$rs1)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_vst_l_64_xp GPRPIE:$rs2, timm:$qu, GPRPIE:$rs1))]>; def ESP_SLCI_2Q: Esp32P4Inst<(outs QR:$qyr, QR:$qwr), (ins QR:$qy, QR:$qw, select_16:$sel16), "esp.slci.2q\t $qy, $qw, $sel16", []> @@ -14425,10 +14425,10 @@ def ESP_SRC_Q_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r, QR:$qwr), (ins QR:$ let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_SRC_Q_LD_IP_P : 
PseudoESP32P4<(outs), (ins imm8:$qy, GPRPIE:$rs1, imm8:$qw, offset_256_16:$off25616, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_SRC_Q_LD_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qy, GPRPIE:$rs1, imm8:$qw, offset_256_16:$off25616, imm8:$qu), "!esp_src_q_ld_ip_p $qu, $rs1, $off25616, $qw, $qy", - [(int_riscv_esp_src_q_ld_ip timm:$qy, GPRPIE:$rs1, timm:$qw, timm:$off25616, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_src_q_ld_ip timm:$qy, GPRPIE:$rs1, timm:$qw, timm:$off25616, timm:$qu))]>; def ESP_SRC_Q_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r, QR:$qwr), (ins GPRPIE:$rs2, QR:$qy, GPRPIE:$rs1, QR:$qw), "esp.src.q.ld.xp\t $qu, $rs1, $rs2, $qw, $qy", []> @@ -14471,10 +14471,10 @@ def ESP_SRC_Q_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r, QR:$qwr), (ins GPRP let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_SRC_Q_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qy, GPRPIE:$rs1, imm8:$qw, imm8:$qu), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_SRC_Q_LD_XP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, imm8:$qy, GPRPIE:$rs1, imm8:$qw, imm8:$qu), "!esp_src_q_ld_xp_p $qu, $rs1, $rs2, $qw, $qy", - [(int_riscv_esp_src_q_ld_xp GPRPIE:$rs2, timm:$qy, GPRPIE:$rs1, timm:$qw, timm:$qu)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_src_q_ld_xp GPRPIE:$rs2, timm:$qy, GPRPIE:$rs1, timm:$qw, timm:$qu))]>; def ESP_SRC_Q_QUP: Esp32P4Inst<(outs QR:$qz, QR:$qwr), (ins QR:$qy, QR:$qw), "esp.src.q.qup\t $qz, $qw, $qy", []> @@ -14977,10 +14977,10 @@ def ESP_SRCQ_128_ST_INCP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qy, QR:$qw, let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_SRCQ_128_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, GPRPIE:$rs1), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_SRCQ_128_ST_INCP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins imm8:$qy, imm8:$qw, GPRPIE:$rs1), "!esp_srcq_128_st_incp_p $qw, $qy, $rs1", - 
[(int_riscv_esp_srcq_128_st_incp timm:$qy, timm:$qw, GPRPIE:$rs1)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_srcq_128_st_incp timm:$qy, timm:$qw, GPRPIE:$rs1))]>; def ESP_SRCXXP_2Q: Esp32P4Inst<(outs QR:$qyr, QR:$qwr), (ins GPRPIE:$rs1, GPRPIE:$rs2, QR:$qy, QR:$qw), "esp.srcxxp.2q\t $qy, $qw, $rs1, $rs2", []> @@ -15552,10 +15552,10 @@ def ESP_ST_S_XACC_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_ let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_ST_S_XACC_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_8:$off2568), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_ST_S_XACC_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_8:$off2568), "!esp_st_s_xacc_ip_p $rs1, $off2568", - [(int_riscv_esp_st_s_xacc_ip GPRPIE:$rs1, timm:$off2568)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_st_s_xacc_ip GPRPIE:$rs1, timm:$off2568))]>; def ESP_ST_U_XACC_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_8:$off2568), "esp.st.u.xacc.ip\t $rs1, $off2568", []> @@ -15596,8 +15596,8 @@ def ESP_ST_U_XACC_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_ let Inst{0} = 1; } -let usesCustomInserter = 1 in -def ESP_ST_U_XACC_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_8:$off2568), +let usesCustomInserter = 1, Constraints = "$rs1r = $rs1" in +def ESP_ST_U_XACC_IP_P : PseudoESP32P4<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_8:$off2568), "!esp_st_u_xacc_ip_p $rs1, $off2568", - [(int_riscv_esp_st_u_xacc_ip GPRPIE:$rs1, timm:$off2568)]>; + [(set GPRPIE:$rs1r, (int_riscv_esp_st_u_xacc_ip GPRPIE:$rs1, timm:$off2568))]>; diff --git a/llvm/test/CodeGen/RISCV/esp32p4.ll b/llvm/test/CodeGen/RISCV/esp32p4.ll index a30afb429845e..d6d174a73d193 100644 --- a/llvm/test/CodeGen/RISCV/esp32p4.ll +++ b/llvm/test/CodeGen/RISCV/esp32p4.ll @@ -14,1001 +14,1003 @@ define void @test(){ ; CHECK-NEXT: .cfi_offset s10, -8 ; CHECK-NEXT: .cfi_offset s11, -4 ; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: 
esp.vld.128.ip q0, a0, 784 -; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.vld.128.ip q1, a0, 784 -; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.vld.128.ip q2, a0, 784 -; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.vld.128.ip q3, a0, 784 -; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.vld.128.ip q4, a0, 784 -; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.vld.128.ip q5, a0, 784 -; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.vld.128.ip q6, a0, 784 -; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.vld.128.ip q7, a0, 784 -; CHECK-NEXT: esp.vcmulas.s16.qacc.h q0, q4 +; CHECK-NEXT: esp.vld.128.ip q0, a0, 496 +; CHECK-NEXT: esp.vld.128.ip q1, a0, 496 +; CHECK-NEXT: esp.vld.128.ip q2, a0, 496 +; CHECK-NEXT: esp.vld.128.ip q3, a0, 496 +; CHECK-NEXT: esp.vld.128.ip q4, a0, 496 +; CHECK-NEXT: esp.vld.128.ip q5, a0, 496 +; CHECK-NEXT: esp.vld.128.ip q6, a0, 496 +; CHECK-NEXT: esp.vld.128.ip q7, a0, 496 +; CHECK-NEXT: esp.vcmulas.s16.qacc.h q0, q6 ; CHECK-NEXT: li a0, 10 -; CHECK-NEXT: esp.vcmulas.s16.qacc.h.ld.ip q1, a0, -48, q6, q1 -; CHECK-NEXT: li s9, 12 -; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: esp.vcmulas.s16.qacc.h.ld.xp q1, a0, s9, q2, q7 -; CHECK-NEXT: esp.vcmulas.s16.qacc.l q7, q6 -; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.vcmulas.s16.qacc.l.ld.ip q7, a0, 48, q7, q0 -; CHECK-NEXT: li t4, 14 -; CHECK-NEXT: li a0, 7 -; CHECK-NEXT: esp.vcmulas.s16.qacc.l.ld.xp q1, a0, t4, q2, q7 -; CHECK-NEXT: esp.vcmulas.s8.qacc.h q1, q1 -; CHECK-NEXT: li a0, 5 -; CHECK-NEXT: esp.vcmulas.s8.qacc.h.ld.ip q4, a0, 32, q1, q6 -; CHECK-NEXT: li s11, 7 -; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: esp.vcmulas.s8.qacc.h.ld.xp q6, a0, s11, q3, q2 -; CHECK-NEXT: esp.vcmulas.s8.qacc.l q4, q5 -; CHECK-NEXT: li a0, 4 -; CHECK-NEXT: esp.vcmulas.s8.qacc.l.ld.ip q4, a0, -48, q2, q5 -; CHECK-NEXT: li a0, 14 -; CHECK-NEXT: esp.vcmulas.s8.qacc.l.ld.xp q7, a0, s11, q6, q3 -; CHECK-NEXT: esp.vmulas.s16.qacc q4, q2 -; CHECK-NEXT: li a0, 4 -; CHECK-NEXT: esp.vmulas.s16.qacc.ld.ip q1, a0, 96, q5, q7 -; CHECK-NEXT: li t3, 3 +; CHECK-NEXT: 
esp.vcmulas.s16.qacc.h.ld.ip q2, a0, 0, q0, q3 +; CHECK-NEXT: li t6, 7 +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.vcmulas.s16.qacc.h.ld.xp q6, a0, t6, q2, q0 +; CHECK-NEXT: esp.vcmulas.s16.qacc.l q2, q3 ; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.vmulas.s16.qacc.ld.xp q6, a0, t3, q4, q2 -; CHECK-NEXT: li a5, 0 -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: esp.vmulas.s16.qacc.st.ip q1, a0, 80, q7, q6 -; CHECK-NEXT: li a0, 5 -; CHECK-NEXT: esp.vmulas.s16.qacc.st.xp q6, a0, a0, q0, q7 -; CHECK-NEXT: esp.vmulas.s16.xacc q3, q5 +; CHECK-NEXT: esp.vcmulas.s16.qacc.l.ld.ip q1, a0, -64, q4, q3 +; CHECK-NEXT: li s11, 1 +; CHECK-NEXT: li a1, 12 +; CHECK-NEXT: esp.vcmulas.s16.qacc.l.ld.xp q7, a1, s11, q5, q4 +; CHECK-NEXT: esp.vcmulas.s8.qacc.h q0, q3 +; CHECK-NEXT: li a1, 3 +; CHECK-NEXT: esp.vcmulas.s8.qacc.h.ld.ip q5, a1, -48, q3, q7 +; CHECK-NEXT: li a3, 11 +; CHECK-NEXT: li a1, 6 +; CHECK-NEXT: esp.vcmulas.s8.qacc.h.ld.xp q4, a1, a3, q4, q0 +; CHECK-NEXT: esp.vcmulas.s8.qacc.l q4, q0 +; CHECK-NEXT: li a1, 2 +; CHECK-NEXT: esp.vcmulas.s8.qacc.l.ld.ip q0, a1, 0, q7, q5 +; CHECK-NEXT: li t5, 8 +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: esp.vcmulas.s8.qacc.l.ld.xp q6, a1, t5, q3, q0 +; CHECK-NEXT: esp.vmulas.s16.qacc q3, q0 +; CHECK-NEXT: li a1, 7 +; CHECK-NEXT: esp.vmulas.s16.qacc.ld.ip q4, a1, 80, q0, q1 +; CHECK-NEXT: li a1, 2 +; CHECK-NEXT: esp.vmulas.s16.qacc.ld.xp q7, a1, t6, q7, q2 +; CHECK-NEXT: li a1, 9 +; CHECK-NEXT: esp.vmulas.s16.qacc.st.ip q2, a1, -32, q3, q4 +; CHECK-NEXT: li a1, 10 +; CHECK-NEXT: esp.vmulas.s16.qacc.st.xp q4, a1, t5, q6, q6 +; CHECK-NEXT: esp.vmulas.s16.xacc q1, q0 +; CHECK-NEXT: li a1, 8 +; CHECK-NEXT: esp.vmulas.s16.xacc.ld.ip q4, a1, -16, q1, q6 +; CHECK-NEXT: li a1, 12 +; CHECK-NEXT: esp.vmulas.s16.xacc.ld.xp q7, a1, s11, q2, q3 +; CHECK-NEXT: li a1, 13 +; CHECK-NEXT: esp.vmulas.s16.xacc.st.ip q1, a1, 16, q4, q2 +; CHECK-NEXT: li a1, 3 +; CHECK-NEXT: esp.vmulas.s16.xacc.st.xp q3, a1, t5, q7, q4 +; CHECK-NEXT: esp.vmulas.s8.qacc q4, q3 +; CHECK-NEXT: li 
a1, 4 +; CHECK-NEXT: esp.vmulas.s8.qacc.ld.ip q4, a1, 32, q5, q2 +; CHECK-NEXT: li a1, 14 +; CHECK-NEXT: esp.vmulas.s8.qacc.ld.xp q7, a1, a1, q1, q6 +; CHECK-NEXT: li a1, 5 +; CHECK-NEXT: esp.vmulas.s8.qacc.st.ip q6, a1, -112, q1, q6 ; CHECK-NEXT: li a0, 9 -; CHECK-NEXT: esp.vmulas.s16.xacc.ld.ip q5, a0, 96, q1, q7 +; CHECK-NEXT: li a1, 7 +; CHECK-NEXT: esp.vmulas.s8.qacc.st.xp q5, a1, a0, q3, q3 +; CHECK-NEXT: esp.vmulas.s8.xacc q7, q0 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: li a1, 12 +; CHECK-NEXT: esp.vmulas.s8.xacc.ld.ip q7, a1, 16, q7, q0 +; CHECK-NEXT: li a1, 3 +; CHECK-NEXT: esp.vmulas.s8.xacc.ld.xp q0, a1, a0, q2, q1 +; CHECK-NEXT: li a1, 13 +; CHECK-NEXT: esp.vmulas.s8.xacc.st.ip q4, a1, 32, q5, q6 +; CHECK-NEXT: li s1, 3 +; CHECK-NEXT: li a1, 12 +; CHECK-NEXT: esp.vmulas.s8.xacc.st.xp q2, a1, s1, q1, q7 +; CHECK-NEXT: esp.vmulas.u16.qacc q5, q0 +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: esp.vmulas.u16.qacc.ld.ip q2, a1, 48, q4, q4 +; CHECK-NEXT: li a1, 9 +; CHECK-NEXT: esp.vmulas.u16.qacc.ld.xp q3, a1, t6, q1, q7 +; CHECK-NEXT: li a1, 12 +; CHECK-NEXT: esp.vmulas.u16.qacc.st.ip q1, a1, -128, q1, q2 +; CHECK-NEXT: li a1, 11 +; CHECK-NEXT: esp.vmulas.u16.qacc.st.xp q2, a1, t6, q1, q6 +; CHECK-NEXT: li a1, 12 +; CHECK-NEXT: esp.vmulas.u16.xacc q1, q7 +; CHECK-NEXT: li a2, 1 +; CHECK-NEXT: esp.vmulas.u16.xacc.ld.ip q4, a2, 16, q2, q4 +; CHECK-NEXT: li a2, 3 +; CHECK-NEXT: esp.vmulas.u16.xacc.ld.xp q5, a2, a1, q1, q7 +; CHECK-NEXT: li a2, 2 +; CHECK-NEXT: esp.vmulas.u16.xacc.st.ip q1, a2, -64, q7, q6 +; CHECK-NEXT: li a2, 9 +; CHECK-NEXT: esp.vmulas.u16.xacc.st.xp q1, a2, a2, q2, q4 +; CHECK-NEXT: li a5, 14 +; CHECK-NEXT: esp.vmulas.u8.qacc q7, q4 +; CHECK-NEXT: li a2, 5 +; CHECK-NEXT: esp.vmulas.u8.qacc.ld.ip q1, a2, -32, q7, q6 ; CHECK-NEXT: li a2, 8 -; CHECK-NEXT: li a0, 13 -; CHECK-NEXT: esp.vmulas.s16.xacc.ld.xp q0, a0, a2, q5, q5 -; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: esp.vmulas.s16.xacc.st.ip q2, a0, 16, q4, q6 -; CHECK-NEXT: li t6, 5 -; CHECK-NEXT: li a0, 2 
-; CHECK-NEXT: esp.vmulas.s16.xacc.st.xp q7, a0, t6, q7, q7 -; CHECK-NEXT: esp.vmulas.s8.qacc q6, q1 -; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.vmulas.s8.qacc.ld.ip q2, a0, -128, q3, q5 -; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.vmulas.s8.qacc.ld.xp q4, a0, t6, q0, q5 -; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: esp.vmulas.s8.qacc.st.ip q7, a0, 16, q6, q0 -; CHECK-NEXT: li a0, 10 -; CHECK-NEXT: esp.vmulas.s8.qacc.st.xp q4, a0, s9, q6, q1 -; CHECK-NEXT: esp.vmulas.s8.xacc q3, q7 -; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: esp.vmulas.s8.xacc.ld.ip q7, a0, -16, q4, q5 -; CHECK-NEXT: li t5, 10 -; CHECK-NEXT: li a0, 7 -; CHECK-NEXT: esp.vmulas.s8.xacc.ld.xp q1, a0, t5, q7, q0 -; CHECK-NEXT: li a1, 2 -; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.vmulas.s8.xacc.st.ip q6, a0, -128, q6, q1 +; CHECK-NEXT: esp.vmulas.u8.qacc.ld.xp q6, a2, a5, q6, q0 +; CHECK-NEXT: li a2, 12 +; CHECK-NEXT: esp.vmulas.u8.qacc.st.ip q0, a2, 96, q6, q4 +; CHECK-NEXT: li a2, 3 +; CHECK-NEXT: esp.vmulas.u8.qacc.st.xp q5, a2, a1, q2, q6 +; CHECK-NEXT: li s9, 4 +; CHECK-NEXT: esp.vmulas.u8.xacc q0, q2 +; CHECK-NEXT: li a2, 4 +; CHECK-NEXT: esp.vmulas.u8.xacc.ld.ip q2, a2, -112, q7, q6 +; CHECK-NEXT: li a2, 9 +; CHECK-NEXT: esp.vmulas.u8.xacc.ld.xp q3, a2, s9, q4, q0 +; CHECK-NEXT: li a2, 1 +; CHECK-NEXT: esp.vmulas.u8.xacc.st.ip q3, a2, -48, q1, q5 +; CHECK-NEXT: li s8, 5 +; CHECK-NEXT: li a2, 6 +; CHECK-NEXT: esp.vmulas.u8.xacc.st.xp q7, a2, s8, q5, q7 +; CHECK-NEXT: li a2, 6 +; CHECK-NEXT: esp.vmulas.s16.qacc.ldbc.incp q7, a2, q1, q2 +; CHECK-NEXT: li a2, 1 +; CHECK-NEXT: esp.vmulas.s8.qacc.ldbc.incp q2, a2, q1, q0 +; CHECK-NEXT: li a2, 6 +; CHECK-NEXT: esp.vmulas.u16.qacc.ldbc.incp q2, a2, q7, q6 +; CHECK-NEXT: li a2, 10 +; CHECK-NEXT: esp.vmulas.u8.qacc.ldbc.incp q4, a2, q7, q7 +; CHECK-NEXT: li s0, 0 +; CHECK-NEXT: esp.vsmulas.s16.qacc q4, q2, 0 +; CHECK-NEXT: li a2, 0 +; CHECK-NEXT: esp.vsmulas.s16.qacc.ld.incp q1, a2, q7, q7, 11 +; CHECK-NEXT: esp.vsmulas.s8.qacc q4, q0, 2 +; CHECK-NEXT: li a2, 4 +; 
CHECK-NEXT: esp.vsmulas.s8.qacc.ld.incp q5, a2, q5, q1, 7 +; CHECK-NEXT: esp.vsmulas.u16.qacc q5, q5, 3 +; CHECK-NEXT: li a2, 11 +; CHECK-NEXT: esp.vsmulas.u16.qacc.ld.incp q1, a2, q7, q5, 14 +; CHECK-NEXT: esp.vsmulas.u8.qacc q2, q4, 2 +; CHECK-NEXT: li a2, 6 +; CHECK-NEXT: esp.vsmulas.u8.qacc.ld.incp q5, a2, q3, q1, 14 +; CHECK-NEXT: esp.cmul.s16 q5, q3, q2, 1 +; CHECK-NEXT: li a2, 4 +; CHECK-NEXT: esp.cmul.s16.ld.incp q7, a2, q0, q6, q3, 2 +; CHECK-NEXT: li a2, 9 +; CHECK-NEXT: esp.cmul.s16.st.incp q0, a2, q5, q1, q7, 2 +; CHECK-NEXT: esp.cmul.s8 q3, q7, q6, 3 +; CHECK-NEXT: li a2, 1 +; CHECK-NEXT: esp.cmul.s8.ld.incp q1, a2, q3, q5, q0, 3 +; CHECK-NEXT: li a2, 2 +; CHECK-NEXT: esp.cmul.s8.st.incp q1, a2, q2, q1, q3, 2 +; CHECK-NEXT: esp.cmul.u16 q5, q1, q1, 2 +; CHECK-NEXT: li a2, 12 +; CHECK-NEXT: esp.cmul.u16.ld.incp q1, a2, q2, q5, q6, 2 +; CHECK-NEXT: li a2, 14 +; CHECK-NEXT: esp.cmul.u16.st.incp q2, a2, q4, q2, q5, 1 +; CHECK-NEXT: esp.cmul.u8 q6, q1, q3, 1 +; CHECK-NEXT: li a2, 14 +; CHECK-NEXT: esp.cmul.u8.ld.incp q5, a2, q3, q2, q2, 3 +; CHECK-NEXT: li a2, 10 +; CHECK-NEXT: esp.cmul.u8.st.incp q7, a2, q7, q1, q7, 3 +; CHECK-NEXT: esp.max.s16.a q0, a2 +; CHECK-NEXT: esp.max.s32.a q1, a2 +; CHECK-NEXT: esp.max.s8.a q3, a2 +; CHECK-NEXT: esp.max.u16.a q6, a2 +; CHECK-NEXT: esp.max.u32.a q7, a2 +; CHECK-NEXT: esp.max.u8.a q5, a2 +; CHECK-NEXT: esp.min.s16.a q3, a2 +; CHECK-NEXT: esp.min.s32.a q6, a2 +; CHECK-NEXT: esp.min.s8.a q0, a2 +; CHECK-NEXT: esp.min.u16.a q1, a2 +; CHECK-NEXT: esp.min.u32.a q0, a2 +; CHECK-NEXT: esp.min.u8.a q3, a2 +; CHECK-NEXT: esp.vabs.16 q5, q7 +; CHECK-NEXT: esp.vabs.32 q1, q3 +; CHECK-NEXT: esp.vabs.8 q4, q5 +; CHECK-NEXT: esp.vadd.s16 q4, q1, q5 +; CHECK-NEXT: li a2, 10 +; CHECK-NEXT: esp.vadd.s16.ld.incp q5, a2, q7, q2, q3 +; CHECK-NEXT: li a2, 4 +; CHECK-NEXT: esp.vadd.s16.st.incp q0, a2, q4, q0, q7 +; CHECK-NEXT: esp.vadd.s32 q4, q0, q5 +; CHECK-NEXT: li a2, 1 +; CHECK-NEXT: esp.vadd.s32.ld.incp q5, a2, q0, q3, q6 +; 
CHECK-NEXT: li a2, 0 +; CHECK-NEXT: esp.vadd.s32.st.incp q7, a2, q4, q3, q1 +; CHECK-NEXT: esp.vadd.s8 q7, q4, q4 +; CHECK-NEXT: li a2, 13 +; CHECK-NEXT: esp.vadd.s8.ld.incp q6, a2, q7, q6, q4 +; CHECK-NEXT: li a2, 0 +; CHECK-NEXT: esp.vadd.s8.st.incp q5, a2, q1, q2, q6 +; CHECK-NEXT: esp.vadd.u16 q0, q4, q6 +; CHECK-NEXT: li a2, 14 +; CHECK-NEXT: esp.vadd.u16.ld.incp q1, a2, q7, q0, q1 +; CHECK-NEXT: li a2, 5 +; CHECK-NEXT: esp.vadd.u16.st.incp q3, a2, q4, q0, q3 +; CHECK-NEXT: esp.vadd.u32 q3, q7, q5 +; CHECK-NEXT: li a2, 11 +; CHECK-NEXT: esp.vadd.u32.ld.incp q3, a2, q7, q3, q5 +; CHECK-NEXT: li a2, 10 +; CHECK-NEXT: esp.vadd.u32.st.incp q0, a2, q6, q0, q6 +; CHECK-NEXT: esp.vadd.u8 q5, q3, q0 +; CHECK-NEXT: li a2, 12 +; CHECK-NEXT: esp.vadd.u8.ld.incp q4, a2, q4, q4, q2 +; CHECK-NEXT: li a2, 3 +; CHECK-NEXT: esp.vadd.u8.st.incp q7, a2, q6, q5, q1 +; CHECK-NEXT: esp.vclamp.s16 q6, q0, 10 +; CHECK-NEXT: esp.vmax.s16 q5, q3, q1 +; CHECK-NEXT: li a2, 1 +; CHECK-NEXT: esp.vmax.s16.ld.incp q7, a2, q3, q6, q5 +; CHECK-NEXT: li a2, 8 +; CHECK-NEXT: esp.vmax.s16.st.incp q0, a2, q6, q5, q4 +; CHECK-NEXT: esp.vmax.s32 q7, q1, q3 +; CHECK-NEXT: li a2, 9 +; CHECK-NEXT: esp.vmax.s32.ld.incp q5, a2, q0, q0, q1 +; CHECK-NEXT: li a2, 5 +; CHECK-NEXT: esp.vmax.s32.st.incp q3, a2, q5, q0, q3 +; CHECK-NEXT: esp.vmax.s8 q4, q1, q6 +; CHECK-NEXT: li a2, 0 +; CHECK-NEXT: esp.vmax.s8.ld.incp q3, a2, q3, q6, q2 +; CHECK-NEXT: li a2, 6 +; CHECK-NEXT: esp.vmax.s8.st.incp q7, a2, q3, q6, q2 +; CHECK-NEXT: esp.vmax.u16 q5, q1, q4 +; CHECK-NEXT: li a2, 0 +; CHECK-NEXT: esp.vmax.u16.ld.incp q0, a2, q4, q6, q1 +; CHECK-NEXT: li a2, 10 +; CHECK-NEXT: esp.vmax.u16.st.incp q2, a2, q3, q6, q6 +; CHECK-NEXT: esp.vmax.u32 q0, q6, q5 +; CHECK-NEXT: li a2, 8 +; CHECK-NEXT: esp.vmax.u32.ld.incp q4, a2, q6, q6, q3 +; CHECK-NEXT: li a2, 11 +; CHECK-NEXT: esp.vmax.u32.st.incp q7, a2, q5, q3, q2 +; CHECK-NEXT: esp.vmax.u8 q5, q6, q4 +; CHECK-NEXT: li a2, 12 +; CHECK-NEXT: esp.vmax.u8.ld.incp q2, a2, q2, 
q3, q5 +; CHECK-NEXT: li a2, 5 +; CHECK-NEXT: esp.vmax.u8.st.incp q7, a2, q0, q7, q1 +; CHECK-NEXT: esp.vmin.s16 q2, q7, q6 +; CHECK-NEXT: li a2, 7 +; CHECK-NEXT: esp.vmin.s16.ld.incp q6, a2, q1, q0, q3 +; CHECK-NEXT: li a2, 12 +; CHECK-NEXT: esp.vmin.s16.st.incp q1, a2, q2, q1, q6 +; CHECK-NEXT: esp.vmin.s32 q3, q6, q3 +; CHECK-NEXT: li a2, 9 +; CHECK-NEXT: esp.vmin.s32.ld.incp q3, a2, q5, q1, q4 +; CHECK-NEXT: li a2, 1 +; CHECK-NEXT: esp.vmin.s32.st.incp q3, a2, q3, q2, q6 +; CHECK-NEXT: esp.vmin.s8 q4, q2, q2 +; CHECK-NEXT: li a2, 6 +; CHECK-NEXT: esp.vmin.s8.ld.incp q4, a2, q6, q2, q6 +; CHECK-NEXT: li a2, 11 +; CHECK-NEXT: esp.vmin.s8.st.incp q5, a2, q1, q5, q0 +; CHECK-NEXT: esp.vmin.u16 q5, q1, q5 +; CHECK-NEXT: li a2, 6 +; CHECK-NEXT: esp.vmin.u16.ld.incp q2, a2, q2, q7, q2 +; CHECK-NEXT: li a2, 14 +; CHECK-NEXT: esp.vmin.u16.st.incp q0, a2, q4, q5, q5 +; CHECK-NEXT: esp.vmin.u32 q5, q6, q0 +; CHECK-NEXT: li a2, 11 +; CHECK-NEXT: esp.vmin.u32.ld.incp q5, a2, q2, q3, q4 +; CHECK-NEXT: li a2, 1 +; CHECK-NEXT: esp.vmin.u32.st.incp q1, a2, q2, q3, q5 +; CHECK-NEXT: esp.vmin.u8 q0, q6, q6 +; CHECK-NEXT: li a2, 1 +; CHECK-NEXT: esp.vmin.u8.ld.incp q3, a2, q5, q4, q7 +; CHECK-NEXT: li a2, 6 +; CHECK-NEXT: esp.vmin.u8.st.incp q4, a2, q7, q2, q5 +; CHECK-NEXT: esp.vmul.s16 q6, q7, q6 +; CHECK-NEXT: li a2, 3 +; CHECK-NEXT: esp.vmul.s16.ld.incp q6, a2, q1, q5, q2 +; CHECK-NEXT: esp.vmul.s16.s8xs8 q2, q3, q0, q6 +; CHECK-NEXT: li a2, 1 +; CHECK-NEXT: esp.vmul.s16.st.incp q6, a2, q0, q6, q5 +; CHECK-NEXT: esp.vmul.s32.s16xs16 q4, q7, q2, q0 +; CHECK-NEXT: esp.vmul.s8 q7, q3, q7 +; CHECK-NEXT: li a2, 6 +; CHECK-NEXT: esp.vmul.s8.ld.incp q5, a2, q7, q0, q5 +; CHECK-NEXT: li a2, 6 +; CHECK-NEXT: esp.vmul.s8.st.incp q1, a2, q3, q2, q5 +; CHECK-NEXT: esp.vmul.u16 q6, q1, q4 +; CHECK-NEXT: li a2, 3 +; CHECK-NEXT: esp.vmul.u16.ld.incp q0, a2, q6, q2, q4 +; CHECK-NEXT: li a2, 10 +; CHECK-NEXT: esp.vmul.u16.st.incp q1, a2, q6, q1, q3 +; CHECK-NEXT: esp.vmul.u8 q6, q7, q1 +; 
CHECK-NEXT: li a2, 14 +; CHECK-NEXT: esp.vmul.u8.ld.incp q5, a2, q4, q0, q2 +; CHECK-NEXT: li a2, 10 +; CHECK-NEXT: li a4, 13 ; CHECK-NEXT: li a0, 4 -; CHECK-NEXT: esp.vmulas.s8.xacc.st.xp q5, a0, a1, q4, q1 -; CHECK-NEXT: esp.vmulas.u16.qacc q6, q1 -; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.vmulas.u16.qacc.ld.ip q7, a0, -32, q0, q0 +; CHECK-NEXT: esp.vmul.u8.st.incp q1, a0, q4, q0, q5 +; CHECK-NEXT: esp.vprelu.s16 q4, q2, q2, s0 +; CHECK-NEXT: esp.vprelu.s8 q5, q6, q4, a3 +; CHECK-NEXT: esp.vrelu.s16 q6, t5, s1 ; CHECK-NEXT: li a0, 6 -; CHECK-NEXT: esp.vmulas.u16.qacc.ld.xp q2, a0, s11, q6, q7 -; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.vmulas.u16.qacc.st.ip q4, a0, 16, q6, q5 -; CHECK-NEXT: li s0, 9 -; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: esp.vmulas.u16.qacc.st.xp q4, a0, s0, q3, q7 -; CHECK-NEXT: esp.vmulas.u16.xacc q6, q1 -; CHECK-NEXT: li a4, 6 -; CHECK-NEXT: li a0, 3 -; CHECK-NEXT: esp.vmulas.u16.xacc.ld.ip q2, a0, -48, q2, q2 -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: esp.vmulas.u16.xacc.ld.xp q7, a0, a4, q3, q0 -; CHECK-NEXT: li a0, 9 -; CHECK-NEXT: esp.vmulas.u16.xacc.st.ip q0, a0, 96, q1, q4 -; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: esp.vmulas.u16.xacc.st.xp q6, a0, t6, q3, q7 -; CHECK-NEXT: esp.vmulas.u8.qacc q7, q1 -; CHECK-NEXT: li a0, 9 -; CHECK-NEXT: esp.vmulas.u8.qacc.ld.ip q7, a0, -48, q7, q4 -; CHECK-NEXT: li a0, 11 -; CHECK-NEXT: esp.vmulas.u8.qacc.ld.xp q4, a0, s9, q6, q7 -; CHECK-NEXT: li a0, 14 -; CHECK-NEXT: esp.vmulas.u8.qacc.st.ip q2, a0, 0, q1, q7 -; CHECK-NEXT: li a0, 7 -; CHECK-NEXT: esp.vmulas.u8.qacc.st.xp q4, a0, a0, q0, q0 -; CHECK-NEXT: esp.vmulas.u8.xacc q6, q4 +; CHECK-NEXT: esp.vrelu.s8 q0, a0, s8 +; CHECK-NEXT: esp.vsadds.s16 q7, q4, s0 +; CHECK-NEXT: esp.vsadds.s8 q2, q2, a1 +; CHECK-NEXT: esp.vsadds.u16 q7, q2, t6 +; CHECK-NEXT: esp.vsadds.u8 q4, q0, s9 +; CHECK-NEXT: esp.vsat.s16 q1, q5, t5, t6 +; CHECK-NEXT: esp.vsat.s32 q7, q6, s0, a0 +; CHECK-NEXT: esp.vsat.s8 q1, q4, a0, a3 +; CHECK-NEXT: li t4, 6 +; CHECK-NEXT: li t3, 9 +; 
CHECK-NEXT: esp.vsat.u16 q4, q5, t3, a4 +; CHECK-NEXT: esp.vsat.u32 q6, q6, a3, a2 +; CHECK-NEXT: esp.vsat.u8 q1, q6, a5, t6 +; CHECK-NEXT: esp.vssubs.s16 q5, q0, s8 +; CHECK-NEXT: esp.vssubs.s8 q5, q2, t5 +; CHECK-NEXT: esp.vssubs.u16 q6, q7, a2 +; CHECK-NEXT: esp.vssubs.u8 q0, q7, s0 +; CHECK-NEXT: esp.vsub.s16 q1, q7, q1 ; CHECK-NEXT: li a0, 5 -; CHECK-NEXT: esp.vmulas.u8.xacc.ld.ip q3, a0, -80, q6, q2 -; CHECK-NEXT: li a0, 13 -; CHECK-NEXT: esp.vmulas.u8.xacc.ld.xp q4, a0, a0, q5, q1 -; CHECK-NEXT: li a0, 5 -; CHECK-NEXT: esp.vmulas.u8.xacc.st.ip q7, a0, -128, q2, q3 -; CHECK-NEXT: li a0, 5 -; CHECK-NEXT: esp.vmulas.u8.xacc.st.xp q2, a0, a4, q7, q2 -; CHECK-NEXT: li a0, 14 -; CHECK-NEXT: esp.vmulas.s16.qacc.ldbc.incp q0, a0, q0, q2 -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: esp.vmulas.s8.qacc.ldbc.incp q5, a0, q2, q6 +; CHECK-NEXT: esp.vsub.s16.ld.incp q2, a0, q1, q5, q2 ; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.vmulas.u16.qacc.ldbc.incp q5, a0, q7, q3 -; CHECK-NEXT: li a0, 6 -; CHECK-NEXT: esp.vmulas.u8.qacc.ldbc.incp q3, a0, q4, q4 -; CHECK-NEXT: esp.vsmulas.s16.qacc q1, q5, 14 -; CHECK-NEXT: li a0, 5 -; CHECK-NEXT: esp.vsmulas.s16.qacc.ld.incp q0, a0, q7, q4, 0 -; CHECK-NEXT: esp.vsmulas.s8.qacc q3, q5, 0 -; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.vsmulas.s8.qacc.ld.incp q1, a0, q1, q4, 6 -; CHECK-NEXT: esp.vsmulas.u16.qacc q6, q5, 15 -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: esp.vsmulas.u16.qacc.ld.incp q7, a0, q7, q1, 10 -; CHECK-NEXT: esp.vsmulas.u8.qacc q0, q7, 2 -; CHECK-NEXT: li a0, 10 -; CHECK-NEXT: esp.vsmulas.u8.qacc.ld.incp q4, a0, q3, q7, 8 -; CHECK-NEXT: esp.cmul.s16 q6, q0, q7, 3 -; CHECK-NEXT: li a0, 6 -; CHECK-NEXT: esp.cmul.s16.ld.incp q5, a0, q3, q0, q3, 0 -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: esp.cmul.s16.st.incp q5, a0, q0, q4, q5, 2 -; CHECK-NEXT: esp.cmul.s8 q1, q1, q0, 3 -; CHECK-NEXT: li a0, 5 -; CHECK-NEXT: esp.cmul.s8.ld.incp q4, a0, q7, q5, q4, 1 -; CHECK-NEXT: li a0, 14 -; CHECK-NEXT: esp.cmul.s8.st.incp q5, a0, q0, q6, q0, 3 -; 
CHECK-NEXT: esp.cmul.u16 q7, q7, q5, 2 +; CHECK-NEXT: esp.vsub.s16.st.incp q2, a0, q2, q4, q5 +; CHECK-NEXT: esp.vsub.s32 q0, q7, q0 ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: esp.cmul.u16.ld.incp q0, a0, q0, q0, q1, 1 -; CHECK-NEXT: li a0, 5 -; CHECK-NEXT: esp.cmul.u16.st.incp q2, a0, q4, q1, q4, 2 -; CHECK-NEXT: esp.cmul.u8 q3, q7, q5, 0 -; CHECK-NEXT: li a0, 11 -; CHECK-NEXT: esp.cmul.u8.ld.incp q4, a0, q0, q0, q2, 0 -; CHECK-NEXT: li a0, 10 -; CHECK-NEXT: esp.cmul.u8.st.incp q1, a0, q4, q6, q3, 2 -; CHECK-NEXT: esp.max.s16.a q6, a0 -; CHECK-NEXT: esp.max.s32.a q2, a0 -; CHECK-NEXT: esp.max.s8.a q0, a0 -; CHECK-NEXT: esp.max.u16.a q6, a0 -; CHECK-NEXT: esp.max.u32.a q6, a0 -; CHECK-NEXT: esp.max.u8.a q1, a0 -; CHECK-NEXT: esp.min.s16.a q6, a0 -; CHECK-NEXT: esp.min.s32.a q1, a0 -; CHECK-NEXT: esp.min.s8.a q0, a0 -; CHECK-NEXT: esp.min.u16.a q3, a0 -; CHECK-NEXT: esp.min.u32.a q0, a0 -; CHECK-NEXT: esp.min.u8.a q5, a0 -; CHECK-NEXT: esp.vabs.16 q6, q0 -; CHECK-NEXT: esp.vabs.32 q1, q4 -; CHECK-NEXT: esp.vabs.8 q5, q2 -; CHECK-NEXT: esp.vadd.s16 q6, q1, q5 +; CHECK-NEXT: esp.vsub.s32.ld.incp q6, a0, q5, q5, q7 ; CHECK-NEXT: li a0, 10 -; CHECK-NEXT: esp.vadd.s16.ld.incp q0, a0, q1, q0, q6 -; CHECK-NEXT: li a0, 11 -; CHECK-NEXT: esp.vadd.s16.st.incp q1, a0, q7, q0, q4 -; CHECK-NEXT: esp.vadd.s32 q7, q7, q3 -; CHECK-NEXT: li a0, 14 -; CHECK-NEXT: esp.vadd.s32.ld.incp q4, a0, q4, q7, q2 -; CHECK-NEXT: li a0, 13 -; CHECK-NEXT: esp.vadd.s32.st.incp q2, a0, q7, q1, q7 -; CHECK-NEXT: esp.vadd.s8 q7, q1, q7 -; CHECK-NEXT: li a0, 14 -; CHECK-NEXT: esp.vadd.s8.ld.incp q2, a0, q1, q5, q6 -; CHECK-NEXT: li a0, 9 -; CHECK-NEXT: esp.vadd.s8.st.incp q3, a0, q4, q1, q0 -; CHECK-NEXT: esp.vadd.u16 q0, q7, q7 -; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.vadd.u16.ld.incp q6, a0, q1, q7, q5 +; CHECK-NEXT: esp.vsub.s32.st.incp q2, a0, q2, q3, q2 +; CHECK-NEXT: esp.vsub.s8 q0, q7, q2 ; CHECK-NEXT: li a0, 5 -; CHECK-NEXT: esp.vadd.u16.st.incp q0, a0, q7, q6, q3 -; CHECK-NEXT: esp.vadd.u32 q4, 
q0, q1 -; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: esp.vadd.u32.ld.incp q1, a0, q4, q5, q0 -; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: esp.vadd.u32.st.incp q4, a0, q6, q0, q1 -; CHECK-NEXT: esp.vadd.u8 q5, q2, q5 -; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.vadd.u8.ld.incp q7, a0, q1, q4, q3 -; CHECK-NEXT: li a0, 5 -; CHECK-NEXT: esp.vadd.u8.st.incp q0, a0, q2, q0, q0 -; CHECK-NEXT: esp.vclamp.s16 q4, q5, 14 -; CHECK-NEXT: esp.vmax.s16 q5, q6, q5 -; CHECK-NEXT: li a0, 11 -; CHECK-NEXT: esp.vmax.s16.ld.incp q2, a0, q3, q5, q5 -; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: esp.vmax.s16.st.incp q3, a0, q4, q3, q5 -; CHECK-NEXT: esp.vmax.s32 q2, q5, q2 -; CHECK-NEXT: li a0, 7 -; CHECK-NEXT: esp.vmax.s32.ld.incp q0, a0, q6, q0, q1 -; CHECK-NEXT: li a0, 14 -; CHECK-NEXT: esp.vmax.s32.st.incp q6, a0, q1, q7, q6 -; CHECK-NEXT: esp.vmax.s8 q7, q5, q7 -; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: esp.vmax.s8.ld.incp q6, a0, q1, q5, q1 -; CHECK-NEXT: li a0, 4 -; CHECK-NEXT: esp.vmax.s8.st.incp q5, a0, q7, q1, q3 -; CHECK-NEXT: esp.vmax.u16 q1, q4, q1 -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: esp.vmax.u16.ld.incp q3, a0, q5, q5, q4 +; CHECK-NEXT: esp.vsub.s8.ld.incp q4, a0, q6, q4, q0 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.vsub.s8.st.incp q2, a0, q7, q1, q6 +; CHECK-NEXT: esp.vsub.u16 q0, q6, q7 ; CHECK-NEXT: li a0, 11 -; CHECK-NEXT: esp.vmax.u16.st.incp q5, a0, q5, q0, q7 -; CHECK-NEXT: esp.vmax.u32 q4, q0, q2 -; CHECK-NEXT: li a0, 4 -; CHECK-NEXT: esp.vmax.u32.ld.incp q6, a0, q1, q0, q6 -; CHECK-NEXT: li a0, 6 -; CHECK-NEXT: esp.vmax.u32.st.incp q0, a0, q1, q4, q7 -; CHECK-NEXT: esp.vmax.u8 q5, q2, q0 -; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: esp.vmax.u8.ld.incp q0, a0, q5, q6, q1 -; CHECK-NEXT: li a0, 10 -; CHECK-NEXT: esp.vmax.u8.st.incp q7, a0, q1, q6, q7 -; CHECK-NEXT: esp.vmin.s16 q4, q1, q3 +; CHECK-NEXT: esp.vsub.u16.ld.incp q7, a0, q5, q1, q6 ; CHECK-NEXT: li a0, 3 -; CHECK-NEXT: esp.vmin.s16.ld.incp q2, a0, q2, q2, q1 -; CHECK-NEXT: li a0, 7 -; CHECK-NEXT: esp.vmin.s16.st.incp q2, a0, q1, q7, 
q6 -; CHECK-NEXT: esp.vmin.s32 q2, q0, q3 -; CHECK-NEXT: li a0, 14 -; CHECK-NEXT: esp.vmin.s32.ld.incp q1, a0, q5, q7, q6 -; CHECK-NEXT: li a0, 6 -; CHECK-NEXT: esp.vmin.s32.st.incp q7, a0, q5, q5, q1 -; CHECK-NEXT: esp.vmin.s8 q2, q3, q6 +; CHECK-NEXT: esp.vsub.u16.st.incp q1, a0, q0, q4, q7 +; CHECK-NEXT: esp.vsub.u32 q6, q5, q7 ; CHECK-NEXT: li a0, 11 -; CHECK-NEXT: esp.vmin.s8.ld.incp q7, a0, q1, q4, q3 -; CHECK-NEXT: li a0, 6 -; CHECK-NEXT: esp.vmin.s8.st.incp q1, a0, q4, q0, q1 -; CHECK-NEXT: esp.vmin.u16 q4, q3, q7 -; CHECK-NEXT: li a0, 11 -; CHECK-NEXT: esp.vmin.u16.ld.incp q4, a0, q5, q6, q6 -; CHECK-NEXT: li a0, 12 -; CHECK-NEXT: esp.vmin.u16.st.incp q1, a0, q2, q6, q0 -; CHECK-NEXT: esp.vmin.u32 q5, q0, q7 -; CHECK-NEXT: li a0, 13 -; CHECK-NEXT: esp.vmin.u32.ld.incp q7, a0, q6, q5, q6 -; CHECK-NEXT: li a0, 4 -; CHECK-NEXT: esp.vmin.u32.st.incp q5, a0, q4, q3, q7 -; CHECK-NEXT: esp.vmin.u8 q7, q5, q5 -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: esp.vmin.u8.ld.incp q2, a0, q5, q0, q5 -; CHECK-NEXT: li a0, 12 -; CHECK-NEXT: esp.vmin.u8.st.incp q2, a0, q1, q6, q6 -; CHECK-NEXT: esp.vmul.s16 q6, q2, q1 -; CHECK-NEXT: li a0, 10 -; CHECK-NEXT: esp.vmul.s16.ld.incp q0, a0, q3, q6, q7 -; CHECK-NEXT: esp.vmul.s16.s8xs8 q7, q0, q3, q5 -; CHECK-NEXT: li a0, 3 -; CHECK-NEXT: esp.vmul.s16.st.incp q2, a0, q0, q7, q1 -; CHECK-NEXT: esp.vmul.s32.s16xs16 q3, q4, q5, q2 -; CHECK-NEXT: esp.vmul.s8 q3, q4, q0 -; CHECK-NEXT: li a0, 0 -; CHECK-NEXT: esp.vmul.s8.ld.incp q0, a0, q2, q2, q3 -; CHECK-NEXT: li s1, 4 -; CHECK-NEXT: li s8, 13 -; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.vmul.s8.st.incp q0, a0, q3, q0, q7 -; CHECK-NEXT: esp.vmul.u16 q2, q3, q7 -; CHECK-NEXT: li a0, 7 -; CHECK-NEXT: esp.vmul.u16.ld.incp q5, a0, q5, q6, q6 -; CHECK-NEXT: li a0, 3 -; CHECK-NEXT: esp.vmul.u16.st.incp q3, a0, q2, q4, q4 -; CHECK-NEXT: esp.vmul.u8 q7, q3, q7 +; CHECK-NEXT: esp.vsub.u32.ld.incp q0, a0, q4, q3, q0 +; CHECK-NEXT: li s10, 2 ; CHECK-NEXT: li a0, 9 -; CHECK-NEXT: esp.vmul.u8.ld.incp q0, 
a0, q1, q0, q6 -; CHECK-NEXT: li s10, 1 +; CHECK-NEXT: esp.vsub.u32.st.incp q4, a0, q4, q0, q4 +; CHECK-NEXT: esp.vsub.u8 q6, q6, q7 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vsub.u8.ld.incp q3, a0, q3, q3, q5 ; CHECK-NEXT: li a0, 11 -; CHECK-NEXT: li a3, 6 -; CHECK-NEXT: esp.vmul.u8.st.incp q7, a3, q4, q0, q3 -; CHECK-NEXT: esp.vprelu.s16 q1, q4, q3, a2 -; CHECK-NEXT: esp.vprelu.s8 q2, q4, q5, t3 -; CHECK-NEXT: esp.vrelu.s16 q6, s0, a2 -; CHECK-NEXT: esp.vrelu.s8 q5, s10, s9 -; CHECK-NEXT: esp.vsadds.s16 q3, q3, s9 -; CHECK-NEXT: esp.vsadds.s8 q7, q1, s11 -; CHECK-NEXT: esp.vsadds.u16 q3, q2, s1 -; CHECK-NEXT: esp.vsadds.u8 q2, q3, a2 -; CHECK-NEXT: esp.vsat.s16 q5, q0, s0, s10 -; CHECK-NEXT: esp.vsat.s32 q3, q3, s9, s0 -; CHECK-NEXT: esp.vsat.s8 q0, q7, t5, a1 -; CHECK-NEXT: esp.vsat.u16 q3, q7, s11, s11 -; CHECK-NEXT: esp.vsat.u32 q3, q5, a2, a1 -; CHECK-NEXT: esp.vsat.u8 q0, q6, s10, s8 -; CHECK-NEXT: esp.vssubs.s16 q3, q7, t3 -; CHECK-NEXT: esp.vssubs.s8 q7, q0, t4 -; CHECK-NEXT: esp.vssubs.u16 q5, q4, a5 -; CHECK-NEXT: esp.vssubs.u8 q5, q1, a0 -; CHECK-NEXT: esp.vsub.s16 q0, q0, q6 -; CHECK-NEXT: li a3, 6 -; CHECK-NEXT: esp.vsub.s16.ld.incp q2, a3, q2, q3, q7 -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: esp.vsub.s16.st.incp q7, a3, q0, q0, q3 -; CHECK-NEXT: esp.vsub.s32 q7, q2, q7 -; CHECK-NEXT: li a3, 7 -; CHECK-NEXT: esp.vsub.s32.ld.incp q4, a3, q3, q2, q0 -; CHECK-NEXT: li a3, 5 -; CHECK-NEXT: esp.vsub.s32.st.incp q4, a3, q1, q1, q1 -; CHECK-NEXT: esp.vsub.s8 q7, q5, q6 -; CHECK-NEXT: li a3, 1 -; CHECK-NEXT: esp.vsub.s8.ld.incp q4, a3, q1, q2, q6 -; CHECK-NEXT: li a3, 4 -; CHECK-NEXT: esp.vsub.s8.st.incp q5, a3, q4, q2, q3 -; CHECK-NEXT: esp.vsub.u16 q5, q7, q0 -; CHECK-NEXT: li a3, 11 -; CHECK-NEXT: esp.vsub.u16.ld.incp q4, a3, q0, q7, q5 -; CHECK-NEXT: li a3, 11 -; CHECK-NEXT: esp.vsub.u16.st.incp q0, a3, q1, q3, q1 -; CHECK-NEXT: esp.vsub.u32 q5, q4, q2 -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: esp.vsub.u32.ld.incp q4, a3, q2, q4, q2 -; CHECK-NEXT: li a3, 11 -; 
CHECK-NEXT: esp.vsub.u32.st.incp q0, a3, q7, q7, q4 -; CHECK-NEXT: esp.vsub.u8 q6, q5, q4 -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: esp.vsub.u8.ld.incp q4, a3, q6, q2, q4 -; CHECK-NEXT: esp.vsub.u8.st.incp q3, a0, q3, q2, q0 -; CHECK-NEXT: esp.addx2 zero, t6, t4 -; CHECK-NEXT: esp.addx4 zero, t4, t6 -; CHECK-NEXT: li a0, 4 -; CHECK-NEXT: esp.sat a0, a4, a1 -; CHECK-NEXT: esp.subx2 zero, a1, a1 -; CHECK-NEXT: esp.subx4 zero, a5, s0 -; CHECK-NEXT: esp.andq q7, q6, q3 -; CHECK-NEXT: esp.notq q6, q5 -; CHECK-NEXT: esp.orq q1, q1, q0 -; CHECK-NEXT: esp.xorq q5, q1, q6 -; CHECK-NEXT: esp.vcmp.eq.s16 q1, q0, q2 -; CHECK-NEXT: esp.vcmp.eq.s32 q5, q1, q6 -; CHECK-NEXT: esp.vcmp.eq.s8 q2, q0, q3 -; CHECK-NEXT: esp.vcmp.eq.u16 q7, q7, q1 -; CHECK-NEXT: esp.vcmp.eq.u32 q2, q1, q2 -; CHECK-NEXT: esp.vcmp.eq.u8 q3, q1, q6 -; CHECK-NEXT: esp.vcmp.gt.s16 q4, q5, q6 -; CHECK-NEXT: esp.vcmp.gt.s32 q0, q6, q2 -; CHECK-NEXT: esp.vcmp.gt.s8 q2, q3, q5 -; CHECK-NEXT: esp.vcmp.gt.u16 q7, q7, q4 -; CHECK-NEXT: esp.vcmp.gt.u32 q2, q6, q2 -; CHECK-NEXT: esp.vcmp.gt.u8 q0, q2, q0 -; CHECK-NEXT: esp.vcmp.lt.s16 q7, q2, q1 -; CHECK-NEXT: esp.vcmp.lt.s32 q4, q2, q1 -; CHECK-NEXT: esp.vcmp.lt.s8 q6, q5, q2 -; CHECK-NEXT: esp.vcmp.lt.u16 q4, q1, q5 -; CHECK-NEXT: esp.vcmp.lt.u32 q2, q5, q6 -; CHECK-NEXT: esp.vcmp.lt.u8 q5, q3, q5 -; CHECK-NEXT: esp.mov.s16.qacc q2 -; CHECK-NEXT: esp.mov.s8.qacc q5 -; CHECK-NEXT: esp.mov.u16.qacc q5 -; CHECK-NEXT: esp.mov.u8.qacc q3 -; CHECK-NEXT: esp.movi.16.a q2, a0, 3 -; CHECK-NEXT: esp.movi.16.q q3, s11, 13 -; CHECK-NEXT: esp.movi.32.a q6, a0, 1 -; CHECK-NEXT: esp.movi.32.q q5, s0, 1 -; CHECK-NEXT: esp.movi.8.a q5, a0, 15 -; CHECK-NEXT: esp.movi.8.q q1, a5, 6 -; CHECK-NEXT: esp.movx.r.cfg a0 +; CHECK-NEXT: esp.vsub.u8.st.incp q7, a0, q0, q6, q6 +; CHECK-NEXT: esp.addx2 zero, s8, t4 +; CHECK-NEXT: esp.addx4 zero, s10, t3 +; CHECK-NEXT: li t4, 9 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.sat a0, a5, t5 +; CHECK-NEXT: esp.subx2 zero, a2, a3 +; CHECK-NEXT: esp.subx4 
zero, t6, a4 +; CHECK-NEXT: esp.andq q3, q6, q6 +; CHECK-NEXT: esp.notq q0, q3 +; CHECK-NEXT: esp.orq q6, q2, q6 +; CHECK-NEXT: esp.xorq q6, q2, q2 +; CHECK-NEXT: esp.vcmp.eq.s16 q2, q2, q6 +; CHECK-NEXT: esp.vcmp.eq.s32 q5, q1, q1 +; CHECK-NEXT: esp.vcmp.eq.s8 q7, q5, q3 +; CHECK-NEXT: esp.vcmp.eq.u16 q5, q5, q2 +; CHECK-NEXT: esp.vcmp.eq.u32 q5, q6, q2 +; CHECK-NEXT: esp.vcmp.eq.u8 q0, q1, q2 +; CHECK-NEXT: esp.vcmp.gt.s16 q5, q2, q1 +; CHECK-NEXT: esp.vcmp.gt.s32 q4, q6, q4 +; CHECK-NEXT: esp.vcmp.gt.s8 q6, q4, q1 +; CHECK-NEXT: esp.vcmp.gt.u16 q5, q1, q7 +; CHECK-NEXT: esp.vcmp.gt.u32 q5, q5, q3 +; CHECK-NEXT: esp.vcmp.gt.u8 q2, q1, q5 +; CHECK-NEXT: esp.vcmp.lt.s16 q1, q2, q6 +; CHECK-NEXT: esp.vcmp.lt.s32 q3, q4, q0 +; CHECK-NEXT: esp.vcmp.lt.s8 q7, q6, q3 +; CHECK-NEXT: esp.vcmp.lt.u16 q3, q6, q0 +; CHECK-NEXT: esp.vcmp.lt.u32 q2, q1, q6 +; CHECK-NEXT: esp.vcmp.lt.u8 q0, q5, q3 +; CHECK-NEXT: esp.mov.s16.qacc q1 +; CHECK-NEXT: esp.mov.s8.qacc q6 +; CHECK-NEXT: esp.mov.u16.qacc q2 +; CHECK-NEXT: esp.mov.u8.qacc q2 +; CHECK-NEXT: esp.movi.16.a q6, a0, 12 +; CHECK-NEXT: esp.movi.16.q q5, t6, 6 +; CHECK-NEXT: esp.movi.32.a q3, a0, 1 +; CHECK-NEXT: esp.movi.32.q q7, a1, 3 +; CHECK-NEXT: esp.movi.8.a q1, a0, 9 +; CHECK-NEXT: esp.movi.8.q q5, a5, 9 ; CHECK-NEXT: esp.movx.r.fft.bit.width a0 -; CHECK-NEXT: li a0, 33 -; CHECK-NEXT: esp.movx.r.perf a0, a0 +; CHECK-NEXT: esp.movx.r.perf a0, s0 ; CHECK-NEXT: esp.movx.r.sar a0 ; CHECK-NEXT: esp.movx.r.sar.bytes a0 ; CHECK-NEXT: esp.movx.r.xacc.h a0 ; CHECK-NEXT: esp.movx.r.xacc.l a0 -; CHECK-NEXT: esp.movx.w.cfg t5 -; CHECK-NEXT: esp.movx.w.fft.bit.width s10 -; CHECK-NEXT: esp.movx.w.perf a2 -; CHECK-NEXT: esp.movx.w.sar t3 -; CHECK-NEXT: esp.movx.w.sar.bytes s1 -; CHECK-NEXT: esp.movx.w.xacc.h a2 -; CHECK-NEXT: esp.movx.w.xacc.l s1 -; CHECK-NEXT: esp.vext.s16 q7, q0, q6 -; CHECK-NEXT: esp.vext.s8 q5, q3, q3 -; CHECK-NEXT: esp.vext.u16 q4, q2, q6 -; CHECK-NEXT: esp.vext.u8 q4, q0, q0 -; CHECK-NEXT: esp.vunzip.16 q1, q0 -; 
CHECK-NEXT: esp.vunzip.32 q6, q4 -; CHECK-NEXT: esp.vunzip.8 q2, q1 -; CHECK-NEXT: esp.vunzipt.16 q7, q0, q2 -; CHECK-NEXT: esp.vunzipt.8 q0, q6, q2 -; CHECK-NEXT: esp.vzip.16 q1, q6 -; CHECK-NEXT: esp.vzip.32 q4, q6 -; CHECK-NEXT: esp.vzip.8 q4, q0 -; CHECK-NEXT: esp.vzipt.16 q0, q3, q5 -; CHECK-NEXT: esp.vzipt.8 q6, q1, q5 -; CHECK-NEXT: esp.zero.q q5 +; CHECK-NEXT: esp.movx.w.cfg s8 +; CHECK-NEXT: esp.movx.w.fft.bit.width a2 +; CHECK-NEXT: esp.movx.w.perf s11 +; CHECK-NEXT: esp.movx.w.sar t4 +; CHECK-NEXT: esp.movx.w.sar.bytes s8 +; CHECK-NEXT: esp.movx.w.xacc.h s10 +; CHECK-NEXT: esp.movx.w.xacc.l s8 +; CHECK-NEXT: esp.vext.s16 q6, q1, q6 +; CHECK-NEXT: esp.vext.s8 q6, q0, q0 +; CHECK-NEXT: esp.vext.u16 q3, q0, q7 +; CHECK-NEXT: esp.vext.u8 q3, q0, q3 +; CHECK-NEXT: esp.vunzip.16 q1, q5 +; CHECK-NEXT: esp.vunzip.32 q6, q1 +; CHECK-NEXT: esp.vunzip.8 q5, q6 +; CHECK-NEXT: esp.vunzipt.16 q2, q2, q5 +; CHECK-NEXT: esp.vunzipt.8 q6, q0, q7 +; CHECK-NEXT: esp.vzip.16 q1, q2 +; CHECK-NEXT: esp.vzip.32 q6, q3 +; CHECK-NEXT: esp.vzip.8 q1, q0 +; CHECK-NEXT: esp.vzipt.16 q7, q7, q4 +; CHECK-NEXT: esp.vzipt.8 q4, q5, q2 +; CHECK-NEXT: esp.zero.q q0 ; CHECK-NEXT: esp.zero.qacc ; CHECK-NEXT: esp.zero.xacc -; CHECK-NEXT: li a0, 3 -; CHECK-NEXT: esp.fft.ams.s16.ld.incp q6, a0, q6, q0, q3, q0, q1, 0 -; CHECK-NEXT: li a0, 5 -; CHECK-NEXT: esp.fft.ams.s16.ld.incp.uaup q3, a0, q0, q2, q3, q1, q0, 0 -; CHECK-NEXT: li a0, 3 -; CHECK-NEXT: esp.fft.ams.s16.ld.r32.decp q7, a0, q0, q6, q3, q1, q5, 1 -; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: li a3, 4 -; CHECK-NEXT: esp.fft.ams.s16.st.incp q5, q7, a0, a3, q5, q3, q6, 0 -; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: esp.fft.bitrev q7, a0 -; CHECK-NEXT: li a0, 4 -; CHECK-NEXT: esp.fft.cmul.s16.ld.xp q2, a0, s10, q3, q7, q7, 1 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.fft.ams.s16.ld.incp q6, a0, q2, q1, q7, q6, q0, 1 ; CHECK-NEXT: li a0, 4 -; CHECK-NEXT: esp.fft.cmul.s16.st.xp q7, q0, q4, a0, a1, 4, 3, 1 -; CHECK-NEXT: esp.fft.r2bf.s16 q7, q3, q5, 
q1, 0 -; CHECK-NEXT: li a0, 10 -; CHECK-NEXT: esp.fft.r2bf.s16.st.incp q7, q7, q4, a0, 2 +; CHECK-NEXT: esp.fft.ams.s16.ld.incp.uaup q7, a0, q6, q3, q7, q7, q0, 1 +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: esp.fft.ams.s16.ld.r32.decp q4, a0, q2, q6, q3, q7, q1, 0 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: li t3, 14 +; CHECK-NEXT: esp.fft.ams.s16.st.incp q6, q0, t3, a0, q3, q1, q7, 1 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.fft.bitrev q2, a0 ; CHECK-NEXT: li a0, 7 -; CHECK-NEXT: esp.fft.vst.r32.decp q5, a0, 1 -; CHECK-NEXT: li a0, 4 -; CHECK-NEXT: esp.ld.128.usar.ip q1, a0, 608 +; CHECK-NEXT: esp.fft.cmul.s16.ld.xp q4, a0, s11, q6, q1, q7, 2 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.fft.cmul.s16.st.xp q0, q5, q4, a0, a2, 2, 0, 1 +; CHECK-NEXT: esp.fft.r2bf.s16 q3, q7, q3, q0, 0 ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: esp.ld.128.usar.xp q2, a0, a2 -; CHECK-NEXT: li a0, 6 -; CHECK-NEXT: esp.ld.xacc.ip a0, 400 +; CHECK-NEXT: esp.fft.r2bf.s16.st.incp q1, q5, q0, a0, 0 +; CHECK-NEXT: mv a0, s0 +; CHECK-NEXT: esp.fft.vst.r32.decp q3, a0, 0 +; CHECK-NEXT: li a0, 12 +; CHECK-NEXT: esp.ld.128.usar.ip q3, a0, -1776 ; CHECK-NEXT: li a0, 13 -; CHECK-NEXT: esp.ldqa.s16.128.ip a0, 912 +; CHECK-NEXT: esp.ld.128.usar.xp q0, a0, s0 ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: esp.ldqa.s16.128.xp a0, t5 +; CHECK-NEXT: esp.ld.xacc.ip a0, -488 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.ldqa.s16.128.ip a0, -1488 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.ldqa.s16.128.xp a0, s0 ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: esp.ldqa.s8.128.ip a0, 1824 -; CHECK-NEXT: li a0, 9 -; CHECK-NEXT: esp.ldqa.s8.128.xp a0, s1 -; CHECK-NEXT: li a0, 4 -; CHECK-NEXT: esp.ldqa.u16.128.ip a0, -1904 -; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.ldqa.s8.128.ip a0, -256 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.ldqa.s8.128.xp a0, a0 +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: esp.ldqa.u16.128.ip a0, -1936 +; CHECK-NEXT: li a0, 11 ; CHECK-NEXT: esp.ldqa.u16.128.xp a0, t4 -; CHECK-NEXT: li a0, 3 -; CHECK-NEXT: esp.ldqa.u8.128.ip a0, 
1216 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.ldqa.u8.128.ip a0, 688 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.ldqa.u8.128.xp a0, s1 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.vldbc.16.ip q1, a0, 80 ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: esp.ldqa.u8.128.xp a0, a4 +; CHECK-NEXT: esp.vldbc.16.xp q2, a0, s1 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vldbc.32.ip q5, a0, 396 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.vldbc.32.xp q3, a0, a5 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.vldbc.8.ip q4, a0, -492 ; CHECK-NEXT: li a0, 9 -; CHECK-NEXT: esp.vldbc.16.ip q7, a0, -448 -; CHECK-NEXT: li a0, 5 -; CHECK-NEXT: esp.vldbc.16.xp q3, a0, s0 -; CHECK-NEXT: mv a0, a5 -; CHECK-NEXT: esp.vldbc.32.ip q3, a0, 220 -; CHECK-NEXT: li a0, 12 -; CHECK-NEXT: esp.vldbc.32.xp q7, a0, a1 +; CHECK-NEXT: esp.vldbc.8.xp q6, a0, a5 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.vldext.s16.ip q6, q2, a0, 32 +; CHECK-NEXT: mv a0, s0 +; CHECK-NEXT: esp.vldext.s16.xp q2, q6, a0, a3 +; CHECK-NEXT: li a0, 9 +; CHECK-NEXT: esp.vldext.s8.ip q1, q0, a0, -112 ; CHECK-NEXT: li a0, 12 -; CHECK-NEXT: esp.vldbc.8.ip q2, a0, 396 -; CHECK-NEXT: li a0, 4 -; CHECK-NEXT: esp.vldbc.8.xp q7, a0, s0 -; CHECK-NEXT: li a0, 13 -; CHECK-NEXT: esp.vldext.s16.ip q7, q4, a0, 16 -; CHECK-NEXT: mv a0, a5 -; CHECK-NEXT: esp.vldext.s16.xp q5, q0, a0, a2 -; CHECK-NEXT: li a0, 4 -; CHECK-NEXT: esp.vldext.s8.ip q3, q6, a0, 80 -; CHECK-NEXT: li a0, 3 -; CHECK-NEXT: esp.vldext.s8.xp q1, q1, a0, a4 +; CHECK-NEXT: esp.vldext.s8.xp q7, q0, a0, a5 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vldext.u16.ip q3, q1, a0, 48 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.vldext.u16.xp q0, q1, a0, s9 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.vldext.u8.ip q5, q0, a0, -48 ; CHECK-NEXT: li a0, 14 -; CHECK-NEXT: esp.vldext.u16.ip q2, q5, a0, 48 -; CHECK-NEXT: li a0, 7 -; CHECK-NEXT: esp.vldext.u16.xp q2, q0, a0, s9 -; CHECK-NEXT: li a0, 13 -; CHECK-NEXT: esp.vldext.u8.ip q7, q2, a0, 64 -; CHECK-NEXT: li a0, 6 -; CHECK-NEXT: esp.vldext.u8.xp q7, q2, 
a0, a0 +; CHECK-NEXT: esp.vldext.u8.xp q0, q6, a0, s0 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.vldhbc.16.incp q6, q5, a0 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.ld.qacc.h.h.128.ip a0, -1296 ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: esp.vldhbc.16.incp q4, q7, a0 -; CHECK-NEXT: li a0, 6 -; CHECK-NEXT: esp.ld.qacc.h.h.128.ip a0, 512 -; CHECK-NEXT: li a0, 5 -; CHECK-NEXT: esp.ld.qacc.h.l.128.ip a0, -784 -; CHECK-NEXT: li a0, 10 -; CHECK-NEXT: esp.ld.qacc.l.h.128.ip a0, -800 +; CHECK-NEXT: esp.ld.qacc.h.l.128.ip a0, -64 ; CHECK-NEXT: li a0, 10 -; CHECK-NEXT: esp.ld.qacc.l.l.128.ip a0, -1952 -; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.ld.ua.state.ip a0, -752 -; CHECK-NEXT: esp.ldxq.32 q7, q4, a5, 2, 4 -; CHECK-NEXT: li a0, 13 -; CHECK-NEXT: esp.st.qacc.h.h.128.ip a0, -336 +; CHECK-NEXT: esp.ld.qacc.l.h.128.ip a0, 608 +; CHECK-NEXT: esp.ld.qacc.l.l.128.ip s1, 656 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.ld.ua.state.ip a0, 1392 +; CHECK-NEXT: esp.ldxq.32 q5, q2, t5, 2, 2 +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: esp.st.qacc.h.h.128.ip a0, -432 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.st.qacc.h.l.128.ip a0, -1792 +; CHECK-NEXT: esp.st.qacc.l.h.128.ip a2, 320 ; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.st.qacc.h.l.128.ip a0, 1568 -; CHECK-NEXT: li a0, 4 -; CHECK-NEXT: esp.st.qacc.l.h.128.ip a0, 16 +; CHECK-NEXT: esp.st.qacc.l.l.128.ip a0, -496 ; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.st.qacc.l.l.128.ip a0, 416 +; CHECK-NEXT: esp.st.ua.state.ip a0, 1856 +; CHECK-NEXT: esp.stxq.32 q3, q5, s9, 2, 1 +; CHECK-NEXT: mv a0, s0 +; CHECK-NEXT: esp.vld.128.ip q0, a0, 496 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.vld.128.xp q2, a0, s9 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.vld.h.64.ip q2, a0, -88 +; CHECK-NEXT: esp.vld.h.64.xp q3, a3, s10 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.vld.l.64.ip q2, a0, 240 +; CHECK-NEXT: esp.vld.l.64.xp q3, a1, s11 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.vst.128.ip q6, a0, -512 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.vst.128.xp q2, a0, 
s9 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.vst.h.64.ip q5, a0, 56 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.vst.h.64.xp q1, a0, a0 ; CHECK-NEXT: li a0, 7 -; CHECK-NEXT: esp.st.ua.state.ip a0, -1360 -; CHECK-NEXT: esp.stxq.32 q0, q6, a4, 2, 5 +; CHECK-NEXT: esp.vst.l.64.ip q3, a0, 952 ; CHECK-NEXT: li a0, 8 -; CHECK-NEXT: esp.vld.128.ip q3, a0, 784 -; CHECK-NEXT: li a0, 7 -; CHECK-NEXT: esp.vld.128.xp q3, a0, s0 -; CHECK-NEXT: mv a0, a5 -; CHECK-NEXT: esp.vld.h.64.ip q0, a0, -352 -; CHECK-NEXT: esp.vld.h.64.xp q2, a1, t4 -; CHECK-NEXT: li a0, 6 -; CHECK-NEXT: esp.vld.l.64.ip q2, a0, 56 -; CHECK-NEXT: esp.vld.l.64.xp q5, s0, s10 -; CHECK-NEXT: li a0, 6 -; CHECK-NEXT: esp.vst.128.ip q5, a0, -960 -; CHECK-NEXT: li a0, 13 -; CHECK-NEXT: esp.vst.128.xp q6, a0, s11 -; CHECK-NEXT: esp.vst.h.64.ip q7, s1, 944 -; CHECK-NEXT: esp.vst.h.64.xp q7, s11, t5 -; CHECK-NEXT: li a0, 3 -; CHECK-NEXT: esp.vst.l.64.ip q5, a0, 984 -; CHECK-NEXT: li a0, 3 -; CHECK-NEXT: esp.vst.l.64.xp q5, a0, s9 -; CHECK-NEXT: esp.slci.2q q1, q5, 12 -; CHECK-NEXT: esp.slcxxp.2q q2, q3, t6, t6 -; CHECK-NEXT: esp.src.q q2, q1, q3 -; CHECK-NEXT: esp.src.q.ld.ip q0, a5, -272, q5, q5 -; CHECK-NEXT: li a0, 12 -; CHECK-NEXT: esp.src.q.ld.xp q1, a0, t3, q7, q6 -; CHECK-NEXT: esp.src.q.qup q3, q7, q4 -; CHECK-NEXT: esp.srci.2q q2, q3, 7 -; CHECK-NEXT: esp.srcmb.s16.q.qacc q2, q4, 0 -; CHECK-NEXT: esp.srcmb.s16.qacc q5, s8, 1 -; CHECK-NEXT: esp.srcmb.s8.q.qacc q5, q4, 0 -; CHECK-NEXT: esp.srcmb.s8.qacc q1, a2, 1 -; CHECK-NEXT: esp.srcmb.u16.q.qacc q0, q3, 1 -; CHECK-NEXT: esp.srcmb.u16.qacc q7, s8, 0 -; CHECK-NEXT: esp.srcmb.u8.q.qacc q3, q5, 1 -; CHECK-NEXT: esp.srcmb.u8.qacc q0, t6, 0 -; CHECK-NEXT: li a0, 12 -; CHECK-NEXT: esp.srcq.128.st.incp q0, q5, a0 -; CHECK-NEXT: esp.srcxxp.2q q4, q6, s9, a4 -; CHECK-NEXT: esp.srs.s.xacc a0, a4 -; CHECK-NEXT: esp.srs.u.xacc a0, t3 -; CHECK-NEXT: esp.vsl.32 q5, q2 -; CHECK-NEXT: esp.vsld.16 q3, q3, q7 -; CHECK-NEXT: esp.vsld.32 q3, q7, q1 -; CHECK-NEXT: esp.vsld.8 
q0, q1, q5 -; CHECK-NEXT: esp.vsr.s32 q3, q0 -; CHECK-NEXT: esp.vsr.u32 q1, q2 -; CHECK-NEXT: esp.vsrd.16 q4, q3, q0 -; CHECK-NEXT: esp.vsrd.32 q0, q6, q3 -; CHECK-NEXT: esp.vsrd.8 q5, q4, q1 -; CHECK-NEXT: esp.st.s.xacc.ip a2, 80 -; CHECK-NEXT: esp.st.u.xacc.ip a4, -464 +; CHECK-NEXT: esp.vst.l.64.xp q2, a0, t4 +; CHECK-NEXT: esp.slci.2q q1, q3, 6 +; CHECK-NEXT: li a1, 6 +; CHECK-NEXT: esp.slcxxp.2q q4, q3, a1, s9 +; CHECK-NEXT: esp.src.q q4, q1, q5 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.src.q.ld.ip q2, a0, -1776, q2, q1 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.src.q.ld.xp q1, a0, s10, q1, q6 +; CHECK-NEXT: esp.src.q.qup q6, q0, q2 +; CHECK-NEXT: esp.srci.2q q3, q3, 12 +; CHECK-NEXT: esp.srcmb.s16.q.qacc q1, q5, 1 +; CHECK-NEXT: esp.srcmb.s16.qacc q3, s9, 0 +; CHECK-NEXT: esp.srcmb.s8.q.qacc q2, q5, 1 +; CHECK-NEXT: esp.srcmb.s8.qacc q0, s0, 0 +; CHECK-NEXT: esp.srcmb.u16.q.qacc q0, q4, 1 +; CHECK-NEXT: esp.srcmb.u16.qacc q6, t5, 0 +; CHECK-NEXT: esp.srcmb.u8.q.qacc q5, q6, 1 +; CHECK-NEXT: esp.srcmb.u8.qacc q6, t6, 0 +; CHECK-NEXT: esp.srcq.128.st.incp q0, q4, a5 +; CHECK-NEXT: esp.srcxxp.2q q5, q0, s0, s8 +; CHECK-NEXT: esp.srs.s.xacc a0, s10 +; CHECK-NEXT: esp.srs.u.xacc a0, a1 +; CHECK-NEXT: esp.vsl.32 q1, q6 +; CHECK-NEXT: esp.vsld.16 q3, q0, q2 +; CHECK-NEXT: esp.vsld.32 q0, q1, q1 +; CHECK-NEXT: esp.vsld.8 q1, q2, q1 +; CHECK-NEXT: esp.vsr.s32 q3, q7 +; CHECK-NEXT: esp.vsr.u32 q4, q5 +; CHECK-NEXT: esp.vsrd.16 q1, q5, q3 +; CHECK-NEXT: esp.vsrd.32 q4, q1, q2 +; CHECK-NEXT: esp.vsrd.8 q4, q7, q4 +; CHECK-NEXT: esp.st.s.xacc.ip s11, -720 +; CHECK-NEXT: esp.st.u.xacc.ip a4, -576 +; CHECK-NEXT: esp.movx.r.cfg a0 +; CHECK-NEXT: ori a0, a0, 2 +; CHECK-NEXT: esp.movx.w.cfg a0 ; CHECK-NEXT: cm.popret {ra, s0-s11}, 64 - tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 0) - tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 1) - tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 2) - tail call void 
@llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 3) - tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 4) - tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 5) - tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 6) - tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 7) - tail call void @llvm.riscv.esp.vcmulas.s16.qacc.h(i32 0, i32 4) - tail call void @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.ip(i32 6, i32 1, i32 10, i32 -48, i32 1) - tail call void @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.xp(i32 12, i32 2, i32 7, i32 2, i32 1) - tail call void @llvm.riscv.esp.vcmulas.s16.qacc.l(i32 7, i32 6) - tail call void @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.ip(i32 7, i32 0, i32 8, i32 48, i32 7) - tail call void @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.xp(i32 14, i32 2, i32 7, i32 7, i32 1) - tail call void @llvm.riscv.esp.vcmulas.s8.qacc.h(i32 1, i32 1) - tail call void @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.ip(i32 1, i32 6, i32 5, i32 32, i32 4) - tail call void @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.xp(i32 7, i32 3, i32 2, i32 2, i32 6) - tail call void @llvm.riscv.esp.vcmulas.s8.qacc.l(i32 4, i32 5) - tail call void @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.ip(i32 2, i32 5, i32 4, i32 -48, i32 4) - tail call void @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.xp(i32 7, i32 6, i32 3, i32 14, i32 7) - tail call void @llvm.riscv.esp.vmulas.s16.qacc(i32 4, i32 2) - tail call void @llvm.riscv.esp.vmulas.s16.qacc.ld.ip(i32 5, i32 7, i32 4, i32 96, i32 1) - tail call void @llvm.riscv.esp.vmulas.s16.qacc.ld.xp(i32 3, i32 4, i32 2, i32 8, i32 6) - tail call void @llvm.riscv.esp.vmulas.s16.qacc.st.ip(i32 7, i32 6, i32 1, i32 0, i32 80) - tail call void @llvm.riscv.esp.vmulas.s16.qacc.st.xp(i32 5, i32 0, i32 7, i32 6, i32 5) - tail call void @llvm.riscv.esp.vmulas.s16.xacc(i32 3, i32 5) - tail call void @llvm.riscv.esp.vmulas.s16.xacc.ld.ip(i32 1, i32 7, i32 9, i32 96, i32 5) - tail call void @llvm.riscv.esp.vmulas.s16.xacc.ld.xp(i32 8, i32 5, i32 5, i32 13, i32 0) 
- tail call void @llvm.riscv.esp.vmulas.s16.xacc.st.ip(i32 4, i32 6, i32 2, i32 1, i32 16) - tail call void @llvm.riscv.esp.vmulas.s16.xacc.st.xp(i32 5, i32 7, i32 7, i32 7, i32 2) - tail call void @llvm.riscv.esp.vmulas.s8.qacc(i32 6, i32 1) - tail call void @llvm.riscv.esp.vmulas.s8.qacc.ld.ip(i32 3, i32 5, i32 8, i32 -128, i32 2) - tail call void @llvm.riscv.esp.vmulas.s8.qacc.ld.xp(i32 5, i32 0, i32 5, i32 8, i32 4) - tail call void @llvm.riscv.esp.vmulas.s8.qacc.st.ip(i32 6, i32 0, i32 7, i32 1, i32 16) - tail call void @llvm.riscv.esp.vmulas.s8.qacc.st.xp(i32 12, i32 6, i32 1, i32 4, i32 10) - tail call void @llvm.riscv.esp.vmulas.s8.xacc(i32 3, i32 7) - tail call void @llvm.riscv.esp.vmulas.s8.xacc.ld.ip(i32 4, i32 5, i32 1, i32 -16, i32 7) - tail call void @llvm.riscv.esp.vmulas.s8.xacc.ld.xp(i32 10, i32 7, i32 0, i32 7, i32 1) - tail call void @llvm.riscv.esp.vmulas.s8.xacc.st.ip(i32 6, i32 1, i32 6, i32 8, i32 -128) - tail call void @llvm.riscv.esp.vmulas.s8.xacc.st.xp(i32 2, i32 4, i32 1, i32 5, i32 4) - tail call void @llvm.riscv.esp.vmulas.u16.qacc(i32 6, i32 1) - tail call void @llvm.riscv.esp.vmulas.u16.qacc.ld.ip(i32 0, i32 0, i32 8, i32 -32, i32 7) - tail call void @llvm.riscv.esp.vmulas.u16.qacc.ld.xp(i32 7, i32 6, i32 7, i32 6, i32 2) - tail call void @llvm.riscv.esp.vmulas.u16.qacc.st.ip(i32 6, i32 5, i32 4, i32 8, i32 16) - tail call void @llvm.riscv.esp.vmulas.u16.qacc.st.xp(i32 9, i32 3, i32 7, i32 4, i32 2) - tail call void @llvm.riscv.esp.vmulas.u16.xacc(i32 6, i32 1) - tail call void @llvm.riscv.esp.vmulas.u16.xacc.ld.ip(i32 2, i32 2, i32 3, i32 -48, i32 2) - tail call void @llvm.riscv.esp.vmulas.u16.xacc.ld.xp(i32 6, i32 3, i32 0, i32 0, i32 7) - tail call void @llvm.riscv.esp.vmulas.u16.xacc.st.ip(i32 1, i32 4, i32 0, i32 9, i32 96) - tail call void @llvm.riscv.esp.vmulas.u16.xacc.st.xp(i32 5, i32 3, i32 7, i32 6, i32 2) - tail call void @llvm.riscv.esp.vmulas.u8.qacc(i32 7, i32 1) - tail call void 
@llvm.riscv.esp.vmulas.u8.qacc.ld.ip(i32 7, i32 4, i32 9, i32 -48, i32 7) - tail call void @llvm.riscv.esp.vmulas.u8.qacc.ld.xp(i32 12, i32 6, i32 7, i32 11, i32 4) - tail call void @llvm.riscv.esp.vmulas.u8.qacc.st.ip(i32 1, i32 7, i32 2, i32 14, i32 0) - tail call void @llvm.riscv.esp.vmulas.u8.qacc.st.xp(i32 7, i32 0, i32 0, i32 4, i32 7) - tail call void @llvm.riscv.esp.vmulas.u8.xacc(i32 6, i32 4) - tail call void @llvm.riscv.esp.vmulas.u8.xacc.ld.ip(i32 6, i32 2, i32 5, i32 -80, i32 3) - tail call void @llvm.riscv.esp.vmulas.u8.xacc.ld.xp(i32 13, i32 5, i32 1, i32 13, i32 4) - tail call void @llvm.riscv.esp.vmulas.u8.xacc.st.ip(i32 2, i32 3, i32 7, i32 5, i32 -128) - tail call void @llvm.riscv.esp.vmulas.u8.xacc.st.xp(i32 6, i32 7, i32 2, i32 2, i32 5) - tail call void @llvm.riscv.esp.vmulas.s16.qacc.ldbc.incp(i32 0, i32 2, i32 14, i32 0) - tail call void @llvm.riscv.esp.vmulas.s8.qacc.ldbc.incp(i32 2, i32 6, i32 0, i32 5) - tail call void @llvm.riscv.esp.vmulas.u16.qacc.ldbc.incp(i32 7, i32 3, i32 8, i32 5) - tail call void @llvm.riscv.esp.vmulas.u8.qacc.ldbc.incp(i32 4, i32 4, i32 6, i32 3) - tail call void @llvm.riscv.esp.vsmulas.s16.qacc(i32 1, i32 5, i32 14) - tail call void @llvm.riscv.esp.vsmulas.s16.qacc.ld.incp(i32 7, i32 4, i32 5, i32 0, i32 0) - tail call void @llvm.riscv.esp.vsmulas.s8.qacc(i32 3, i32 5, i32 0) - tail call void @llvm.riscv.esp.vsmulas.s8.qacc.ld.incp(i32 1, i32 4, i32 8, i32 6, i32 1) - tail call void @llvm.riscv.esp.vsmulas.u16.qacc(i32 6, i32 5, i32 15) - tail call void @llvm.riscv.esp.vsmulas.u16.qacc.ld.incp(i32 7, i32 1, i32 0, i32 10, i32 7) - tail call void @llvm.riscv.esp.vsmulas.u8.qacc(i32 0, i32 7, i32 2) - tail call void @llvm.riscv.esp.vsmulas.u8.qacc.ld.incp(i32 3, i32 7, i32 10, i32 8, i32 4) - tail call void @llvm.riscv.esp.cmul.s16(i32 0, i32 7, i32 3, i32 6) - tail call void @llvm.riscv.esp.cmul.s16.ld.incp(i32 0, i32 3, i32 6, i32 0, i32 3, i32 5) - tail call void @llvm.riscv.esp.cmul.s16.st.incp(i32 4, i32 5, 
i32 5, i32 0, i32 2, i32 0) - tail call void @llvm.riscv.esp.cmul.s8(i32 1, i32 0, i32 3, i32 1) - tail call void @llvm.riscv.esp.cmul.s8.ld.incp(i32 5, i32 4, i32 5, i32 1, i32 7, i32 4) - tail call void @llvm.riscv.esp.cmul.s8.st.incp(i32 6, i32 0, i32 5, i32 14, i32 3, i32 0) - tail call void @llvm.riscv.esp.cmul.u16(i32 7, i32 5, i32 2, i32 7) - tail call void @llvm.riscv.esp.cmul.u16.ld.incp(i32 0, i32 1, i32 2, i32 1, i32 0, i32 0) - tail call void @llvm.riscv.esp.cmul.u16.st.incp(i32 1, i32 4, i32 2, i32 5, i32 2, i32 4) - tail call void @llvm.riscv.esp.cmul.u8(i32 7, i32 5, i32 0, i32 3) - tail call void @llvm.riscv.esp.cmul.u8.ld.incp(i32 0, i32 2, i32 11, i32 0, i32 0, i32 4) - tail call void @llvm.riscv.esp.cmul.u8.st.incp(i32 6, i32 3, i32 1, i32 10, i32 2, i32 4) - tail call void @llvm.riscv.esp.max.s16.a(i32 6, i32 3) - tail call void @llvm.riscv.esp.max.s32.a(i32 2, i32 0) - tail call void @llvm.riscv.esp.max.s8.a(i32 0, i32 9) - tail call void @llvm.riscv.esp.max.u16.a(i32 6, i32 6) - tail call void @llvm.riscv.esp.max.u32.a(i32 6, i32 1) - tail call void @llvm.riscv.esp.max.u8.a(i32 1, i32 4) - tail call void @llvm.riscv.esp.min.s16.a(i32 6, i32 11) - tail call void @llvm.riscv.esp.min.s32.a(i32 1, i32 14) - tail call void @llvm.riscv.esp.min.s8.a(i32 0, i32 1) - tail call void @llvm.riscv.esp.min.u16.a(i32 3, i32 14) - tail call void @llvm.riscv.esp.min.u32.a(i32 0, i32 9) - tail call void @llvm.riscv.esp.min.u8.a(i32 5, i32 8) - tail call void @llvm.riscv.esp.vabs.16(i32 0, i32 6) - tail call void @llvm.riscv.esp.vabs.32(i32 4, i32 1) - tail call void @llvm.riscv.esp.vabs.8(i32 2, i32 5) - tail call void @llvm.riscv.esp.vadd.s16(i32 1, i32 5, i32 6) - tail call void @llvm.riscv.esp.vadd.s16.ld.incp(i32 0, i32 6, i32 10, i32 1, i32 0) - tail call void @llvm.riscv.esp.vadd.s16.st.incp(i32 0, i32 4, i32 1, i32 11, i32 7) - tail call void @llvm.riscv.esp.vadd.s32(i32 7, i32 3, i32 7) - tail call void @llvm.riscv.esp.vadd.s32.ld.incp(i32 7, i32 2, i32 
14, i32 4, i32 4) - tail call void @llvm.riscv.esp.vadd.s32.st.incp(i32 1, i32 7, i32 2, i32 13, i32 7) - tail call void @llvm.riscv.esp.vadd.s8(i32 1, i32 7, i32 7) - tail call void @llvm.riscv.esp.vadd.s8.ld.incp(i32 5, i32 6, i32 14, i32 1, i32 2) - tail call void @llvm.riscv.esp.vadd.s8.st.incp(i32 1, i32 0, i32 3, i32 9, i32 4) - tail call void @llvm.riscv.esp.vadd.u16(i32 7, i32 7, i32 0) - tail call void @llvm.riscv.esp.vadd.u16.ld.incp(i32 7, i32 5, i32 8, i32 1, i32 6) - tail call void @llvm.riscv.esp.vadd.u16.st.incp(i32 6, i32 3, i32 0, i32 5, i32 7) - tail call void @llvm.riscv.esp.vadd.u32(i32 0, i32 1, i32 4) - tail call void @llvm.riscv.esp.vadd.u32.ld.incp(i32 5, i32 0, i32 2, i32 4, i32 1) - tail call void @llvm.riscv.esp.vadd.u32.st.incp(i32 0, i32 1, i32 4, i32 1, i32 6) - tail call void @llvm.riscv.esp.vadd.u8(i32 2, i32 5, i32 5) - tail call void @llvm.riscv.esp.vadd.u8.ld.incp(i32 4, i32 3, i32 8, i32 1, i32 7) - tail call void @llvm.riscv.esp.vadd.u8.st.incp(i32 0, i32 0, i32 0, i32 5, i32 2) - tail call void @llvm.riscv.esp.vclamp.s16(i32 5, i32 14, i32 4) - tail call void @llvm.riscv.esp.vmax.s16(i32 6, i32 5, i32 5) - tail call void @llvm.riscv.esp.vmax.s16.ld.incp(i32 5, i32 5, i32 11, i32 3, i32 2) - tail call void @llvm.riscv.esp.vmax.s16.st.incp(i32 3, i32 5, i32 3, i32 2, i32 4) - tail call void @llvm.riscv.esp.vmax.s32(i32 5, i32 2, i32 2) - tail call void @llvm.riscv.esp.vmax.s32.ld.incp(i32 0, i32 1, i32 7, i32 6, i32 0) - tail call void @llvm.riscv.esp.vmax.s32.st.incp(i32 7, i32 6, i32 6, i32 14, i32 1) - tail call void @llvm.riscv.esp.vmax.s8(i32 5, i32 7, i32 7) - tail call void @llvm.riscv.esp.vmax.s8.ld.incp(i32 5, i32 1, i32 2, i32 1, i32 6) - tail call void @llvm.riscv.esp.vmax.s8.st.incp(i32 1, i32 3, i32 5, i32 4, i32 7) - tail call void @llvm.riscv.esp.vmax.u16(i32 4, i32 1, i32 1) - tail call void @llvm.riscv.esp.vmax.u16.ld.incp(i32 5, i32 4, i32 0, i32 5, i32 3) - tail call void @llvm.riscv.esp.vmax.u16.st.incp(i32 0, 
i32 7, i32 5, i32 11, i32 5) - tail call void @llvm.riscv.esp.vmax.u32(i32 0, i32 2, i32 4) - tail call void @llvm.riscv.esp.vmax.u32.ld.incp(i32 0, i32 6, i32 4, i32 1, i32 6) - tail call void @llvm.riscv.esp.vmax.u32.st.incp(i32 4, i32 7, i32 0, i32 6, i32 1) - tail call void @llvm.riscv.esp.vmax.u8(i32 2, i32 0, i32 5) - tail call void @llvm.riscv.esp.vmax.u8.ld.incp(i32 6, i32 1, i32 1, i32 5, i32 0) - tail call void @llvm.riscv.esp.vmax.u8.st.incp(i32 6, i32 7, i32 7, i32 10, i32 1) - tail call void @llvm.riscv.esp.vmin.s16(i32 1, i32 3, i32 4) - tail call void @llvm.riscv.esp.vmin.s16.ld.incp(i32 2, i32 1, i32 3, i32 2, i32 2) - tail call void @llvm.riscv.esp.vmin.s16.st.incp(i32 7, i32 6, i32 2, i32 7, i32 1) - tail call void @llvm.riscv.esp.vmin.s32(i32 0, i32 3, i32 2) - tail call void @llvm.riscv.esp.vmin.s32.ld.incp(i32 7, i32 6, i32 14, i32 5, i32 1) - tail call void @llvm.riscv.esp.vmin.s32.st.incp(i32 5, i32 1, i32 7, i32 6, i32 5) - tail call void @llvm.riscv.esp.vmin.s8(i32 3, i32 6, i32 2) - tail call void @llvm.riscv.esp.vmin.s8.ld.incp(i32 4, i32 3, i32 11, i32 1, i32 7) - tail call void @llvm.riscv.esp.vmin.s8.st.incp(i32 0, i32 1, i32 1, i32 6, i32 4) - tail call void @llvm.riscv.esp.vmin.u16(i32 3, i32 7, i32 4) - tail call void @llvm.riscv.esp.vmin.u16.ld.incp(i32 6, i32 6, i32 11, i32 5, i32 4) - tail call void @llvm.riscv.esp.vmin.u16.st.incp(i32 6, i32 0, i32 1, i32 12, i32 2) - tail call void @llvm.riscv.esp.vmin.u32(i32 0, i32 7, i32 5) - tail call void @llvm.riscv.esp.vmin.u32.ld.incp(i32 5, i32 6, i32 13, i32 6, i32 7) - tail call void @llvm.riscv.esp.vmin.u32.st.incp(i32 3, i32 7, i32 5, i32 4, i32 4) - tail call void @llvm.riscv.esp.vmin.u8(i32 5, i32 5, i32 7) - tail call void @llvm.riscv.esp.vmin.u8.ld.incp(i32 0, i32 5, i32 0, i32 5, i32 2) - tail call void @llvm.riscv.esp.vmin.u8.st.incp(i32 6, i32 6, i32 2, i32 12, i32 1) - tail call void @llvm.riscv.esp.vmul.s16(i32 2, i32 1, i32 6) - tail call void 
@llvm.riscv.esp.vmul.s16.ld.incp(i32 6, i32 7, i32 10, i32 3, i32 0) - tail call void @llvm.riscv.esp.vmul.s16.s8xs8(i32 3, i32 5, i32 7, i32 0) - tail call void @llvm.riscv.esp.vmul.s16.st.incp(i32 7, i32 1, i32 2, i32 3, i32 0) - tail call void @llvm.riscv.esp.vmul.s32.s16xs16(i32 5, i32 2, i32 3, i32 4) - tail call void @llvm.riscv.esp.vmul.s8(i32 4, i32 0, i32 3) - tail call void @llvm.riscv.esp.vmul.s8.ld.incp(i32 2, i32 3, i32 0, i32 2, i32 0) - tail call void @llvm.riscv.esp.vmul.s8.st.incp(i32 0, i32 7, i32 0, i32 8, i32 3) - tail call void @llvm.riscv.esp.vmul.u16(i32 3, i32 7, i32 2) - tail call void @llvm.riscv.esp.vmul.u16.ld.incp(i32 6, i32 6, i32 7, i32 5, i32 5) - tail call void @llvm.riscv.esp.vmul.u16.st.incp(i32 4, i32 4, i32 3, i32 3, i32 2) - tail call void @llvm.riscv.esp.vmul.u8(i32 3, i32 7, i32 7) - tail call void @llvm.riscv.esp.vmul.u8.ld.incp(i32 0, i32 6, i32 9, i32 1, i32 0) - tail call void @llvm.riscv.esp.vmul.u8.st.incp(i32 0, i32 3, i32 7, i32 6, i32 4) - tail call void @llvm.riscv.esp.vprelu.s16(i32 8, i32 3, i32 4, i32 1) - tail call void @llvm.riscv.esp.vprelu.s8(i32 3, i32 5, i32 4, i32 2) - tail call void @llvm.riscv.esp.vrelu.s16(i32 8, i32 9, i32 6) - tail call void @llvm.riscv.esp.vrelu.s8(i32 12, i32 1, i32 5) - tail call void @llvm.riscv.esp.vsadds.s16(i32 12, i32 3, i32 3) - tail call void @llvm.riscv.esp.vsadds.s8(i32 7, i32 1, i32 7) - tail call void @llvm.riscv.esp.vsadds.u16(i32 4, i32 2, i32 3) - tail call void @llvm.riscv.esp.vsadds.u8(i32 8, i32 3, i32 2) - tail call void @llvm.riscv.esp.vsat.s16(i32 9, i32 1, i32 0, i32 5) - tail call void @llvm.riscv.esp.vsat.s32(i32 12, i32 9, i32 3, i32 3) - tail call void @llvm.riscv.esp.vsat.s8(i32 10, i32 2, i32 7, i32 0) - tail call void @llvm.riscv.esp.vsat.u16(i32 7, i32 7, i32 7, i32 3) - tail call void @llvm.riscv.esp.vsat.u32(i32 8, i32 2, i32 5, i32 3) - tail call void @llvm.riscv.esp.vsat.u8(i32 1, i32 13, i32 6, i32 0) - tail call void @llvm.riscv.esp.vssubs.s16(i32 
3, i32 7, i32 3) - tail call void @llvm.riscv.esp.vssubs.s8(i32 14, i32 0, i32 7) - tail call void @llvm.riscv.esp.vssubs.u16(i32 0, i32 4, i32 5) - tail call void @llvm.riscv.esp.vssubs.u8(i32 11, i32 1, i32 5) - tail call void @llvm.riscv.esp.vsub.s16(i32 0, i32 6, i32 0) - tail call void @llvm.riscv.esp.vsub.s16.ld.incp(i32 3, i32 7, i32 6, i32 2, i32 2) - tail call void @llvm.riscv.esp.vsub.s16.st.incp(i32 0, i32 3, i32 7, i32 0, i32 0) - tail call void @llvm.riscv.esp.vsub.s32(i32 2, i32 7, i32 7) - tail call void @llvm.riscv.esp.vsub.s32.ld.incp(i32 2, i32 0, i32 7, i32 3, i32 4) - tail call void @llvm.riscv.esp.vsub.s32.st.incp(i32 1, i32 1, i32 4, i32 5, i32 1) - tail call void @llvm.riscv.esp.vsub.s8(i32 5, i32 6, i32 7) - tail call void @llvm.riscv.esp.vsub.s8.ld.incp(i32 2, i32 6, i32 1, i32 1, i32 4) - tail call void @llvm.riscv.esp.vsub.s8.st.incp(i32 2, i32 3, i32 5, i32 4, i32 4) - tail call void @llvm.riscv.esp.vsub.u16(i32 7, i32 0, i32 5) - tail call void @llvm.riscv.esp.vsub.u16.ld.incp(i32 7, i32 5, i32 11, i32 0, i32 4) - tail call void @llvm.riscv.esp.vsub.u16.st.incp(i32 3, i32 1, i32 0, i32 11, i32 1) - tail call void @llvm.riscv.esp.vsub.u32(i32 4, i32 2, i32 5) - tail call void @llvm.riscv.esp.vsub.u32.ld.incp(i32 4, i32 2, i32 0, i32 2, i32 4) - tail call void @llvm.riscv.esp.vsub.u32.st.incp(i32 7, i32 4, i32 0, i32 11, i32 7) - tail call void @llvm.riscv.esp.vsub.u8(i32 5, i32 4, i32 6) - tail call void @llvm.riscv.esp.vsub.u8.ld.incp(i32 2, i32 4, i32 0, i32 6, i32 4) - tail call void @llvm.riscv.esp.vsub.u8.st.incp(i32 2, i32 0, i32 3, i32 11, i32 3) - tail call void @llvm.riscv.esp.addx2(i32 5, i32 14, i32 4) - tail call void @llvm.riscv.esp.addx4(i32 14, i32 5, i32 4) - tail call void @llvm.riscv.esp.sat(i32 6, i32 2, i32 4) - tail call void @llvm.riscv.esp.subx2(i32 2, i32 2, i32 9) - tail call void @llvm.riscv.esp.subx4(i32 0, i32 9, i32 3) - tail call void @llvm.riscv.esp.andq(i32 6, i32 3, i32 7) - tail call void 
@llvm.riscv.esp.notq(i32 5, i32 6) - tail call void @llvm.riscv.esp.orq(i32 1, i32 0, i32 1) - tail call void @llvm.riscv.esp.xorq(i32 1, i32 6, i32 5) - tail call void @llvm.riscv.esp.vcmp.eq.s16(i32 0, i32 2, i32 1) - tail call void @llvm.riscv.esp.vcmp.eq.s32(i32 1, i32 6, i32 5) - tail call void @llvm.riscv.esp.vcmp.eq.s8(i32 0, i32 3, i32 2) - tail call void @llvm.riscv.esp.vcmp.eq.u16(i32 7, i32 1, i32 7) - tail call void @llvm.riscv.esp.vcmp.eq.u32(i32 1, i32 2, i32 2) - tail call void @llvm.riscv.esp.vcmp.eq.u8(i32 1, i32 6, i32 3) - tail call void @llvm.riscv.esp.vcmp.gt.s16(i32 5, i32 6, i32 4) - tail call void @llvm.riscv.esp.vcmp.gt.s32(i32 6, i32 2, i32 0) - tail call void @llvm.riscv.esp.vcmp.gt.s8(i32 3, i32 5, i32 2) - tail call void @llvm.riscv.esp.vcmp.gt.u16(i32 7, i32 4, i32 7) - tail call void @llvm.riscv.esp.vcmp.gt.u32(i32 6, i32 2, i32 2) - tail call void @llvm.riscv.esp.vcmp.gt.u8(i32 2, i32 0, i32 0) - tail call void @llvm.riscv.esp.vcmp.lt.s16(i32 2, i32 1, i32 7) - tail call void @llvm.riscv.esp.vcmp.lt.s32(i32 2, i32 1, i32 4) - tail call void @llvm.riscv.esp.vcmp.lt.s8(i32 5, i32 2, i32 6) - tail call void @llvm.riscv.esp.vcmp.lt.u16(i32 1, i32 5, i32 4) - tail call void @llvm.riscv.esp.vcmp.lt.u32(i32 5, i32 6, i32 2) - tail call void @llvm.riscv.esp.vcmp.lt.u8(i32 3, i32 5, i32 5) - tail call void @llvm.riscv.esp.mov.s16.qacc(i32 2) - tail call void @llvm.riscv.esp.mov.s8.qacc(i32 5) - tail call void @llvm.riscv.esp.mov.u16.qacc(i32 5) - tail call void @llvm.riscv.esp.mov.u8.qacc(i32 3) - tail call void @llvm.riscv.esp.movi.16.a(i32 2, i32 3, i32 1) - tail call void @llvm.riscv.esp.movi.16.q(i32 7, i32 13, i32 3) - tail call void @llvm.riscv.esp.movi.32.a(i32 6, i32 1, i32 14) - tail call void @llvm.riscv.esp.movi.32.q(i32 9, i32 1, i32 5) - tail call void @llvm.riscv.esp.movi.8.a(i32 5, i32 15, i32 14) - tail call void @llvm.riscv.esp.movi.8.q(i32 0, i32 6, i32 1) - tail call void @llvm.riscv.esp.movx.r.cfg(i32 5) - tail call void 
@llvm.riscv.esp.movx.r.fft.bit.width(i32 2) - tail call void @llvm.riscv.esp.movx.r.perf(i32 3, i32 33) - tail call void @llvm.riscv.esp.movx.r.sar(i32 5) - tail call void @llvm.riscv.esp.movx.r.sar.bytes(i32 6) - tail call void @llvm.riscv.esp.movx.r.xacc.h(i32 10) - tail call void @llvm.riscv.esp.movx.r.xacc.l(i32 12) - tail call void @llvm.riscv.esp.movx.w.cfg(i32 10) - tail call void @llvm.riscv.esp.movx.w.fft.bit.width(i32 1) - tail call void @llvm.riscv.esp.movx.w.perf(i32 8) - tail call void @llvm.riscv.esp.movx.w.sar(i32 3) - tail call void @llvm.riscv.esp.movx.w.sar.bytes(i32 4) - tail call void @llvm.riscv.esp.movx.w.xacc.h(i32 8) - tail call void @llvm.riscv.esp.movx.w.xacc.l(i32 4) - tail call void @llvm.riscv.esp.vext.s16(i32 6, i32 7, i32 0) - tail call void @llvm.riscv.esp.vext.s8(i32 3, i32 5, i32 3) - tail call void @llvm.riscv.esp.vext.u16(i32 6, i32 4, i32 2) - tail call void @llvm.riscv.esp.vext.u8(i32 0, i32 4, i32 0) - tail call void @llvm.riscv.esp.vunzip.16(i32 1, i32 0) - tail call void @llvm.riscv.esp.vunzip.32(i32 6, i32 4) - tail call void @llvm.riscv.esp.vunzip.8(i32 2, i32 1) - tail call void @llvm.riscv.esp.vunzipt.16(i32 7, i32 0, i32 2) - tail call void @llvm.riscv.esp.vunzipt.8(i32 0, i32 6, i32 2) - tail call void @llvm.riscv.esp.vzip.16(i32 1, i32 6) - tail call void @llvm.riscv.esp.vzip.32(i32 4, i32 6) - tail call void @llvm.riscv.esp.vzip.8(i32 4, i32 0) - tail call void @llvm.riscv.esp.vzipt.16(i32 0, i32 3, i32 5) - tail call void @llvm.riscv.esp.vzipt.8(i32 6, i32 1, i32 5) - tail call void @llvm.riscv.esp.zero.q(i32 5) + %1 = tail call i32 @llvm.riscv.esp.vld.128.ip(i32 8, i32 496, i32 0) + %2 = tail call i32 @llvm.riscv.esp.vld.128.ip(i32 %1, i32 496, i32 1) + %3 = tail call i32 @llvm.riscv.esp.vld.128.ip(i32 %2, i32 496, i32 2) + %4 = tail call i32 @llvm.riscv.esp.vld.128.ip(i32 %3, i32 496, i32 3) + %5 = tail call i32 @llvm.riscv.esp.vld.128.ip(i32 %4, i32 496, i32 4) + %6 = tail call i32 @llvm.riscv.esp.vld.128.ip(i32 
%5, i32 496, i32 5) + %7 = tail call i32 @llvm.riscv.esp.vld.128.ip(i32 %6, i32 496, i32 6) + %8 = tail call i32 @llvm.riscv.esp.vld.128.ip(i32 %7, i32 496, i32 7) + tail call void @llvm.riscv.esp.vcmulas.s16.qacc.h(i32 0, i32 6) + %9 = tail call i32 @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.ip(i32 0, i32 3, i32 10, i32 0, i32 2) + %10 = tail call i32 @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.xp(i32 7, i32 2, i32 0, i32 3, i32 6) + tail call void @llvm.riscv.esp.vcmulas.s16.qacc.l(i32 2, i32 3) + %11 = tail call i32 @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.ip(i32 4, i32 3, i32 8, i32 -64, i32 1) + %12 = tail call i32 @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.xp(i32 1, i32 5, i32 4, i32 12, i32 7) + tail call void @llvm.riscv.esp.vcmulas.s8.qacc.h(i32 0, i32 3) + %13 = tail call i32 @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.ip(i32 3, i32 7, i32 3, i32 -48, i32 5) + %14 = tail call i32 @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.xp(i32 11, i32 4, i32 0, i32 6, i32 4) + tail call void @llvm.riscv.esp.vcmulas.s8.qacc.l(i32 4, i32 0) + %15 = tail call i32 @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.ip(i32 7, i32 5, i32 2, i32 0, i32 0) + %16 = tail call i32 @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.xp(i32 8, i32 3, i32 0, i32 4, i32 6) + tail call void @llvm.riscv.esp.vmulas.s16.qacc(i32 3, i32 0) + %17 = tail call i32 @llvm.riscv.esp.vmulas.s16.qacc.ld.ip(i32 0, i32 1, i32 7, i32 80, i32 4) + %18 = tail call i32 @llvm.riscv.esp.vmulas.s16.qacc.ld.xp(i32 7, i32 7, i32 2, i32 2, i32 7) + %19 = tail call i32 @llvm.riscv.esp.vmulas.s16.qacc.st.ip(i32 3, i32 4, i32 2, i32 9, i32 -32) + %20 = tail call i32 @llvm.riscv.esp.vmulas.s16.qacc.st.xp(i32 8, i32 6, i32 6, i32 4, i32 10) + tail call void @llvm.riscv.esp.vmulas.s16.xacc(i32 1, i32 0) + %21 = tail call i32 @llvm.riscv.esp.vmulas.s16.xacc.ld.ip(i32 1, i32 6, i32 8, i32 -16, i32 4) + %22 = tail call i32 @llvm.riscv.esp.vmulas.s16.xacc.ld.xp(i32 1, i32 2, i32 3, i32 12, i32 7) + %23 = tail call i32 @llvm.riscv.esp.vmulas.s16.xacc.st.ip(i32 4, i32 2, i32 1, i32 
13, i32 16) + %24 = tail call i32 @llvm.riscv.esp.vmulas.s16.xacc.st.xp(i32 8, i32 7, i32 4, i32 3, i32 3) + tail call void @llvm.riscv.esp.vmulas.s8.qacc(i32 4, i32 3) + %25 = tail call i32 @llvm.riscv.esp.vmulas.s8.qacc.ld.ip(i32 5, i32 2, i32 4, i32 32, i32 4) + %26 = tail call i32 @llvm.riscv.esp.vmulas.s8.qacc.ld.xp(i32 14, i32 1, i32 6, i32 14, i32 7) + %27 = tail call i32 @llvm.riscv.esp.vmulas.s8.qacc.st.ip(i32 1, i32 6, i32 6, i32 5, i32 -112) + %28 = tail call i32 @llvm.riscv.esp.vmulas.s8.qacc.st.xp(i32 9, i32 3, i32 3, i32 5, i32 7) + tail call void @llvm.riscv.esp.vmulas.s8.xacc(i32 7, i32 0) + %29 = tail call i32 @llvm.riscv.esp.vmulas.s8.xacc.ld.ip(i32 7, i32 0, i32 12, i32 16, i32 7) + %30 = tail call i32 @llvm.riscv.esp.vmulas.s8.xacc.ld.xp(i32 6, i32 2, i32 1, i32 3, i32 0) + %31 = tail call i32 @llvm.riscv.esp.vmulas.s8.xacc.st.ip(i32 5, i32 6, i32 4, i32 13, i32 32) + %32 = tail call i32 @llvm.riscv.esp.vmulas.s8.xacc.st.xp(i32 3, i32 1, i32 7, i32 2, i32 12) + tail call void @llvm.riscv.esp.vmulas.u16.qacc(i32 5, i32 0) + %33 = tail call i32 @llvm.riscv.esp.vmulas.u16.qacc.ld.ip(i32 4, i32 4, i32 4, i32 48, i32 2) + %34 = tail call i32 @llvm.riscv.esp.vmulas.u16.qacc.ld.xp(i32 7, i32 1, i32 7, i32 9, i32 3) + %35 = tail call i32 @llvm.riscv.esp.vmulas.u16.qacc.st.ip(i32 1, i32 2, i32 1, i32 12, i32 -128) + %36 = tail call i32 @llvm.riscv.esp.vmulas.u16.qacc.st.xp(i32 7, i32 1, i32 6, i32 2, i32 11) + tail call void @llvm.riscv.esp.vmulas.u16.xacc(i32 1, i32 7) + %37 = tail call i32 @llvm.riscv.esp.vmulas.u16.xacc.ld.ip(i32 2, i32 4, i32 1, i32 16, i32 4) + %38 = tail call i32 @llvm.riscv.esp.vmulas.u16.xacc.ld.xp(i32 12, i32 1, i32 7, i32 3, i32 5) + %39 = tail call i32 @llvm.riscv.esp.vmulas.u16.xacc.st.ip(i32 7, i32 6, i32 1, i32 2, i32 -64) + %40 = tail call i32 @llvm.riscv.esp.vmulas.u16.xacc.st.xp(i32 9, i32 2, i32 4, i32 1, i32 9) + tail call void @llvm.riscv.esp.vmulas.u8.qacc(i32 7, i32 4) + %41 = tail call i32 
@llvm.riscv.esp.vmulas.u8.qacc.ld.ip(i32 7, i32 6, i32 5, i32 -32, i32 1) + %42 = tail call i32 @llvm.riscv.esp.vmulas.u8.qacc.ld.xp(i32 14, i32 6, i32 0, i32 8, i32 6) + %43 = tail call i32 @llvm.riscv.esp.vmulas.u8.qacc.st.ip(i32 6, i32 4, i32 0, i32 12, i32 96) + %44 = tail call i32 @llvm.riscv.esp.vmulas.u8.qacc.st.xp(i32 12, i32 2, i32 6, i32 5, i32 3) + tail call void @llvm.riscv.esp.vmulas.u8.xacc(i32 0, i32 2) + %45 = tail call i32 @llvm.riscv.esp.vmulas.u8.xacc.ld.ip(i32 7, i32 6, i32 4, i32 -112, i32 2) + %46 = tail call i32 @llvm.riscv.esp.vmulas.u8.xacc.ld.xp(i32 4, i32 4, i32 0, i32 9, i32 3) + %47 = tail call i32 @llvm.riscv.esp.vmulas.u8.xacc.st.ip(i32 1, i32 5, i32 3, i32 1, i32 -48) + %48 = tail call i32 @llvm.riscv.esp.vmulas.u8.xacc.st.xp(i32 5, i32 5, i32 7, i32 7, i32 6) + %49 = tail call i32 @llvm.riscv.esp.vmulas.s16.qacc.ldbc.incp(i32 1, i32 2, i32 6, i32 7) + %50 = tail call i32 @llvm.riscv.esp.vmulas.s8.qacc.ldbc.incp(i32 1, i32 0, i32 1, i32 2) + %51 = tail call i32 @llvm.riscv.esp.vmulas.u16.qacc.ldbc.incp(i32 7, i32 6, i32 6, i32 2) + %52 = tail call i32 @llvm.riscv.esp.vmulas.u8.qacc.ldbc.incp(i32 7, i32 7, i32 10, i32 4) + tail call void @llvm.riscv.esp.vsmulas.s16.qacc(i32 4, i32 2, i32 0) + %53 = tail call i32 @llvm.riscv.esp.vsmulas.s16.qacc.ld.incp(i32 7, i32 7, i32 0, i32 11, i32 1) + tail call void @llvm.riscv.esp.vsmulas.s8.qacc(i32 4, i32 0, i32 2) + %54 = tail call i32 @llvm.riscv.esp.vsmulas.s8.qacc.ld.incp(i32 5, i32 1, i32 4, i32 7, i32 5) + tail call void @llvm.riscv.esp.vsmulas.u16.qacc(i32 5, i32 5, i32 3) + %55 = tail call i32 @llvm.riscv.esp.vsmulas.u16.qacc.ld.incp(i32 7, i32 5, i32 11, i32 14, i32 1) + tail call void @llvm.riscv.esp.vsmulas.u8.qacc(i32 2, i32 4, i32 2) + %56 = tail call i32 @llvm.riscv.esp.vsmulas.u8.qacc.ld.incp(i32 3, i32 1, i32 6, i32 14, i32 5) + tail call void @llvm.riscv.esp.cmul.s16(i32 3, i32 2, i32 1, i32 5) + %57 = tail call i32 @llvm.riscv.esp.cmul.s16.ld.incp(i32 6, i32 3, i32 4, i32 2, 
i32 0, i32 7) + %58 = tail call i32 @llvm.riscv.esp.cmul.s16.st.incp(i32 1, i32 7, i32 0, i32 9, i32 2, i32 5) + tail call void @llvm.riscv.esp.cmul.s8(i32 7, i32 6, i32 3, i32 3) + %59 = tail call i32 @llvm.riscv.esp.cmul.s8.ld.incp(i32 5, i32 0, i32 1, i32 3, i32 3, i32 1) + %60 = tail call i32 @llvm.riscv.esp.cmul.s8.st.incp(i32 1, i32 3, i32 1, i32 2, i32 2, i32 2) + tail call void @llvm.riscv.esp.cmul.u16(i32 1, i32 1, i32 2, i32 5) + %61 = tail call i32 @llvm.riscv.esp.cmul.u16.ld.incp(i32 5, i32 6, i32 12, i32 2, i32 2, i32 1) + %62 = tail call i32 @llvm.riscv.esp.cmul.u16.st.incp(i32 2, i32 5, i32 2, i32 14, i32 1, i32 4) + tail call void @llvm.riscv.esp.cmul.u8(i32 1, i32 3, i32 1, i32 6) + %63 = tail call i32 @llvm.riscv.esp.cmul.u8.ld.incp(i32 2, i32 2, i32 14, i32 3, i32 3, i32 5) + %64 = tail call i32 @llvm.riscv.esp.cmul.u8.st.incp(i32 1, i32 7, i32 7, i32 10, i32 3, i32 7) + tail call void @llvm.riscv.esp.max.s16.a(i32 0, i32 6) + tail call void @llvm.riscv.esp.max.s32.a(i32 1, i32 14) + tail call void @llvm.riscv.esp.max.s8.a(i32 3, i32 6) + tail call void @llvm.riscv.esp.max.u16.a(i32 6, i32 14) + tail call void @llvm.riscv.esp.max.u32.a(i32 7, i32 4) + tail call void @llvm.riscv.esp.max.u8.a(i32 5, i32 9) + tail call void @llvm.riscv.esp.min.s16.a(i32 3, i32 2) + tail call void @llvm.riscv.esp.min.s32.a(i32 6, i32 9) + tail call void @llvm.riscv.esp.min.s8.a(i32 0, i32 10) + tail call void @llvm.riscv.esp.min.u16.a(i32 1, i32 7) + tail call void @llvm.riscv.esp.min.u32.a(i32 0, i32 3) + tail call void @llvm.riscv.esp.min.u8.a(i32 3, i32 10) + tail call void @llvm.riscv.esp.vabs.16(i32 7, i32 5) + tail call void @llvm.riscv.esp.vabs.32(i32 3, i32 1) + tail call void @llvm.riscv.esp.vabs.8(i32 5, i32 4) + tail call void @llvm.riscv.esp.vadd.s16(i32 1, i32 5, i32 4) + %65 = tail call i32 @llvm.riscv.esp.vadd.s16.ld.incp(i32 2, i32 3, i32 10, i32 7, i32 5) + %66 = tail call i32 @llvm.riscv.esp.vadd.s16.st.incp(i32 0, i32 7, i32 0, i32 4, i32 4) + tail 
call void @llvm.riscv.esp.vadd.s32(i32 0, i32 5, i32 4) + %67 = tail call i32 @llvm.riscv.esp.vadd.s32.ld.incp(i32 3, i32 6, i32 1, i32 0, i32 5) + %68 = tail call i32 @llvm.riscv.esp.vadd.s32.st.incp(i32 3, i32 1, i32 7, i32 0, i32 4) + tail call void @llvm.riscv.esp.vadd.s8(i32 4, i32 4, i32 7) + %69 = tail call i32 @llvm.riscv.esp.vadd.s8.ld.incp(i32 6, i32 4, i32 13, i32 7, i32 6) + %70 = tail call i32 @llvm.riscv.esp.vadd.s8.st.incp(i32 2, i32 6, i32 5, i32 0, i32 1) + tail call void @llvm.riscv.esp.vadd.u16(i32 4, i32 6, i32 0) + %71 = tail call i32 @llvm.riscv.esp.vadd.u16.ld.incp(i32 0, i32 1, i32 14, i32 7, i32 1) + %72 = tail call i32 @llvm.riscv.esp.vadd.u16.st.incp(i32 0, i32 3, i32 3, i32 5, i32 4) + tail call void @llvm.riscv.esp.vadd.u32(i32 7, i32 5, i32 3) + %73 = tail call i32 @llvm.riscv.esp.vadd.u32.ld.incp(i32 3, i32 5, i32 11, i32 7, i32 3) + %74 = tail call i32 @llvm.riscv.esp.vadd.u32.st.incp(i32 0, i32 6, i32 0, i32 10, i32 6) + tail call void @llvm.riscv.esp.vadd.u8(i32 3, i32 0, i32 5) + %75 = tail call i32 @llvm.riscv.esp.vadd.u8.ld.incp(i32 4, i32 2, i32 12, i32 4, i32 4) + %76 = tail call i32 @llvm.riscv.esp.vadd.u8.st.incp(i32 5, i32 1, i32 7, i32 3, i32 6) + tail call void @llvm.riscv.esp.vclamp.s16(i32 0, i32 10, i32 6) + tail call void @llvm.riscv.esp.vmax.s16(i32 3, i32 1, i32 5) + %77 = tail call i32 @llvm.riscv.esp.vmax.s16.ld.incp(i32 6, i32 5, i32 1, i32 3, i32 7) + %78 = tail call i32 @llvm.riscv.esp.vmax.s16.st.incp(i32 5, i32 4, i32 0, i32 8, i32 6) + tail call void @llvm.riscv.esp.vmax.s32(i32 1, i32 3, i32 7) + %79 = tail call i32 @llvm.riscv.esp.vmax.s32.ld.incp(i32 0, i32 1, i32 9, i32 0, i32 5) + %80 = tail call i32 @llvm.riscv.esp.vmax.s32.st.incp(i32 0, i32 3, i32 3, i32 5, i32 5) + tail call void @llvm.riscv.esp.vmax.s8(i32 1, i32 6, i32 4) + %81 = tail call i32 @llvm.riscv.esp.vmax.s8.ld.incp(i32 6, i32 2, i32 0, i32 3, i32 3) + %82 = tail call i32 @llvm.riscv.esp.vmax.s8.st.incp(i32 6, i32 2, i32 7, i32 6, i32 3) 
+ tail call void @llvm.riscv.esp.vmax.u16(i32 1, i32 4, i32 5) + %83 = tail call i32 @llvm.riscv.esp.vmax.u16.ld.incp(i32 6, i32 1, i32 0, i32 4, i32 0) + %84 = tail call i32 @llvm.riscv.esp.vmax.u16.st.incp(i32 6, i32 6, i32 2, i32 10, i32 3) + tail call void @llvm.riscv.esp.vmax.u32(i32 6, i32 5, i32 0) + %85 = tail call i32 @llvm.riscv.esp.vmax.u32.ld.incp(i32 6, i32 3, i32 8, i32 6, i32 4) + %86 = tail call i32 @llvm.riscv.esp.vmax.u32.st.incp(i32 3, i32 2, i32 7, i32 11, i32 5) + tail call void @llvm.riscv.esp.vmax.u8(i32 6, i32 4, i32 5) + %87 = tail call i32 @llvm.riscv.esp.vmax.u8.ld.incp(i32 3, i32 5, i32 12, i32 2, i32 2) + %88 = tail call i32 @llvm.riscv.esp.vmax.u8.st.incp(i32 7, i32 1, i32 7, i32 5, i32 0) + tail call void @llvm.riscv.esp.vmin.s16(i32 7, i32 6, i32 2) + %89 = tail call i32 @llvm.riscv.esp.vmin.s16.ld.incp(i32 0, i32 3, i32 7, i32 1, i32 6) + %90 = tail call i32 @llvm.riscv.esp.vmin.s16.st.incp(i32 1, i32 6, i32 1, i32 12, i32 2) + tail call void @llvm.riscv.esp.vmin.s32(i32 6, i32 3, i32 3) + %91 = tail call i32 @llvm.riscv.esp.vmin.s32.ld.incp(i32 1, i32 4, i32 9, i32 5, i32 3) + %92 = tail call i32 @llvm.riscv.esp.vmin.s32.st.incp(i32 2, i32 6, i32 3, i32 1, i32 3) + tail call void @llvm.riscv.esp.vmin.s8(i32 2, i32 2, i32 4) + %93 = tail call i32 @llvm.riscv.esp.vmin.s8.ld.incp(i32 2, i32 6, i32 6, i32 6, i32 4) + %94 = tail call i32 @llvm.riscv.esp.vmin.s8.st.incp(i32 5, i32 0, i32 5, i32 11, i32 1) + tail call void @llvm.riscv.esp.vmin.u16(i32 1, i32 5, i32 5) + %95 = tail call i32 @llvm.riscv.esp.vmin.u16.ld.incp(i32 7, i32 2, i32 6, i32 2, i32 2) + %96 = tail call i32 @llvm.riscv.esp.vmin.u16.st.incp(i32 5, i32 5, i32 0, i32 14, i32 4) + tail call void @llvm.riscv.esp.vmin.u32(i32 6, i32 0, i32 5) + %97 = tail call i32 @llvm.riscv.esp.vmin.u32.ld.incp(i32 3, i32 4, i32 11, i32 2, i32 5) + %98 = tail call i32 @llvm.riscv.esp.vmin.u32.st.incp(i32 3, i32 5, i32 1, i32 1, i32 2) + tail call void @llvm.riscv.esp.vmin.u8(i32 6, i32 6, 
i32 0) + %99 = tail call i32 @llvm.riscv.esp.vmin.u8.ld.incp(i32 4, i32 7, i32 1, i32 5, i32 3) + %100 = tail call i32 @llvm.riscv.esp.vmin.u8.st.incp(i32 2, i32 5, i32 4, i32 6, i32 7) + tail call void @llvm.riscv.esp.vmul.s16(i32 7, i32 6, i32 6) + %101 = tail call i32 @llvm.riscv.esp.vmul.s16.ld.incp(i32 5, i32 2, i32 3, i32 1, i32 6) + tail call void @llvm.riscv.esp.vmul.s16.s8xs8(i32 0, i32 6, i32 2, i32 3) + %102 = tail call i32 @llvm.riscv.esp.vmul.s16.st.incp(i32 6, i32 5, i32 6, i32 1, i32 0) + tail call void @llvm.riscv.esp.vmul.s32.s16xs16(i32 2, i32 0, i32 4, i32 7) + tail call void @llvm.riscv.esp.vmul.s8(i32 3, i32 7, i32 7) + %103 = tail call i32 @llvm.riscv.esp.vmul.s8.ld.incp(i32 0, i32 5, i32 6, i32 7, i32 5) + %104 = tail call i32 @llvm.riscv.esp.vmul.s8.st.incp(i32 2, i32 5, i32 1, i32 6, i32 3) + tail call void @llvm.riscv.esp.vmul.u16(i32 1, i32 4, i32 6) + %105 = tail call i32 @llvm.riscv.esp.vmul.u16.ld.incp(i32 2, i32 4, i32 3, i32 6, i32 0) + %106 = tail call i32 @llvm.riscv.esp.vmul.u16.st.incp(i32 1, i32 3, i32 1, i32 10, i32 6) + tail call void @llvm.riscv.esp.vmul.u8(i32 7, i32 1, i32 6) + %107 = tail call i32 @llvm.riscv.esp.vmul.u8.ld.incp(i32 0, i32 2, i32 14, i32 4, i32 5) + %108 = tail call i32 @llvm.riscv.esp.vmul.u8.st.incp(i32 0, i32 5, i32 1, i32 4, i32 4) + tail call void @llvm.riscv.esp.vprelu.s16(i32 0, i32 2, i32 2, i32 4) + tail call void @llvm.riscv.esp.vprelu.s8(i32 11, i32 4, i32 6, i32 5) + tail call void @llvm.riscv.esp.vrelu.s16(i32 3, i32 8, i32 6) + tail call void @llvm.riscv.esp.vrelu.s8(i32 5, i32 6, i32 0) + tail call void @llvm.riscv.esp.vsadds.s16(i32 0, i32 4, i32 7) + tail call void @llvm.riscv.esp.vsadds.s8(i32 12, i32 2, i32 2) + tail call void @llvm.riscv.esp.vsadds.u16(i32 7, i32 2, i32 7) + tail call void @llvm.riscv.esp.vsadds.u8(i32 4, i32 0, i32 4) + tail call void @llvm.riscv.esp.vsat.s16(i32 8, i32 7, i32 5, i32 1) + tail call void @llvm.riscv.esp.vsat.s32(i32 0, i32 6, i32 6, i32 7) + tail call 
void @llvm.riscv.esp.vsat.s8(i32 6, i32 11, i32 4, i32 1) + tail call void @llvm.riscv.esp.vsat.u16(i32 9, i32 13, i32 5, i32 4) + tail call void @llvm.riscv.esp.vsat.u32(i32 11, i32 10, i32 6, i32 6) + tail call void @llvm.riscv.esp.vsat.u8(i32 14, i32 7, i32 6, i32 1) + tail call void @llvm.riscv.esp.vssubs.s16(i32 5, i32 0, i32 5) + tail call void @llvm.riscv.esp.vssubs.s8(i32 8, i32 2, i32 5) + tail call void @llvm.riscv.esp.vssubs.u16(i32 10, i32 7, i32 6) + tail call void @llvm.riscv.esp.vssubs.u8(i32 0, i32 7, i32 0) + tail call void @llvm.riscv.esp.vsub.s16(i32 7, i32 1, i32 1) + %109 = tail call i32 @llvm.riscv.esp.vsub.s16.ld.incp(i32 5, i32 2, i32 5, i32 1, i32 2) + %110 = tail call i32 @llvm.riscv.esp.vsub.s16.st.incp(i32 4, i32 5, i32 2, i32 8, i32 2) + tail call void @llvm.riscv.esp.vsub.s32(i32 7, i32 0, i32 0) + %111 = tail call i32 @llvm.riscv.esp.vsub.s32.ld.incp(i32 5, i32 7, i32 2, i32 5, i32 6) + %112 = tail call i32 @llvm.riscv.esp.vsub.s32.st.incp(i32 3, i32 2, i32 2, i32 10, i32 2) + tail call void @llvm.riscv.esp.vsub.s8(i32 7, i32 2, i32 0) + %113 = tail call i32 @llvm.riscv.esp.vsub.s8.ld.incp(i32 4, i32 0, i32 5, i32 6, i32 4) + %114 = tail call i32 @llvm.riscv.esp.vsub.s8.st.incp(i32 1, i32 6, i32 2, i32 13, i32 7) + tail call void @llvm.riscv.esp.vsub.u16(i32 6, i32 7, i32 0) + %115 = tail call i32 @llvm.riscv.esp.vsub.u16.ld.incp(i32 1, i32 6, i32 11, i32 5, i32 7) + %116 = tail call i32 @llvm.riscv.esp.vsub.u16.st.incp(i32 4, i32 7, i32 1, i32 3, i32 0) + tail call void @llvm.riscv.esp.vsub.u32(i32 5, i32 7, i32 6) + %117 = tail call i32 @llvm.riscv.esp.vsub.u32.ld.incp(i32 3, i32 0, i32 11, i32 4, i32 0) + %118 = tail call i32 @llvm.riscv.esp.vsub.u32.st.incp(i32 0, i32 4, i32 4, i32 9, i32 4) + tail call void @llvm.riscv.esp.vsub.u8(i32 6, i32 7, i32 6) + %119 = tail call i32 @llvm.riscv.esp.vsub.u8.ld.incp(i32 3, i32 5, i32 8, i32 3, i32 3) + %120 = tail call i32 @llvm.riscv.esp.vsub.u8.st.incp(i32 6, i32 6, i32 7, i32 11, i32 0) 
+ tail call void @llvm.riscv.esp.addx2(i32 5, i32 6, i32 4) + tail call void @llvm.riscv.esp.addx4(i32 2, i32 9, i32 4) + %121 = tail call i32 @llvm.riscv.esp.sat(i32 14, i32 8, i32 10) + tail call void @llvm.riscv.esp.subx2(i32 10, i32 11, i32 11) + tail call void @llvm.riscv.esp.subx4(i32 7, i32 13, i32 4) + tail call void @llvm.riscv.esp.andq(i32 6, i32 6, i32 3) + tail call void @llvm.riscv.esp.notq(i32 3, i32 0) + tail call void @llvm.riscv.esp.orq(i32 2, i32 6, i32 6) + tail call void @llvm.riscv.esp.xorq(i32 2, i32 2, i32 6) + tail call void @llvm.riscv.esp.vcmp.eq.s16(i32 2, i32 6, i32 2) + tail call void @llvm.riscv.esp.vcmp.eq.s32(i32 1, i32 1, i32 5) + tail call void @llvm.riscv.esp.vcmp.eq.s8(i32 5, i32 3, i32 7) + tail call void @llvm.riscv.esp.vcmp.eq.u16(i32 5, i32 2, i32 5) + tail call void @llvm.riscv.esp.vcmp.eq.u32(i32 6, i32 2, i32 5) + tail call void @llvm.riscv.esp.vcmp.eq.u8(i32 1, i32 2, i32 0) + tail call void @llvm.riscv.esp.vcmp.gt.s16(i32 2, i32 1, i32 5) + tail call void @llvm.riscv.esp.vcmp.gt.s32(i32 6, i32 4, i32 4) + tail call void @llvm.riscv.esp.vcmp.gt.s8(i32 4, i32 1, i32 6) + tail call void @llvm.riscv.esp.vcmp.gt.u16(i32 1, i32 7, i32 5) + tail call void @llvm.riscv.esp.vcmp.gt.u32(i32 5, i32 3, i32 5) + tail call void @llvm.riscv.esp.vcmp.gt.u8(i32 1, i32 5, i32 2) + tail call void @llvm.riscv.esp.vcmp.lt.s16(i32 2, i32 6, i32 1) + tail call void @llvm.riscv.esp.vcmp.lt.s32(i32 4, i32 0, i32 3) + tail call void @llvm.riscv.esp.vcmp.lt.s8(i32 6, i32 3, i32 7) + tail call void @llvm.riscv.esp.vcmp.lt.u16(i32 6, i32 0, i32 3) + tail call void @llvm.riscv.esp.vcmp.lt.u32(i32 1, i32 6, i32 2) + tail call void @llvm.riscv.esp.vcmp.lt.u8(i32 5, i32 3, i32 0) + tail call void @llvm.riscv.esp.mov.s16.qacc(i32 1) + tail call void @llvm.riscv.esp.mov.s8.qacc(i32 6) + tail call void @llvm.riscv.esp.mov.u16.qacc(i32 2) + tail call void @llvm.riscv.esp.mov.u8.qacc(i32 2) + tail call void @llvm.riscv.esp.movi.16.a(i32 6, i32 12, i32 7) + 
tail call void @llvm.riscv.esp.movi.16.q(i32 7, i32 6, i32 5) + tail call void @llvm.riscv.esp.movi.32.a(i32 3, i32 1, i32 4) + tail call void @llvm.riscv.esp.movi.32.q(i32 12, i32 3, i32 7) + tail call void @llvm.riscv.esp.movi.8.a(i32 1, i32 9, i32 14) + tail call void @llvm.riscv.esp.movi.8.q(i32 14, i32 9, i32 5) + tail call void @llvm.riscv.esp.movx.r.fft.bit.width(i32 8) + tail call void @llvm.riscv.esp.movx.r.perf(i32 0, i32 5) + tail call void @llvm.riscv.esp.movx.r.sar(i32 6) + tail call void @llvm.riscv.esp.movx.r.sar.bytes(i32 8) + tail call void @llvm.riscv.esp.movx.r.xacc.h(i32 0) + tail call void @llvm.riscv.esp.movx.r.xacc.l(i32 2) + tail call void @llvm.riscv.esp.movx.w.cfg(i32 5) + tail call void @llvm.riscv.esp.movx.w.fft.bit.width(i32 10) + tail call void @llvm.riscv.esp.movx.w.perf(i32 1) + tail call void @llvm.riscv.esp.movx.w.sar(i32 9) + tail call void @llvm.riscv.esp.movx.w.sar.bytes(i32 5) + tail call void @llvm.riscv.esp.movx.w.xacc.h(i32 2) + tail call void @llvm.riscv.esp.movx.w.xacc.l(i32 5) + tail call void @llvm.riscv.esp.vext.s16(i32 6, i32 6, i32 1) + tail call void @llvm.riscv.esp.vext.s8(i32 0, i32 6, i32 0) + tail call void @llvm.riscv.esp.vext.u16(i32 7, i32 3, i32 0) + tail call void @llvm.riscv.esp.vext.u8(i32 3, i32 3, i32 0) + tail call void @llvm.riscv.esp.vunzip.16(i32 1, i32 5) + tail call void @llvm.riscv.esp.vunzip.32(i32 6, i32 1) + tail call void @llvm.riscv.esp.vunzip.8(i32 5, i32 6) + tail call void @llvm.riscv.esp.vunzipt.16(i32 2, i32 2, i32 5) + tail call void @llvm.riscv.esp.vunzipt.8(i32 6, i32 0, i32 7) + tail call void @llvm.riscv.esp.vzip.16(i32 1, i32 2) + tail call void @llvm.riscv.esp.vzip.32(i32 6, i32 3) + tail call void @llvm.riscv.esp.vzip.8(i32 1, i32 0) + tail call void @llvm.riscv.esp.vzipt.16(i32 7, i32 7, i32 4) + tail call void @llvm.riscv.esp.vzipt.8(i32 4, i32 5, i32 2) + tail call void @llvm.riscv.esp.zero.q(i32 0) tail call void @llvm.riscv.esp.zero.qacc() tail call void 
@llvm.riscv.esp.zero.xacc() - tail call void @llvm.riscv.esp.fft.ams.s16.ld.incp(i32 3, i32 1, i32 0, i32 3, i32 0, i32 6, i32 6, i32 0) - tail call void @llvm.riscv.esp.fft.ams.s16.ld.incp.uaup(i32 3, i32 0, i32 1, i32 5, i32 0, i32 3, i32 0, i32 2) - tail call void @llvm.riscv.esp.fft.ams.s16.ld.r32.decp(i32 3, i32 5, i32 1, i32 3, i32 1, i32 7, i32 0, i32 6) - tail call void @llvm.riscv.esp.fft.ams.s16.st.incp(i32 5, i32 6, i32 3, i32 5, i32 4, i32 2, i32 0, i32 7) - tail call void @llvm.riscv.esp.fft.bitrev(i32 2, i32 7) - tail call void @llvm.riscv.esp.fft.cmul.s16.ld.xp(i32 1, i32 7, i32 7, i32 4, i32 1, i32 3, i32 2) - tail call void @llvm.riscv.esp.fft.cmul.s16.st.xp(i32 2, i32 0, i32 7, i32 4, i32 4, i32 1, i32 3, i32 4) - tail call void @llvm.riscv.esp.fft.r2bf.s16(i32 5, i32 1, i32 0, i32 7, i32 3) - tail call void @llvm.riscv.esp.fft.r2bf.s16.st.incp(i32 7, i32 4, i32 10, i32 2, i32 7) - tail call void @llvm.riscv.esp.fft.vst.r32.decp(i32 5, i32 7, i32 1) - tail call void @llvm.riscv.esp.ld.128.usar.ip(i32 4, i32 608, i32 1) - tail call void @llvm.riscv.esp.ld.128.usar.xp(i32 8, i32 1, i32 2) - tail call void @llvm.riscv.esp.ld.xacc.ip(i32 6, i32 400) - tail call void @llvm.riscv.esp.ldqa.s16.128.ip(i32 13, i32 912) - tail call void @llvm.riscv.esp.ldqa.s16.128.xp(i32 10, i32 2) - tail call void @llvm.riscv.esp.ldqa.s8.128.ip(i32 1, i32 1824) - tail call void @llvm.riscv.esp.ldqa.s8.128.xp(i32 4, i32 9) - tail call void @llvm.riscv.esp.ldqa.u16.128.ip(i32 4, i32 -1904) - tail call void @llvm.riscv.esp.ldqa.u16.128.xp(i32 14, i32 6) - tail call void @llvm.riscv.esp.ldqa.u8.128.ip(i32 3, i32 1216) - tail call void @llvm.riscv.esp.ldqa.u8.128.xp(i32 6, i32 2) - tail call void @llvm.riscv.esp.vldbc.16.ip(i32 9, i32 -448, i32 7) - tail call void @llvm.riscv.esp.vldbc.16.xp(i32 9, i32 5, i32 3) - tail call void @llvm.riscv.esp.vldbc.32.ip(i32 0, i32 220, i32 3) - tail call void @llvm.riscv.esp.vldbc.32.xp(i32 2, i32 12, i32 7) - tail call void 
@llvm.riscv.esp.vldbc.8.ip(i32 12, i32 396, i32 2) - tail call void @llvm.riscv.esp.vldbc.8.xp(i32 9, i32 4, i32 7) - tail call void @llvm.riscv.esp.vldext.s16.ip(i32 13, i32 16, i32 7, i32 4) - tail call void @llvm.riscv.esp.vldext.s16.xp(i32 8, i32 0, i32 5, i32 0) - tail call void @llvm.riscv.esp.vldext.s8.ip(i32 4, i32 80, i32 3, i32 6) - tail call void @llvm.riscv.esp.vldext.s8.xp(i32 6, i32 3, i32 1, i32 1) - tail call void @llvm.riscv.esp.vldext.u16.ip(i32 14, i32 48, i32 2, i32 5) - tail call void @llvm.riscv.esp.vldext.u16.xp(i32 12, i32 7, i32 2, i32 0) - tail call void @llvm.riscv.esp.vldext.u8.ip(i32 13, i32 64, i32 7, i32 2) - tail call void @llvm.riscv.esp.vldext.u8.xp(i32 6, i32 6, i32 7, i32 2) - tail call void @llvm.riscv.esp.vldhbc.16.incp(i32 1, i32 4, i32 7) - tail call void @llvm.riscv.esp.ld.qacc.h.h.128.ip(i32 6, i32 512) - tail call void @llvm.riscv.esp.ld.qacc.h.l.128.ip(i32 5, i32 -784) - tail call void @llvm.riscv.esp.ld.qacc.l.h.128.ip(i32 10, i32 -800) - tail call void @llvm.riscv.esp.ld.qacc.l.l.128.ip(i32 10, i32 -1952) - tail call void @llvm.riscv.esp.ld.ua.state.ip(i32 8, i32 -752) - tail call void @llvm.riscv.esp.ldxq.32(i32 0, i32 4, i32 2, i32 4, i32 7) - tail call void @llvm.riscv.esp.st.qacc.h.h.128.ip(i32 13, i32 -336) - tail call void @llvm.riscv.esp.st.qacc.h.l.128.ip(i32 8, i32 1568) - tail call void @llvm.riscv.esp.st.qacc.l.h.128.ip(i32 4, i32 16) - tail call void @llvm.riscv.esp.st.qacc.l.l.128.ip(i32 8, i32 416) - tail call void @llvm.riscv.esp.st.ua.state.ip(i32 7, i32 -1360) - tail call void @llvm.riscv.esp.stxq.32(i32 6, i32 6, i32 0, i32 2, i32 5) - tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 3) - tail call void @llvm.riscv.esp.vld.128.xp(i32 9, i32 7, i32 3) - tail call void @llvm.riscv.esp.vld.h.64.ip(i32 0, i32 -352, i32 0) - tail call void @llvm.riscv.esp.vld.h.64.xp(i32 14, i32 2, i32 2) - tail call void @llvm.riscv.esp.vld.l.64.ip(i32 6, i32 56, i32 2) - tail call void 
@llvm.riscv.esp.vld.l.64.xp(i32 1, i32 9, i32 5) - tail call void @llvm.riscv.esp.vst.128.ip(i32 5, i32 6, i32 -960) - tail call void @llvm.riscv.esp.vst.128.xp(i32 7, i32 6, i32 13) - tail call void @llvm.riscv.esp.vst.h.64.ip(i32 7, i32 4, i32 944) - tail call void @llvm.riscv.esp.vst.h.64.xp(i32 10, i32 7, i32 7) - tail call void @llvm.riscv.esp.vst.l.64.ip(i32 5, i32 3, i32 984) - tail call void @llvm.riscv.esp.vst.l.64.xp(i32 12, i32 5, i32 3) - tail call void @llvm.riscv.esp.slci.2q(i32 1, i32 5, i32 12) - tail call void @llvm.riscv.esp.slcxxp.2q(i32 5, i32 5, i32 2, i32 3) - tail call void @llvm.riscv.esp.src.q(i32 3, i32 1, i32 2) - tail call void @llvm.riscv.esp.src.q.ld.ip(i32 5, i32 0, i32 5, i32 -272, i32 0) - tail call void @llvm.riscv.esp.src.q.ld.xp(i32 3, i32 6, i32 12, i32 7, i32 1) - tail call void @llvm.riscv.esp.src.q.qup(i32 4, i32 7, i32 3) - tail call void @llvm.riscv.esp.srci.2q(i32 2, i32 3, i32 7) - tail call void @llvm.riscv.esp.srcmb.s16.q.qacc(i32 4, i32 0, i32 2) - tail call void @llvm.riscv.esp.srcmb.s16.qacc(i32 13, i32 1, i32 5) - tail call void @llvm.riscv.esp.srcmb.s8.q.qacc(i32 4, i32 0, i32 5) - tail call void @llvm.riscv.esp.srcmb.s8.qacc(i32 8, i32 1, i32 1) - tail call void @llvm.riscv.esp.srcmb.u16.q.qacc(i32 3, i32 1, i32 0) - tail call void @llvm.riscv.esp.srcmb.u16.qacc(i32 13, i32 0, i32 7) - tail call void @llvm.riscv.esp.srcmb.u8.q.qacc(i32 5, i32 1, i32 3) - tail call void @llvm.riscv.esp.srcmb.u8.qacc(i32 5, i32 0, i32 0) - tail call void @llvm.riscv.esp.srcq.128.st.incp(i32 5, i32 0, i32 12) - tail call void @llvm.riscv.esp.srcxxp.2q(i32 12, i32 6, i32 4, i32 6) - tail call void @llvm.riscv.esp.srs.s.xacc(i32 6, i32 13) - tail call void @llvm.riscv.esp.srs.u.xacc(i32 3, i32 12) - tail call void @llvm.riscv.esp.vsl.32(i32 2, i32 5) - tail call void @llvm.riscv.esp.vsld.16(i32 3, i32 7, i32 3) - tail call void @llvm.riscv.esp.vsld.32(i32 7, i32 1, i32 3) - tail call void @llvm.riscv.esp.vsld.8(i32 1, i32 5, i32 0) - 
tail call void @llvm.riscv.esp.vsr.s32(i32 0, i32 3) - tail call void @llvm.riscv.esp.vsr.u32(i32 2, i32 1) - tail call void @llvm.riscv.esp.vsrd.16(i32 3, i32 0, i32 4) - tail call void @llvm.riscv.esp.vsrd.32(i32 6, i32 3, i32 0) - tail call void @llvm.riscv.esp.vsrd.8(i32 4, i32 1, i32 5) - tail call void @llvm.riscv.esp.st.s.xacc.ip(i32 8, i32 80) - tail call void @llvm.riscv.esp.st.u.xacc.ip(i32 6, i32 -464) + %122 = tail call i32 @llvm.riscv.esp.fft.ams.s16.ld.incp(i32 7, i32 0, i32 6, i32 13, i32 1, i32 6, i32 2, i32 1) + %123 = tail call i32 @llvm.riscv.esp.fft.ams.s16.ld.incp.uaup(i32 7, i32 0, i32 7, i32 4, i32 1, i32 7, i32 6, i32 3) + %124 = tail call i32 @llvm.riscv.esp.fft.ams.s16.ld.r32.decp(i32 3, i32 1, i32 7, i32 1, i32 0, i32 4, i32 2, i32 6) + tail call void @llvm.riscv.esp.fft.ams.s16.st.incp(i32 3, i32 7, i32 1, i32 6, i32 8, i32 14, i32 1, i32 0) + %125 = tail call i32 @llvm.riscv.esp.fft.bitrev(i32 14, i32 2) + %126 = tail call i32 @llvm.riscv.esp.fft.cmul.s16.ld.xp(i32 1, i32 7, i32 1, i32 7, i32 2, i32 6, i32 4) + %127 = tail call i32 @llvm.riscv.esp.fft.cmul.s16.st.xp(i32 10, i32 5, i32 0, i32 4, i32 13, i32 1, i32 0, i32 2) + tail call void @llvm.riscv.esp.fft.r2bf.s16(i32 3, i32 0, i32 0, i32 3, i32 7) + %128 = tail call i32 @llvm.riscv.esp.fft.r2bf.s16.st.incp(i32 5, i32 0, i32 1, i32 0, i32 1) + %129 = tail call i32 @llvm.riscv.esp.fft.vst.r32.decp(i32 3, i32 0, i32 0) + %130 = tail call i32 @llvm.riscv.esp.ld.128.usar.ip(i32 12, i32 -1776, i32 3) + %131 = tail call i32 @llvm.riscv.esp.ld.128.usar.xp(i32 0, i32 13, i32 0) + %132 = tail call i32 @llvm.riscv.esp.ld.xacc.ip(i32 2, i32 -488) + %133 = tail call i32 @llvm.riscv.esp.ldqa.s16.128.ip(i32 13, i32 -1488) + %134 = tail call i32 @llvm.riscv.esp.ldqa.s16.128.xp(i32 0, i32 13) + %135 = tail call i32 @llvm.riscv.esp.ldqa.s8.128.ip(i32 1, i32 -256) + %136 = tail call i32 @llvm.riscv.esp.ldqa.s8.128.xp(i32 14, i32 14) + %137 = tail call i32 @llvm.riscv.esp.ldqa.u16.128.ip(i32 7, i32 
-1936) + %138 = tail call i32 @llvm.riscv.esp.ldqa.u16.128.xp(i32 9, i32 11) + %139 = tail call i32 @llvm.riscv.esp.ldqa.u8.128.ip(i32 6, i32 688) + %140 = tail call i32 @llvm.riscv.esp.ldqa.u8.128.xp(i32 3, i32 4) + %141 = tail call i32 @llvm.riscv.esp.vldbc.16.ip(i32 10, i32 80, i32 1) + %142 = tail call i32 @llvm.riscv.esp.vldbc.16.xp(i32 3, i32 2, i32 2) + %143 = tail call i32 @llvm.riscv.esp.vldbc.32.ip(i32 8, i32 396, i32 5) + %144 = tail call i32 @llvm.riscv.esp.vldbc.32.xp(i32 14, i32 6, i32 3) + %145 = tail call i32 @llvm.riscv.esp.vldbc.8.ip(i32 6, i32 -492, i32 4) + %146 = tail call i32 @llvm.riscv.esp.vldbc.8.xp(i32 14, i32 9, i32 6) + %147 = tail call i32 @llvm.riscv.esp.vldext.s16.ip(i32 10, i32 32, i32 6, i32 2) + %148 = tail call i32 @llvm.riscv.esp.vldext.s16.xp(i32 11, i32 0, i32 2, i32 6) + %149 = tail call i32 @llvm.riscv.esp.vldext.s8.ip(i32 9, i32 -112, i32 1, i32 0) + %150 = tail call i32 @llvm.riscv.esp.vldext.s8.xp(i32 14, i32 12, i32 7, i32 0) + %151 = tail call i32 @llvm.riscv.esp.vldext.u16.ip(i32 8, i32 48, i32 3, i32 1) + %152 = tail call i32 @llvm.riscv.esp.vldext.u16.xp(i32 4, i32 5, i32 0, i32 1) + %153 = tail call i32 @llvm.riscv.esp.vldext.u8.ip(i32 2, i32 -48, i32 5, i32 0) + %154 = tail call i32 @llvm.riscv.esp.vldext.u8.xp(i32 0, i32 14, i32 0, i32 6) + %155 = tail call i32 @llvm.riscv.esp.vldhbc.16.incp(i32 2, i32 6, i32 5) + %156 = tail call i32 @llvm.riscv.esp.ld.qacc.h.h.128.ip(i32 14, i32 -1296) + %157 = tail call i32 @llvm.riscv.esp.ld.qacc.h.l.128.ip(i32 1, i32 -64) + %158 = tail call i32 @llvm.riscv.esp.ld.qacc.l.h.128.ip(i32 10, i32 608) + %159 = tail call i32 @llvm.riscv.esp.ld.qacc.l.l.128.ip(i32 3, i32 656) + %160 = tail call i32 @llvm.riscv.esp.ld.ua.state.ip(i32 2, i32 1392) + tail call void @llvm.riscv.esp.ldxq.32(i32 8, i32 2, i32 2, i32 2, i32 5) + %161 = tail call i32 @llvm.riscv.esp.st.qacc.h.h.128.ip(i32 1, i32 -432) + %162 = tail call i32 @llvm.riscv.esp.st.qacc.h.l.128.ip(i32 14, i32 -1792) + %163 = tail 
call i32 @llvm.riscv.esp.st.qacc.l.h.128.ip(i32 10, i32 320) + %164 = tail call i32 @llvm.riscv.esp.st.qacc.l.l.128.ip(i32 8, i32 -496) + %165 = tail call i32 @llvm.riscv.esp.st.ua.state.ip(i32 8, i32 1856) + tail call void @llvm.riscv.esp.stxq.32(i32 4, i32 5, i32 3, i32 2, i32 1) + %166 = tail call i32 @llvm.riscv.esp.vld.128.ip(i32 0, i32 496, i32 0) + %167 = tail call i32 @llvm.riscv.esp.vld.128.xp(i32 4, i32 14, i32 2) + %168 = tail call i32 @llvm.riscv.esp.vld.h.64.ip(i32 14, i32 -88, i32 2) + %169 = tail call i32 @llvm.riscv.esp.vld.h.64.xp(i32 2, i32 11, i32 3) + %170 = tail call i32 @llvm.riscv.esp.vld.l.64.ip(i32 13, i32 240, i32 2) + %171 = tail call i32 @llvm.riscv.esp.vld.l.64.xp(i32 1, i32 12, i32 3) + %172 = tail call i32 @llvm.riscv.esp.vst.128.ip(i32 6, i32 14, i32 -512) + %173 = tail call i32 @llvm.riscv.esp.vst.128.xp(i32 4, i32 2, i32 14) + %174 = tail call i32 @llvm.riscv.esp.vst.h.64.ip(i32 5, i32 13, i32 56) + %175 = tail call i32 @llvm.riscv.esp.vst.h.64.xp(i32 13, i32 1, i32 13) + %176 = tail call i32 @llvm.riscv.esp.vst.l.64.ip(i32 3, i32 7, i32 952) + %177 = tail call i32 @llvm.riscv.esp.vst.l.64.xp(i32 9, i32 2, i32 8) + tail call void @llvm.riscv.esp.slci.2q(i32 1, i32 3, i32 6) + tail call void @llvm.riscv.esp.slcxxp.2q(i32 6, i32 4, i32 4, i32 3) + tail call void @llvm.riscv.esp.src.q(i32 5, i32 1, i32 4) + %178 = tail call i32 @llvm.riscv.esp.src.q.ld.ip(i32 1, i32 4, i32 2, i32 -1776, i32 2) + %179 = tail call i32 @llvm.riscv.esp.src.q.ld.xp(i32 2, i32 6, i32 5, i32 1, i32 1) + tail call void @llvm.riscv.esp.src.q.qup(i32 2, i32 0, i32 6) + tail call void @llvm.riscv.esp.srci.2q(i32 3, i32 3, i32 12) + tail call void @llvm.riscv.esp.srcmb.s16.q.qacc(i32 5, i32 1, i32 1) + tail call void @llvm.riscv.esp.srcmb.s16.qacc(i32 4, i32 0, i32 3) + tail call void @llvm.riscv.esp.srcmb.s8.q.qacc(i32 5, i32 1, i32 2) + tail call void @llvm.riscv.esp.srcmb.s8.qacc(i32 0, i32 0, i32 0) + tail call void @llvm.riscv.esp.srcmb.u16.q.qacc(i32 4, i32 
1, i32 0) + tail call void @llvm.riscv.esp.srcmb.u16.qacc(i32 8, i32 0, i32 6) + tail call void @llvm.riscv.esp.srcmb.u8.q.qacc(i32 6, i32 1, i32 5) + tail call void @llvm.riscv.esp.srcmb.u8.qacc(i32 7, i32 0, i32 6) + %180 = tail call i32 @llvm.riscv.esp.srcq.128.st.incp(i32 4, i32 0, i32 14) + tail call void @llvm.riscv.esp.srcxxp.2q(i32 0, i32 5, i32 5, i32 0) + tail call void @llvm.riscv.esp.srs.s.xacc(i32 2, i32 8) + tail call void @llvm.riscv.esp.srs.u.xacc(i32 6, i32 8) + tail call void @llvm.riscv.esp.vsl.32(i32 6, i32 1) + tail call void @llvm.riscv.esp.vsld.16(i32 0, i32 2, i32 3) + tail call void @llvm.riscv.esp.vsld.32(i32 1, i32 1, i32 0) + tail call void @llvm.riscv.esp.vsld.8(i32 2, i32 1, i32 1) + tail call void @llvm.riscv.esp.vsr.s32(i32 7, i32 3) + tail call void @llvm.riscv.esp.vsr.u32(i32 5, i32 4) + tail call void @llvm.riscv.esp.vsrd.16(i32 5, i32 3, i32 1) + tail call void @llvm.riscv.esp.vsrd.32(i32 1, i32 2, i32 4) + tail call void @llvm.riscv.esp.vsrd.8(i32 7, i32 4, i32 4) + %181 = tail call i32 @llvm.riscv.esp.st.s.xacc.ip(i32 1, i32 -720) + %182 = tail call i32 @llvm.riscv.esp.st.u.xacc.ip(i32 13, i32 -576) + %183 = tail call i32 @llvm.riscv.esp.movx.r.cfg() + %184 = or i32 %183, 2 + tail call void @llvm.riscv.esp.movx.w.cfg(i32 %184) ret void } declare void @llvm.riscv.esp.vcmulas.s16.qacc.h(i32, i32) nounwind -declare void @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.xp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.ip(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.xp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vcmulas.s16.qacc.l(i32, i32) nounwind -declare void @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.xp(i32, i32, i32, i32, i32) nounwind +declare i32 
@llvm.riscv.esp.vcmulas.s16.qacc.l.ld.ip(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.xp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vcmulas.s8.qacc.h(i32, i32) nounwind -declare void @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.xp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.ip(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.xp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vcmulas.s8.qacc.l(i32, i32) nounwind -declare void @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.xp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.ip(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.xp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmulas.s16.qacc(i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.s16.qacc.ld.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.s16.qacc.ld.xp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.s16.qacc.st.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.s16.qacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.s16.qacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.s16.qacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.s16.qacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.s16.qacc.st.xp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmulas.s16.xacc(i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.s16.xacc.ld.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.s16.xacc.ld.xp(i32, i32, i32, i32, i32) nounwind -declare void 
@llvm.riscv.esp.vmulas.s16.xacc.st.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.s16.xacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.s16.xacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.s16.xacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.s16.xacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.s16.xacc.st.xp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmulas.s8.qacc(i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.s8.qacc.ld.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.s8.qacc.ld.xp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.s8.qacc.st.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.s8.qacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.s8.qacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.s8.qacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.s8.qacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.s8.qacc.st.xp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmulas.s8.xacc(i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.s8.xacc.ld.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.s8.xacc.ld.xp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.s8.xacc.st.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.s8.xacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.s8.xacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.s8.xacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.s8.xacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.s8.xacc.st.xp(i32, i32, i32, i32, i32) nounwind declare void 
@llvm.riscv.esp.vmulas.u16.qacc(i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.u16.qacc.ld.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.u16.qacc.ld.xp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.u16.qacc.st.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.u16.qacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.u16.qacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.u16.qacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.u16.qacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.u16.qacc.st.xp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmulas.u16.xacc(i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.u16.xacc.ld.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.u16.xacc.ld.xp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.u16.xacc.st.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.u16.xacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.u16.xacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.u16.xacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.u16.xacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.u16.xacc.st.xp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmulas.u8.qacc(i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.u8.qacc.ld.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.u8.qacc.ld.xp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.u8.qacc.st.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.u8.qacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.u8.qacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare i32 
@llvm.riscv.esp.vmulas.u8.qacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.u8.qacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.u8.qacc.st.xp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmulas.u8.xacc(i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.u8.xacc.ld.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.u8.xacc.ld.xp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.u8.xacc.st.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.u8.xacc.st.xp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.s16.qacc.ldbc.incp(i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.s8.qacc.ldbc.incp(i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.u16.qacc.ldbc.incp(i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmulas.u8.qacc.ldbc.incp(i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.u8.xacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.u8.xacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.u8.xacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.u8.xacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.s16.qacc.ldbc.incp(i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.s8.qacc.ldbc.incp(i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.u16.qacc.ldbc.incp(i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmulas.u8.qacc.ldbc.incp(i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vsmulas.s16.qacc(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vsmulas.s16.qacc.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vsmulas.s16.qacc.ld.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vsmulas.s8.qacc(i32, i32, i32) nounwind -declare void 
@llvm.riscv.esp.vsmulas.s8.qacc.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vsmulas.s8.qacc.ld.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vsmulas.u16.qacc(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vsmulas.u16.qacc.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vsmulas.u16.qacc.ld.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vsmulas.u8.qacc(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vsmulas.u8.qacc.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vsmulas.u8.qacc.ld.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.cmul.s16(i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.cmul.s16.ld.incp(i32, i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.cmul.s16.st.incp(i32, i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.cmul.s16.ld.incp(i32, i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.cmul.s16.st.incp(i32, i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.cmul.s8(i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.cmul.s8.ld.incp(i32, i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.cmul.s8.st.incp(i32, i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.cmul.s8.ld.incp(i32, i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.cmul.s8.st.incp(i32, i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.cmul.u16(i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.cmul.u16.ld.incp(i32, i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.cmul.u16.st.incp(i32, i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.cmul.u16.ld.incp(i32, i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.cmul.u16.st.incp(i32, i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.cmul.u8(i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.cmul.u8.ld.incp(i32, i32, 
i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.cmul.u8.st.incp(i32, i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.cmul.u8.ld.incp(i32, i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.cmul.u8.st.incp(i32, i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.max.s16.a(i32, i32) nounwind declare void @llvm.riscv.esp.max.s32.a(i32, i32) nounwind declare void @llvm.riscv.esp.max.s8.a(i32, i32) nounwind @@ -1025,74 +1027,74 @@ declare void @llvm.riscv.esp.vabs.16(i32, i32) nounwind declare void @llvm.riscv.esp.vabs.32(i32, i32) nounwind declare void @llvm.riscv.esp.vabs.8(i32, i32) nounwind declare void @llvm.riscv.esp.vadd.s16(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vadd.s16.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vadd.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vadd.s16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vadd.s16.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vadd.s32(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vadd.s32.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vadd.s32.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vadd.s32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vadd.s32.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vadd.s8(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vadd.s8.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vadd.s8.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vadd.s8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vadd.s8.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vadd.u16(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vadd.u16.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vadd.u16.st.incp(i32, i32, i32, 
i32, i32) nounwind +declare i32 @llvm.riscv.esp.vadd.u16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vadd.u16.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vadd.u32(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vadd.u32.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vadd.u32.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vadd.u32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vadd.u32.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vadd.u8(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vadd.u8.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vadd.u8.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vadd.u8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vadd.u8.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vclamp.s16(i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmax.s16(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmax.s16.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmax.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmax.s16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmax.s16.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmax.s32(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmax.s32.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmax.s32.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmax.s32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmax.s32.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmax.s8(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmax.s8.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmax.s8.st.incp(i32, i32, i32, i32, i32) nounwind 
+declare i32 @llvm.riscv.esp.vmax.s8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmax.s8.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmax.u16(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmax.u16.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmax.u16.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmax.u16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmax.u16.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmax.u32(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmax.u32.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmax.u32.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmax.u32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmax.u32.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmax.u8(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmax.u8.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmax.u8.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmax.u8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmax.u8.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmin.s16(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmin.s16.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmin.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmin.s16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmin.s16.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmin.s32(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmin.s32.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmin.s32.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmin.s32.ld.incp(i32, i32, i32, i32, i32) nounwind 
+declare i32 @llvm.riscv.esp.vmin.s32.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmin.s8(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmin.s8.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmin.s8.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmin.s8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmin.s8.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmin.u16(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmin.u16.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmin.u16.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmin.u16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmin.u16.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmin.u32(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmin.u32.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmin.u32.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmin.u32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmin.u32.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmin.u8(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmin.u8.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmin.u8.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmin.u8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmin.u8.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmul.s16(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmul.s16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmul.s16.ld.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmul.s16.s8xs8(i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmul.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 
@llvm.riscv.esp.vmul.s16.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmul.s32.s16xs16(i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmul.s8(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmul.s8.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmul.s8.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmul.s8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmul.s8.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmul.u16(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmul.u16.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmul.u16.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmul.u16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmul.u16.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vmul.u8(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmul.u8.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vmul.u8.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmul.u8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vmul.u8.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vprelu.s16(i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vprelu.s8(i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vrelu.s16(i32, i32, i32) nounwind @@ -1112,26 +1114,26 @@ declare void @llvm.riscv.esp.vssubs.s8(i32, i32, i32) nounwind declare void @llvm.riscv.esp.vssubs.u16(i32, i32, i32) nounwind declare void @llvm.riscv.esp.vssubs.u8(i32, i32, i32) nounwind declare void @llvm.riscv.esp.vsub.s16(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vsub.s16.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vsub.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vsub.s16.ld.incp(i32, i32, i32, i32, i32) 
nounwind +declare i32 @llvm.riscv.esp.vsub.s16.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vsub.s32(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vsub.s32.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vsub.s32.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vsub.s32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vsub.s32.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vsub.s8(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vsub.s8.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vsub.s8.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vsub.s8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vsub.s8.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vsub.u16(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vsub.u16.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vsub.u16.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vsub.u16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vsub.u16.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vsub.u32(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vsub.u32.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vsub.u32.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vsub.u32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vsub.u32.st.incp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.vsub.u8(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vsub.u8.ld.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vsub.u8.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vsub.u8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vsub.u8.st.incp(i32, i32, i32, i32, i32) nounwind 
declare void @llvm.riscv.esp.addx2(i32, i32, i32) nounwind declare void @llvm.riscv.esp.addx4(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.sat(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.sat(i32, i32, i32) nounwind declare void @llvm.riscv.esp.subx2(i32, i32, i32) nounwind declare void @llvm.riscv.esp.subx4(i32, i32, i32) nounwind declare void @llvm.riscv.esp.andq(i32, i32, i32) nounwind @@ -1166,7 +1168,7 @@ declare void @llvm.riscv.esp.movi.32.a(i32, i32, i32) nounwind declare void @llvm.riscv.esp.movi.32.q(i32, i32, i32) nounwind declare void @llvm.riscv.esp.movi.8.a(i32, i32, i32) nounwind declare void @llvm.riscv.esp.movi.8.q(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.movx.r.cfg(i32) nounwind +declare i32 @llvm.riscv.esp.movx.r.cfg() nounwind declare void @llvm.riscv.esp.movx.r.fft.bit.width(i32) nounwind declare void @llvm.riscv.esp.movx.r.perf(i32, i32) nounwind declare void @llvm.riscv.esp.movx.r.sar(i32) nounwind @@ -1197,71 +1199,71 @@ declare void @llvm.riscv.esp.vzipt.8(i32, i32, i32) nounwind declare void @llvm.riscv.esp.zero.q(i32) nounwind declare void @llvm.riscv.esp.zero.qacc() nounwind declare void @llvm.riscv.esp.zero.xacc() nounwind -declare void @llvm.riscv.esp.fft.ams.s16.ld.incp(i32, i32, i32, i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.fft.ams.s16.ld.incp.uaup(i32, i32, i32, i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.fft.ams.s16.ld.r32.decp(i32, i32, i32, i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.fft.ams.s16.ld.incp(i32, i32, i32, i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.fft.ams.s16.ld.incp.uaup(i32, i32, i32, i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.fft.ams.s16.ld.r32.decp(i32, i32, i32, i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.fft.ams.s16.st.incp(i32, i32, i32, i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.fft.bitrev(i32, i32) nounwind -declare void 
@llvm.riscv.esp.fft.cmul.s16.ld.xp(i32, i32, i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.fft.cmul.s16.st.xp(i32, i32, i32, i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.fft.bitrev(i32, i32) nounwind +declare i32 @llvm.riscv.esp.fft.cmul.s16.ld.xp(i32, i32, i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.fft.cmul.s16.st.xp(i32, i32, i32, i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.fft.r2bf.s16(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.fft.r2bf.s16.st.incp(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.fft.vst.r32.decp(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.ld.128.usar.ip(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.ld.128.usar.xp(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.ld.xacc.ip(i32, i32) nounwind -declare void @llvm.riscv.esp.ldqa.s16.128.ip(i32, i32) nounwind -declare void @llvm.riscv.esp.ldqa.s16.128.xp(i32, i32) nounwind -declare void @llvm.riscv.esp.ldqa.s8.128.ip(i32, i32) nounwind -declare void @llvm.riscv.esp.ldqa.s8.128.xp(i32, i32) nounwind -declare void @llvm.riscv.esp.ldqa.u16.128.ip(i32, i32) nounwind -declare void @llvm.riscv.esp.ldqa.u16.128.xp(i32, i32) nounwind -declare void @llvm.riscv.esp.ldqa.u8.128.ip(i32, i32) nounwind -declare void @llvm.riscv.esp.ldqa.u8.128.xp(i32, i32) nounwind -declare void @llvm.riscv.esp.vldbc.16.ip(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vldbc.16.xp(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vldbc.32.ip(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vldbc.32.xp(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vldbc.8.ip(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vldbc.8.xp(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vldext.s16.ip(i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vldext.s16.xp(i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vldext.s8.ip(i32, i32, i32, i32) nounwind -declare void 
@llvm.riscv.esp.vldext.s8.xp(i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vldext.u16.ip(i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vldext.u16.xp(i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vldext.u8.ip(i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vldext.u8.xp(i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vldhbc.16.incp(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.ld.qacc.h.h.128.ip(i32, i32) nounwind -declare void @llvm.riscv.esp.ld.qacc.h.l.128.ip(i32, i32) nounwind -declare void @llvm.riscv.esp.ld.qacc.l.h.128.ip(i32, i32) nounwind -declare void @llvm.riscv.esp.ld.qacc.l.l.128.ip(i32, i32) nounwind -declare void @llvm.riscv.esp.ld.ua.state.ip(i32, i32) nounwind +declare i32 @llvm.riscv.esp.fft.r2bf.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.fft.vst.r32.decp(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.ld.128.usar.ip(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.ld.128.usar.xp(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.ld.xacc.ip(i32, i32) nounwind +declare i32 @llvm.riscv.esp.ldqa.s16.128.ip(i32, i32) nounwind +declare i32 @llvm.riscv.esp.ldqa.s16.128.xp(i32, i32) nounwind +declare i32 @llvm.riscv.esp.ldqa.s8.128.ip(i32, i32) nounwind +declare i32 @llvm.riscv.esp.ldqa.s8.128.xp(i32, i32) nounwind +declare i32 @llvm.riscv.esp.ldqa.u16.128.ip(i32, i32) nounwind +declare i32 @llvm.riscv.esp.ldqa.u16.128.xp(i32, i32) nounwind +declare i32 @llvm.riscv.esp.ldqa.u8.128.ip(i32, i32) nounwind +declare i32 @llvm.riscv.esp.ldqa.u8.128.xp(i32, i32) nounwind +declare i32 @llvm.riscv.esp.vldbc.16.ip(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vldbc.16.xp(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vldbc.32.ip(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vldbc.32.xp(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vldbc.8.ip(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vldbc.8.xp(i32, i32, i32) nounwind 
+declare i32 @llvm.riscv.esp.vldext.s16.ip(i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vldext.s16.xp(i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vldext.s8.ip(i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vldext.s8.xp(i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vldext.u16.ip(i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vldext.u16.xp(i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vldext.u8.ip(i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vldext.u8.xp(i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vldhbc.16.incp(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.ld.qacc.h.h.128.ip(i32, i32) nounwind +declare i32 @llvm.riscv.esp.ld.qacc.h.l.128.ip(i32, i32) nounwind +declare i32 @llvm.riscv.esp.ld.qacc.l.h.128.ip(i32, i32) nounwind +declare i32 @llvm.riscv.esp.ld.qacc.l.l.128.ip(i32, i32) nounwind +declare i32 @llvm.riscv.esp.ld.ua.state.ip(i32, i32) nounwind declare void @llvm.riscv.esp.ldxq.32(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.st.qacc.h.h.128.ip(i32, i32) nounwind -declare void @llvm.riscv.esp.st.qacc.h.l.128.ip(i32, i32) nounwind -declare void @llvm.riscv.esp.st.qacc.l.h.128.ip(i32, i32) nounwind -declare void @llvm.riscv.esp.st.qacc.l.l.128.ip(i32, i32) nounwind -declare void @llvm.riscv.esp.st.ua.state.ip(i32, i32) nounwind +declare i32 @llvm.riscv.esp.st.qacc.h.h.128.ip(i32, i32) nounwind +declare i32 @llvm.riscv.esp.st.qacc.h.l.128.ip(i32, i32) nounwind +declare i32 @llvm.riscv.esp.st.qacc.l.h.128.ip(i32, i32) nounwind +declare i32 @llvm.riscv.esp.st.qacc.l.l.128.ip(i32, i32) nounwind +declare i32 @llvm.riscv.esp.st.ua.state.ip(i32, i32) nounwind declare void @llvm.riscv.esp.stxq.32(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vld.128.ip(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vld.128.xp(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vld.h.64.ip(i32, i32, i32) nounwind -declare void 
@llvm.riscv.esp.vld.h.64.xp(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vld.l.64.ip(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vld.l.64.xp(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vst.128.ip(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vst.128.xp(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vst.h.64.ip(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vst.h.64.xp(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vst.l.64.ip(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.vst.l.64.xp(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vld.128.ip(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vld.128.xp(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vld.h.64.ip(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vld.h.64.xp(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vld.l.64.ip(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vld.l.64.xp(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vst.128.ip(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vst.128.xp(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vst.h.64.ip(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vst.h.64.xp(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vst.l.64.ip(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.vst.l.64.xp(i32, i32, i32) nounwind declare void @llvm.riscv.esp.slci.2q(i32, i32, i32) nounwind declare void @llvm.riscv.esp.slcxxp.2q(i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.src.q(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.src.q.ld.ip(i32, i32, i32, i32, i32) nounwind -declare void @llvm.riscv.esp.src.q.ld.xp(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.src.q.ld.ip(i32, i32, i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.src.q.ld.xp(i32, i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.src.q.qup(i32, i32, i32) nounwind declare void @llvm.riscv.esp.srci.2q(i32, i32, i32) nounwind declare void 
@llvm.riscv.esp.srcmb.s16.q.qacc(i32, i32, i32) nounwind @@ -1272,7 +1274,7 @@ declare void @llvm.riscv.esp.srcmb.u16.q.qacc(i32, i32, i32) nounwind declare void @llvm.riscv.esp.srcmb.u16.qacc(i32, i32, i32) nounwind declare void @llvm.riscv.esp.srcmb.u8.q.qacc(i32, i32, i32) nounwind declare void @llvm.riscv.esp.srcmb.u8.qacc(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.srcq.128.st.incp(i32, i32, i32) nounwind +declare i32 @llvm.riscv.esp.srcq.128.st.incp(i32, i32, i32) nounwind declare void @llvm.riscv.esp.srcxxp.2q(i32, i32, i32, i32) nounwind declare void @llvm.riscv.esp.srs.s.xacc(i32, i32) nounwind declare void @llvm.riscv.esp.srs.u.xacc(i32, i32) nounwind @@ -1285,5 +1287,5 @@ declare void @llvm.riscv.esp.vsr.u32(i32, i32) nounwind declare void @llvm.riscv.esp.vsrd.16(i32, i32, i32) nounwind declare void @llvm.riscv.esp.vsrd.32(i32, i32, i32) nounwind declare void @llvm.riscv.esp.vsrd.8(i32, i32, i32) nounwind -declare void @llvm.riscv.esp.st.s.xacc.ip(i32, i32) nounwind -declare void @llvm.riscv.esp.st.u.xacc.ip(i32, i32) nounwind +declare i32 @llvm.riscv.esp.st.s.xacc.ip(i32, i32) nounwind +declare i32 @llvm.riscv.esp.st.u.xacc.ip(i32, i32) nounwind