[llvm][RISCV] Add RISCV vector tuple type to value types(MVT) #97993
Conversation
@llvm/pr-subscribers-llvm-analysis @llvm/pr-subscribers-backend-risc-v

Author: Brandon Wu (4vtomat)

Changes

Patch is 94.54 MiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/97993.diff

1013 Files Affected:
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index a0820e2093bc2..67f480dec0fe3 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -762,8 +762,10 @@ multiclass RVVUnitStridedSegLoadTuple<string op> {
[]<string>)),
ManualCodegen = [{
{
- llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
- IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
+ if (IsMasked)
+ IntrinsicTypes = {ResultType, Ops[0]->getType(), Ops.back()->getType()};
+ else
+ IntrinsicTypes = {ResultType, Ops.back()->getType()};
SmallVector<llvm::Value*, 12> Operands;
bool NoPassthru =
@@ -772,11 +774,10 @@ multiclass RVVUnitStridedSegLoadTuple<string op> {
unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
if (NoPassthru) { // Push poison into passthru
- Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
} else { // Push intrinsics operands into passthru
llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
- for (unsigned I = 0; I < NF; ++I)
- Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I}));
+ Operands.push_back(PassthruOperand);
}
Operands.push_back(Ops[Offset]); // Ptr
@@ -785,6 +786,7 @@ multiclass RVVUnitStridedSegLoadTuple<string op> {
Operands.push_back(Ops[Offset + 1]); // VL
if (IsMasked)
Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
@@ -828,24 +830,24 @@ multiclass RVVUnitStridedSegStoreTuple<string op> {
{
// Masked
// Builtin: (mask, ptr, v_tuple, vl)
- // Intrinsic: (val0, val1, ..., ptr, mask, vl)
+ // Intrinsic: (tuple, ptr, mask, vl)
// Unmasked
// Builtin: (ptr, v_tuple, vl)
- // Intrinsic: (val0, val1, ..., ptr, vl)
+ // Intrinsic: (tuple, ptr, vl)
unsigned Offset = IsMasked ? 1 : 0;
- llvm::Value *VTupleOperand = Ops[Offset + 1];
SmallVector<llvm::Value*, 12> Operands;
- for (unsigned I = 0; I < NF; ++I) {
- llvm::Value *V = Builder.CreateExtractValue(VTupleOperand, {I});
- Operands.push_back(V);
- }
+ Operands.push_back(Ops[Offset + 1]); // tuple
Operands.push_back(Ops[Offset]); // Ptr
if (IsMasked)
Operands.push_back(Ops[0]);
Operands.push_back(Ops[Offset + 2]); // VL
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
- IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType()};
+ if (IsMasked)
+ IntrinsicTypes = {Operands[0]->getType(), Ops[0]->getType(), Operands.back()->getType()};
+ else
+ IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType()};
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
return Builder.CreateCall(F, Operands, "");
}
@@ -880,8 +882,10 @@ multiclass RVVUnitStridedSegLoadFFTuple<string op> {
[]<string>)),
ManualCodegen = [{
{
- llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
- IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
+ if (IsMasked)
+ IntrinsicTypes = {ResultType, Ops.back()->getType(), Ops[0]->getType()};
+ else
+ IntrinsicTypes = {ResultType, Ops.back()->getType()};
SmallVector<llvm::Value*, 12> Operands;
bool NoPassthru =
@@ -890,11 +894,10 @@ multiclass RVVUnitStridedSegLoadFFTuple<string op> {
unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
if (NoPassthru) { // Push poison into passthru
- Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
} else { // Push intrinsics operands into passthru
llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
- for (unsigned I = 0; I < NF; ++I)
- Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I}));
+ Operands.push_back(PassthruOperand);
}
Operands.push_back(Ops[Offset]); // Ptr
@@ -903,6 +906,7 @@ multiclass RVVUnitStridedSegLoadFFTuple<string op> {
Operands.push_back(Ops[Offset + 2]); // vl
if (IsMasked)
Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
@@ -911,14 +915,10 @@ multiclass RVVUnitStridedSegLoadFFTuple<string op> {
clang::CharUnits Align =
CGM.getNaturalPointeeTypeAlignment(E->getArg(Offset + 1)->getType());
- llvm::Value *ReturnTuple = llvm::PoisonValue::get(ResultType);
- for (unsigned I = 0; I < NF; ++I) {
- llvm::Value *V = Builder.CreateExtractValue(LoadValue, {I});
- ReturnTuple = Builder.CreateInsertValue(ReturnTuple, V, {I});
- }
+ llvm::Value *ReturnTuple = Builder.CreateExtractValue(LoadValue, 0);
// Store new_vl
- llvm::Value *V = Builder.CreateExtractValue(LoadValue, {NF});
+ llvm::Value *V = Builder.CreateExtractValue(LoadValue, 1);
Builder.CreateStore(V, Address(Ops[Offset + 1], V->getType(), Align));
if (ReturnValue.isNull())
@@ -957,8 +957,10 @@ multiclass RVVStridedSegLoadTuple<string op> {
[]<string>)),
ManualCodegen = [{
{
- llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
- IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
+ if (IsMasked)
+ IntrinsicTypes = {ResultType, Ops.back()->getType(), Ops[0]->getType()};
+ else
+ IntrinsicTypes = {ResultType, Ops.back()->getType()};
SmallVector<llvm::Value*, 12> Operands;
bool NoPassthru =
@@ -967,11 +969,10 @@ multiclass RVVStridedSegLoadTuple<string op> {
unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
if (NoPassthru) { // Push poison into passthru
- Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
} else { // Push intrinsics operands into passthru
llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
- for (unsigned I = 0; I < NF; ++I)
- Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I}));
+ Operands.push_back(PassthruOperand);
}
Operands.push_back(Ops[Offset]); // Ptr
@@ -981,6 +982,7 @@ multiclass RVVStridedSegLoadTuple<string op> {
Operands.push_back(Ops[Offset + 2]); // VL
if (IsMasked)
Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
@@ -1025,25 +1027,25 @@ multiclass RVVStridedSegStoreTuple<string op> {
{
// Masked
// Builtin: (mask, ptr, stride, v_tuple, vl)
- // Intrinsic: (val0, val1, ..., ptr, stride, mask, vl)
+ // Intrinsic: (tuple, ptr, stride, mask, vl)
// Unmasked
// Builtin: (ptr, stride, v_tuple, vl)
- // Intrinsic: (val0, val1, ..., ptr, stride, vl)
+ // Intrinsic: (tuple, ptr, stride, vl)
unsigned Offset = IsMasked ? 1 : 0;
- llvm::Value *VTupleOperand = Ops[Offset + 2];
SmallVector<llvm::Value*, 12> Operands;
- for (unsigned I = 0; I < NF; ++I) {
- llvm::Value *V = Builder.CreateExtractValue(VTupleOperand, {I});
- Operands.push_back(V);
- }
+ Operands.push_back(Ops[Offset + 2]); // tuple
Operands.push_back(Ops[Offset]); // Ptr
Operands.push_back(Ops[Offset + 1]); // Stride
if (IsMasked)
Operands.push_back(Ops[0]);
Operands.push_back(Ops[Offset + 3]); // VL
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
- IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType()};
+ if (IsMasked)
+ IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType(), Ops[0]->getType()};
+ else
+ IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType()};
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
return Builder.CreateCall(F, Operands, "");
}
@@ -1073,8 +1075,6 @@ multiclass RVVIndexedSegLoadTuple<string op> {
[]<string>)),
ManualCodegen = [{
{
- llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
- IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
SmallVector<llvm::Value*, 12> Operands;
bool NoPassthru =
@@ -1083,11 +1083,10 @@ multiclass RVVIndexedSegLoadTuple<string op> {
unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
if (NoPassthru) { // Push poison into passthru
- Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
} else { // Push intrinsics operands into passthru
llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
- for (unsigned I = 0; I < NF; ++I)
- Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I}));
+ Operands.push_back(PassthruOperand);
}
Operands.push_back(Ops[Offset]); // Ptr
@@ -1097,9 +1096,15 @@ multiclass RVVIndexedSegLoadTuple<string op> {
Operands.push_back(Ops[Offset + 2]); // VL
if (IsMasked)
Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
- IntrinsicTypes = {ElementVectorType, Ops[Offset + 1]->getType(),
- Ops.back()->getType()};
+ if (IsMasked)
+ IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(),
+ Ops[0]->getType(),
+ Ops.back()->getType()};
+ else
+ IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(),
+ Ops.back()->getType()};
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
@@ -1139,26 +1144,28 @@ multiclass RVVIndexedSegStoreTuple<string op> {
{
// Masked
// Builtin: (mask, ptr, index, v_tuple, vl)
- // Intrinsic: (val0, val1, ..., ptr, index, mask, vl)
+ // Intrinsic: (tuple, ptr, index, mask, vl)
// Unmasked
// Builtin: (ptr, index, v_tuple, vl)
- // Intrinsic: (val0, val1, ..., ptr, index, vl)
+ // Intrinsic: (tuple, ptr, index, vl)
unsigned Offset = IsMasked ? 1 : 0;
- llvm::Value *VTupleOperand = Ops[Offset + 2];
SmallVector<llvm::Value*, 12> Operands;
- for (unsigned I = 0; I < NF; ++I) {
- llvm::Value *V = Builder.CreateExtractValue(VTupleOperand, {I});
- Operands.push_back(V);
- }
+ Operands.push_back(Ops[Offset + 2]); // tuple
Operands.push_back(Ops[Offset]); // Ptr
Operands.push_back(Ops[Offset + 1]); // Idx
if (IsMasked)
Operands.push_back(Ops[0]);
Operands.push_back(Ops[Offset + 3]); // VL
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), SegInstSEW));
- IntrinsicTypes = {Operands[0]->getType(), Ops[Offset + 1]->getType(),
- Operands.back()->getType()};
+ if (IsMasked)
+ IntrinsicTypes = {Operands[0]->getType(), Ops[Offset + 1]->getType(),
+ Ops[0]->getType(),
+ Operands.back()->getType()};
+ else
+ IntrinsicTypes = {Operands[0]->getType(), Ops[Offset + 1]->getType(),
+ Operands.back()->getType()};
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
return Builder.CreateCall(F, Operands, "");
}
@@ -2468,22 +2475,25 @@ let HasMasked = false, HasVL = false, IRName = "" in {
let Name = "vget_v", MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
{
- if (isa<StructType>(Ops[0]->getType())) // For tuple type
- // Extract value from index (operand 1) of vtuple (operand 0)
- return Builder.CreateExtractValue(
- Ops[0],
- {(unsigned)cast<ConstantInt>(Ops[1])->getZExtValue()});
auto *VecTy = cast<ScalableVectorType>(ResultType);
- auto *OpVecTy = cast<ScalableVectorType>(Ops[0]->getType());
// Mask to only valid indices.
- unsigned MaxIndex = OpVecTy->getMinNumElements() / VecTy->getMinNumElements();
- assert(isPowerOf2_32(MaxIndex));
Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
- Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
- Ops[1] = Builder.CreateMul(Ops[1],
- ConstantInt::get(Ops[1]->getType(),
- VecTy->getMinNumElements()));
- return Builder.CreateExtractVector(ResultType, Ops[0], Ops[1]);
+ if (auto *OpVecTy = dyn_cast<ScalableVectorType>(Ops[0]->getType())) {
+ unsigned MaxIndex = OpVecTy->getMinNumElements() / VecTy->getMinNumElements();
+ assert(isPowerOf2_32(MaxIndex));
+ Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
+ Ops[1] = Builder.CreateMul(Ops[1],
+ ConstantInt::get(Ops[1]->getType(),
+ VecTy->getMinNumElements()));
+ return Builder.CreateExtractVector(ResultType, Ops[0], Ops[1]);
+ }
+
+ bool IsRISCV64 = getTarget().getTriple().isRISCV64();
+ llvm::Type *XLenTy = IsRISCV64 ? Builder.getInt64Ty() :
+ Builder.getInt32Ty();
+ return Builder.CreateIntrinsic(Intrinsic::riscv_vector_extract,
+ {ResultType, Ops[0]->getType(), XLenTy},
+ {Ops[0], Ops[1]});
}
}] in {
foreach dst_lmul = ["(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
@@ -2500,22 +2510,25 @@ let HasMasked = false, HasVL = false, IRName = "" in {
let Name = "vset_v", MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
{
- if (isa<StructType>(ResultType)) // For tuple type
- // Insert value (operand 2) into index (operand 1) of vtuple (operand 0)
- return Builder.CreateInsertValue(
- Ops[0], Ops[2],
- {(unsigned)cast<ConstantInt>(Ops[1])->getZExtValue()});
- auto *ResVecTy = cast<ScalableVectorType>(ResultType);
auto *VecTy = cast<ScalableVectorType>(Ops[2]->getType());
// Mask to only valid indices.
- unsigned MaxIndex = ResVecTy->getMinNumElements() / VecTy->getMinNumElements();
- assert(isPowerOf2_32(MaxIndex));
Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
- Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
- Ops[1] = Builder.CreateMul(Ops[1],
- ConstantInt::get(Ops[1]->getType(),
- VecTy->getMinNumElements()));
- return Builder.CreateInsertVector(ResultType, Ops[0], Ops[2], Ops[1]);
+ if (auto *ResVecTy = dyn_cast<ScalableVectorType>(ResultType)) {
+ unsigned MaxIndex = ResVecTy->getMinNumElements() / VecTy->getMinNumElements();
+ assert(isPowerOf2_32(MaxIndex));
+ Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
+ Ops[1] = Builder.CreateMul(Ops[1],
+ ConstantInt::get(Ops[1]->getType(),
+ VecTy->getMinNumElements()));
+ return Builder.CreateInsertVector(ResultType, Ops[0], Ops[2], Ops[1]);
+ }
+
+ bool IsRISCV64 = getTarget().getTriple().isRISCV64();
+ llvm::Type *XLenTy = IsRISCV64 ? Builder.getInt64Ty() :
+ Builder.getInt32Ty();
+ return Builder.CreateIntrinsic(Intrinsic::riscv_vector_insert,
+ {ResultType, Ops[2]->getType(), XLenTy},
+ {Ops[0], Ops[2], Ops[1]});
}
}] in {
foreach dst_lmul = ["(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in {
@@ -2539,22 +2552,26 @@ let HasMasked = false, HasVL = false, IRName = "" in {
SupportOverloading = false,
ManualCodegen = [{
{
- if (isa<StructType>(ResultType)) {
- unsigned NF = cast<StructType>(ResultType)->getNumElements();
- llvm::Value *ReturnTuple = llvm::PoisonValue::get(ResultType);
- for (unsigned I = 0; I < NF; ++I) {
- ReturnTuple = Builder.CreateInsertValue(ReturnTuple, Ops[I], {I});
- }
- return ReturnTuple;
- }
llvm::Value *ReturnVector = llvm::PoisonValue::get(ResultType);
auto *VecTy = cast<ScalableVectorType>(Ops[0]->getType());
+ bool IsRISCV64 = getTarget().getTriple().isRISCV64();
+ llvm::Type *XLenTy = IsRISCV64 ? Builder.getInt64Ty() :
+ Builder.getInt32Ty();
for (unsigned I = 0, N = Ops.size(); I < N; ++I) {
llvm::Value *Idx =
ConstantInt::get(Builder.getInt64Ty(),
- VecTy->getMinNumElements() * I);
- ReturnVector =
- Builder.CreateInsertVector(ResultType, ReturnVector, Ops[I], Idx);
+ ResultType->isScalableTy() ?
+ VecTy->getMinNumElements() * I : I);
+
+ if (ResultType->isScalableTy())
+ ReturnVector =
+ Builder.CreateInsertVector(ResultType, ReturnVector, Ops[I], Idx);
+ else
+ ReturnVector =
+ Builder.CreateIntrinsic(Intrinsic::riscv_vector_insert,
+ {ResultType, Ops[I]->getType(), XLenTy},
+ {ReturnVector, Ops[I], Idx});
+
}
return ReturnVector;
}
diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
index 97493bae5656e..3386578904156 100644
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -429,6 +429,7 @@ class RVVIntrinsic {
bool hasBuiltinAlias() const { return HasBuiltinAlias; }
bool hasManualCodegen() const { return !ManualCodegen.empty(); }
bool isMasked() const { return IsMasked; }
+ llvm::StringRef getOverloadedName() const { return OverloadedName; }
llvm::StringRef getIRName() const { return IRName; }
llvm::StringRef getManualCodegen() const { return ManualCodegen; }
PolicyScheme getPolicyScheme() const { return Scheme; }
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 5b92f1837980c..1c7d1f81e9bcc 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -21751,13 +21751,14 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
}
Intrinsic::ID ID = Intrinsic::not_intrinsic;
- unsigned NF = 1;
// The 0th bit simulates the `vta` of RVV
// The 1st bit simulates the `vma` of RVV
constexpr unsigned RVV_VTA = 0x1;
constexpr unsigned RVV_VMA = 0x2;
int PolicyAttrs = 0;
bool IsMasked = false;
+ // This is used by segment load/store to determine it's llvm type.
+ unsigned SegInstSEW = 8;
// Required for overloaded intrinsics.
llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
diff --git a/clang/lib/CodeGen/CodeGenTypes.cpp b/clang/lib/CodeGen/CodeGenTypes.cpp
index d823c336e39bf..49f...
[truncated]
✅ With the latest revision this PR passed the C/C++ code formatter.
Force-pushed from d8fa6c4 to ce6ad93 (Compare)
We are adding a lot of MVTs here. Along with #97010, we will definitely exceed the maximum number of MVTs. But I don't have a better way to fix this than extending MVT to 16 bits now.
You are right, we should be careful about adding MVTs, but I think we will ultimately need to extend MVT to 16 bits if more and more architectures or extensions are added. By the way, I have estimated the memory usage increase from extending MVT to 16 bits, including changing the matcher tables and the other structures that would be affected; it only increases total memory usage by approximately 1% when compiling a program.
Great! Can you create a PR for extending MVT to 16 bits? It has been lying on my TODO list for a long time!
Sure~ I'm willing to do it! |
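For readers following this exchange, the constraint under discussion is that each simple value type is identified by a small integer, and an 8-bit identifier can only encode 256 distinct MVTs. The snippet below is a standalone illustration of that budget, not LLVM code; the existing-count number is a hypothetical placeholder, and only the 32 new tuple MVTs come from this patch.

```cpp
// Standalone illustration (not LLVM code) of the 8-bit MVT identifier budget.
// ExistingMVTs is a hypothetical placeholder count; NewTupleMVTs is the 32
// RISCV vector tuple MVTs added by this patch.
#include <cstdint>
#include <cstdio>

int main() {
  constexpr unsigned Limit8Bit = UINT8_MAX + 1;    // 256 encodable identifiers
  constexpr unsigned Limit16Bit = UINT16_MAX + 1;  // 65536 after widening
  constexpr unsigned ExistingMVTs = 220;           // hypothetical current count
  constexpr unsigned NewTupleMVTs = 32;            // added by this patch
  std::printf("%u of %u identifiers used; a 16-bit identifier raises the limit to %u\n",
              ExistingMVTs + NewTupleMVTs, Limit8Bit, Limit16Bit);
  return 0;
}
```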
Force-pushed from ce6ad93 to 45fd7c7 (Compare)
Force-pushed from 5f8aba8 to b6848f7 (Compare)
LGTM
This patch handles the types (MVT) in `SelectionDAG` for RISCV vector tuples. As described in the previous patch handling LLVM types, the MVTs also have 32 variants:

```
riscv_nxv1i8x2,  riscv_nxv1i8x3,  riscv_nxv1i8x4,  riscv_nxv1i8x5,
riscv_nxv1i8x6,  riscv_nxv1i8x7,  riscv_nxv1i8x8,  riscv_nxv2i8x2,
riscv_nxv2i8x3,  riscv_nxv2i8x4,  riscv_nxv2i8x5,  riscv_nxv2i8x6,
riscv_nxv2i8x7,  riscv_nxv2i8x8,  riscv_nxv4i8x2,  riscv_nxv4i8x3,
riscv_nxv4i8x4,  riscv_nxv4i8x5,  riscv_nxv4i8x6,  riscv_nxv4i8x7,
riscv_nxv4i8x8,  riscv_nxv8i8x2,  riscv_nxv8i8x3,  riscv_nxv8i8x4,
riscv_nxv8i8x5,  riscv_nxv8i8x6,  riscv_nxv8i8x7,  riscv_nxv8i8x8,
riscv_nxv16i8x2, riscv_nxv16i8x3, riscv_nxv16i8x4, riscv_nxv32i8x2
```

An intuitive way to model a vector tuple type is as a nested scalable vector, e.g. `nElts=NF, EltTy=nxv2i32`. However, that is not compatible with how we handle scalable vectors in TargetLowering, so it would need more effort to change the code to handle this concept. Another approach is to encode the `MinNumElts` info in the `sz` of the `MVT`, e.g. `nElts=NF, sz=(NF*MinNumElts*8)`; this is much easier to handle and changes less code. This patch adopts the latter approach.
Force-pushed from b6848f7 to 8a6d23b (Compare)
Summary:

This patch handles the types (MVT) in `SelectionDAG` for RISCV vector tuples. As described in the previous patch handling LLVM types, the MVTs also have 32 variants: riscv_nxv1i8x2 through riscv_nxv1i8x8, riscv_nxv2i8x2 through riscv_nxv2i8x8, riscv_nxv4i8x2 through riscv_nxv4i8x8, riscv_nxv8i8x2 through riscv_nxv8i8x8, riscv_nxv16i8x2 through riscv_nxv16i8x4, and riscv_nxv32i8x2 (listed in full in the commit message above).

Detail:

An intuitive way to model a vector tuple type is as a nested scalable vector, e.g. `nElts=NF, EltTy=nxv2i32`. However, that is not compatible with how we handle scalable vectors in TargetLowering, so it would need more effort to change the code to handle this concept.

Another approach is to encode the `MinNumElts` info in the `sz` of the `MVT`, e.g. `nElts=NF, sz=(NF*MinNumElts*8)`; this is much easier to handle and changes less code.

This patch adopts the latter approach.

Stacked on #97992
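To make the chosen encoding concrete, here is a minimal sketch, not code from this patch, that works through `sz = NF * MinNumElts * 8` for a few of the tuple MVTs listed above. The helper function and its name are purely illustrative; only the formula and the MVT names come from the description.

```cpp
// Minimal sketch (not LLVM code): the known-minimum size in bits of a RISCV
// vector tuple MVT under the encoding described above, sz = NF*MinNumElts*8,
// where MinNumElts is the minimum i8 element count of one field and NF is the
// number of fields in the tuple. The sizes scale with vscale at runtime.
#include <cstdio>

constexpr unsigned tupleKnownMinSizeInBits(unsigned NF, unsigned MinNumElts) {
  return NF * MinNumElts * 8;
}

int main() {
  std::printf("riscv_nxv1i8x2:  %u bits\n", tupleKnownMinSizeInBits(2, 1));   // 16
  std::printf("riscv_nxv4i8x3:  %u bits\n", tupleKnownMinSizeInBits(3, 4));   // 96
  std::printf("riscv_nxv32i8x2: %u bits\n", tupleKnownMinSizeInBits(2, 32));  // 512
  return 0;
}
```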