[X86][CodeGen] Support lowering for NDD ADD/SUB/ADC/SBB/OR/XOR/NEG/NOT/INC/DEC/IMUL #77564
Conversation
@llvm/pr-subscribers-backend-x86

Author: Shengchen Kan (KanRobert)

Changes: Patch is 132.32 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/77564.diff (an illustrative lowering sketch follows the truncated diff below)

13 Files Affected:
diff --git a/llvm/lib/Target/X86/X86InstrArithmetic.td b/llvm/lib/Target/X86/X86InstrArithmetic.td
index 5cfa95e085e34a..d493e5db878267 100644
--- a/llvm/lib/Target/X86/X86InstrArithmetic.td
+++ b/llvm/lib/Target/X86/X86InstrArithmetic.td
@@ -1107,43 +1107,85 @@ def : Pat<(store (X86adc_flag GR64:$src, (loadi64 addr:$dst), EFLAGS),
// Patterns for basic arithmetic ops with relocImm for the immediate field.
multiclass ArithBinOp_RF_relocImm_Pats<SDNode OpNodeFlag, SDNode OpNode> {
- def : Pat<(OpNodeFlag GR8:$src1, relocImm8_su:$src2),
- (!cast<Instruction>(NAME#"8ri") GR8:$src1, relocImm8_su:$src2)>;
- def : Pat<(OpNodeFlag GR16:$src1, relocImm16_su:$src2),
- (!cast<Instruction>(NAME#"16ri") GR16:$src1, relocImm16_su:$src2)>;
- def : Pat<(OpNodeFlag GR32:$src1, relocImm32_su:$src2),
- (!cast<Instruction>(NAME#"32ri") GR32:$src1, relocImm32_su:$src2)>;
- def : Pat<(OpNodeFlag GR64:$src1, i64relocImmSExt32_su:$src2),
- (!cast<Instruction>(NAME#"64ri32") GR64:$src1, i64relocImmSExt32_su:$src2)>;
-
- def : Pat<(store (OpNode (load addr:$dst), relocImm8_su:$src), addr:$dst),
- (!cast<Instruction>(NAME#"8mi") addr:$dst, relocImm8_su:$src)>;
- def : Pat<(store (OpNode (load addr:$dst), relocImm16_su:$src), addr:$dst),
- (!cast<Instruction>(NAME#"16mi") addr:$dst, relocImm16_su:$src)>;
- def : Pat<(store (OpNode (load addr:$dst), relocImm32_su:$src), addr:$dst),
- (!cast<Instruction>(NAME#"32mi") addr:$dst, relocImm32_su:$src)>;
- def : Pat<(store (OpNode (load addr:$dst), i64relocImmSExt32_su:$src), addr:$dst),
- (!cast<Instruction>(NAME#"64mi32") addr:$dst, i64relocImmSExt32_su:$src)>;
+ let Predicates = [NoNDD] in {
+ def : Pat<(OpNodeFlag GR8:$src1, relocImm8_su:$src2),
+ (!cast<Instruction>(NAME#"8ri") GR8:$src1, relocImm8_su:$src2)>;
+ def : Pat<(OpNodeFlag GR16:$src1, relocImm16_su:$src2),
+ (!cast<Instruction>(NAME#"16ri") GR16:$src1, relocImm16_su:$src2)>;
+ def : Pat<(OpNodeFlag GR32:$src1, relocImm32_su:$src2),
+ (!cast<Instruction>(NAME#"32ri") GR32:$src1, relocImm32_su:$src2)>;
+ def : Pat<(OpNodeFlag GR64:$src1, i64relocImmSExt32_su:$src2),
+ (!cast<Instruction>(NAME#"64ri32") GR64:$src1, i64relocImmSExt32_su:$src2)>;
+
+ def : Pat<(store (OpNode (load addr:$dst), relocImm8_su:$src), addr:$dst),
+ (!cast<Instruction>(NAME#"8mi") addr:$dst, relocImm8_su:$src)>;
+ def : Pat<(store (OpNode (load addr:$dst), relocImm16_su:$src), addr:$dst),
+ (!cast<Instruction>(NAME#"16mi") addr:$dst, relocImm16_su:$src)>;
+ def : Pat<(store (OpNode (load addr:$dst), relocImm32_su:$src), addr:$dst),
+ (!cast<Instruction>(NAME#"32mi") addr:$dst, relocImm32_su:$src)>;
+ def : Pat<(store (OpNode (load addr:$dst), i64relocImmSExt32_su:$src), addr:$dst),
+ (!cast<Instruction>(NAME#"64mi32") addr:$dst, i64relocImmSExt32_su:$src)>;
+ }
+ let Predicates = [HasNDD] in {
+ def : Pat<(OpNodeFlag GR8:$src1, relocImm8_su:$src2),
+ (!cast<Instruction>(NAME#"8ri_ND") GR8:$src1, relocImm8_su:$src2)>;
+ def : Pat<(OpNodeFlag GR16:$src1, relocImm16_su:$src2),
+ (!cast<Instruction>(NAME#"16ri_ND") GR16:$src1, relocImm16_su:$src2)>;
+ def : Pat<(OpNodeFlag GR32:$src1, relocImm32_su:$src2),
+ (!cast<Instruction>(NAME#"32ri_ND") GR32:$src1, relocImm32_su:$src2)>;
+ def : Pat<(OpNodeFlag GR64:$src1, i64relocImmSExt32_su:$src2),
+ (!cast<Instruction>(NAME#"64ri32_ND") GR64:$src1, i64relocImmSExt32_su:$src2)>;
+
+ def : Pat<(OpNode (load addr:$dst), relocImm8_su:$src),
+ (!cast<Instruction>(NAME#"8mi_ND") addr:$dst, relocImm8_su:$src)>;
+ def : Pat<(OpNode (load addr:$dst), relocImm16_su:$src),
+ (!cast<Instruction>(NAME#"16mi_ND") addr:$dst, relocImm16_su:$src)>;
+ def : Pat<(OpNode (load addr:$dst), relocImm32_su:$src),
+ (!cast<Instruction>(NAME#"32mi_ND") addr:$dst, relocImm32_su:$src)>;
+ def : Pat<(OpNode (load addr:$dst), i64relocImmSExt32_su:$src),
+ (!cast<Instruction>(NAME#"64mi32_ND") addr:$dst, i64relocImmSExt32_su:$src)>;
+ }
}
multiclass ArithBinOp_RFF_relocImm_Pats<SDNode OpNodeFlag> {
- def : Pat<(OpNodeFlag GR8:$src1, relocImm8_su:$src2, EFLAGS),
- (!cast<Instruction>(NAME#"8ri") GR8:$src1, relocImm8_su:$src2)>;
- def : Pat<(OpNodeFlag GR16:$src1, relocImm16_su:$src2, EFLAGS),
- (!cast<Instruction>(NAME#"16ri") GR16:$src1, relocImm16_su:$src2)>;
- def : Pat<(OpNodeFlag GR32:$src1, relocImm32_su:$src2, EFLAGS),
- (!cast<Instruction>(NAME#"32ri") GR32:$src1, relocImm32_su:$src2)>;
- def : Pat<(OpNodeFlag GR64:$src1, i64relocImmSExt32_su:$src2, EFLAGS),
- (!cast<Instruction>(NAME#"64ri32") GR64:$src1, i64relocImmSExt32_su:$src2)>;
-
- def : Pat<(store (OpNodeFlag (load addr:$dst), relocImm8_su:$src, EFLAGS), addr:$dst),
- (!cast<Instruction>(NAME#"8mi") addr:$dst, relocImm8_su:$src)>;
- def : Pat<(store (OpNodeFlag (load addr:$dst), relocImm16_su:$src, EFLAGS), addr:$dst),
- (!cast<Instruction>(NAME#"16mi") addr:$dst, relocImm16_su:$src)>;
- def : Pat<(store (OpNodeFlag (load addr:$dst), relocImm32_su:$src, EFLAGS), addr:$dst),
- (!cast<Instruction>(NAME#"32mi") addr:$dst, relocImm32_su:$src)>;
- def : Pat<(store (OpNodeFlag (load addr:$dst), i64relocImmSExt32_su:$src, EFLAGS), addr:$dst),
- (!cast<Instruction>(NAME#"64mi32") addr:$dst, i64relocImmSExt32_su:$src)>;
+ let Predicates = [NoNDD] in {
+ def : Pat<(OpNodeFlag GR8:$src1, relocImm8_su:$src2, EFLAGS),
+ (!cast<Instruction>(NAME#"8ri") GR8:$src1, relocImm8_su:$src2)>;
+ def : Pat<(OpNodeFlag GR16:$src1, relocImm16_su:$src2, EFLAGS),
+ (!cast<Instruction>(NAME#"16ri") GR16:$src1, relocImm16_su:$src2)>;
+ def : Pat<(OpNodeFlag GR32:$src1, relocImm32_su:$src2, EFLAGS),
+ (!cast<Instruction>(NAME#"32ri") GR32:$src1, relocImm32_su:$src2)>;
+ def : Pat<(OpNodeFlag GR64:$src1, i64relocImmSExt32_su:$src2, EFLAGS),
+ (!cast<Instruction>(NAME#"64ri32") GR64:$src1, i64relocImmSExt32_su:$src2)>;
+
+ def : Pat<(store (OpNodeFlag (load addr:$dst), relocImm8_su:$src, EFLAGS), addr:$dst),
+ (!cast<Instruction>(NAME#"8mi") addr:$dst, relocImm8_su:$src)>;
+ def : Pat<(store (OpNodeFlag (load addr:$dst), relocImm16_su:$src, EFLAGS), addr:$dst),
+ (!cast<Instruction>(NAME#"16mi") addr:$dst, relocImm16_su:$src)>;
+ def : Pat<(store (OpNodeFlag (load addr:$dst), relocImm32_su:$src, EFLAGS), addr:$dst),
+ (!cast<Instruction>(NAME#"32mi") addr:$dst, relocImm32_su:$src)>;
+ def : Pat<(store (OpNodeFlag (load addr:$dst), i64relocImmSExt32_su:$src, EFLAGS), addr:$dst),
+ (!cast<Instruction>(NAME#"64mi32") addr:$dst, i64relocImmSExt32_su:$src)>;
+ }
+ let Predicates = [HasNDD] in {
+ def : Pat<(OpNodeFlag GR8:$src1, relocImm8_su:$src2, EFLAGS),
+ (!cast<Instruction>(NAME#"8ri_ND") GR8:$src1, relocImm8_su:$src2)>;
+ def : Pat<(OpNodeFlag GR16:$src1, relocImm16_su:$src2, EFLAGS),
+ (!cast<Instruction>(NAME#"16ri_ND") GR16:$src1, relocImm16_su:$src2)>;
+ def : Pat<(OpNodeFlag GR32:$src1, relocImm32_su:$src2, EFLAGS),
+ (!cast<Instruction>(NAME#"32ri_ND") GR32:$src1, relocImm32_su:$src2)>;
+ def : Pat<(OpNodeFlag GR64:$src1, i64relocImmSExt32_su:$src2, EFLAGS),
+ (!cast<Instruction>(NAME#"64ri32_ND") GR64:$src1, i64relocImmSExt32_su:$src2)>;
+
+ def : Pat<(OpNodeFlag (load addr:$dst), relocImm8_su:$src, EFLAGS),
+ (!cast<Instruction>(NAME#"8mi_ND") addr:$dst, relocImm8_su:$src)>;
+ def : Pat<(OpNodeFlag (load addr:$dst), relocImm16_su:$src, EFLAGS),
+ (!cast<Instruction>(NAME#"16mi_ND") addr:$dst, relocImm16_su:$src)>;
+ def : Pat<(OpNodeFlag (load addr:$dst), relocImm32_su:$src, EFLAGS),
+ (!cast<Instruction>(NAME#"32mi_ND") addr:$dst, relocImm32_su:$src)>;
+ def : Pat<(OpNodeFlag (load addr:$dst), i64relocImmSExt32_su:$src, EFLAGS),
+ (!cast<Instruction>(NAME#"64mi32_ND") addr:$dst, i64relocImmSExt32_su:$src)>;
+ }
}
multiclass ArithBinOp_F_relocImm_Pats<SDNode OpNodeFlag> {
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index c77c77ee4a3eeb..84457e4e161502 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -1550,13 +1550,24 @@ def : Pat<(X86add_flag_nocf GR64:$src1, 0x0000000080000000),
// AddedComplexity is needed to give priority over i64immSExt8 and i64immSExt32.
let AddedComplexity = 1 in {
-def : Pat<(and GR64:$src, i64immZExt32:$imm),
- (SUBREG_TO_REG
- (i64 0),
- (AND32ri
- (EXTRACT_SUBREG GR64:$src, sub_32bit),
- (i32 (GetLo32XForm imm:$imm))),
- sub_32bit)>;
+ let Predicates = [NoNDD] in {
+ def : Pat<(and GR64:$src, i64immZExt32:$imm),
+ (SUBREG_TO_REG
+ (i64 0),
+ (AND32ri
+ (EXTRACT_SUBREG GR64:$src, sub_32bit),
+ (i32 (GetLo32XForm imm:$imm))),
+ sub_32bit)>;
+ }
+ let Predicates = [HasNDD] in {
+ def : Pat<(and GR64:$src, i64immZExt32:$imm),
+ (SUBREG_TO_REG
+ (i64 0),
+ (AND32ri_ND
+ (EXTRACT_SUBREG GR64:$src, sub_32bit),
+ (i32 (GetLo32XForm imm:$imm))),
+ sub_32bit)>;
+ }
} // AddedComplexity = 1
@@ -1762,10 +1773,18 @@ def : Pat<(X86xor_flag (i8 (trunc GR32:$src)),
// where the least significant bit is not 0. However, the probability of this
// happening is considered low enough that this is officially not a
// "real problem".
-def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr GR8 :$src1, GR8 :$src1)>;
-def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
-def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
-def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
+let Predicates = [HasNDD] in {
+ def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr_ND GR8 :$src1, GR8 :$src1)>;
+ def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr_ND GR16:$src1, GR16:$src1)>;
+ def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr_ND GR32:$src1, GR32:$src1)>;
+ def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr_ND GR64:$src1, GR64:$src1)>;
+}
+let Predicates = [NoNDD] in {
+ def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr GR8 :$src1, GR8 :$src1)>;
+ def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
+ def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
+ def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
+}
// Shift amount is implicitly masked.
multiclass MaskedShiftAmountPats<SDNode frag, string name> {
@@ -1937,75 +1956,179 @@ defm : one_bit_patterns<GR64, i64, BTR64rr, BTS64rr, BTC64rr, shiftMask64>;
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//
-// add reg, reg
-def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr GR8 :$src1, GR8 :$src2)>;
-def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;
-def : Pat<(add GR64:$src1, GR64:$src2), (ADD64rr GR64:$src1, GR64:$src2)>;
-
-// add reg, mem
-def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
- (ADD8rm GR8:$src1, addr:$src2)>;
-def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
- (ADD16rm GR16:$src1, addr:$src2)>;
-def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
- (ADD32rm GR32:$src1, addr:$src2)>;
-def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
- (ADD64rm GR64:$src1, addr:$src2)>;
-
-// add reg, imm
-def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri GR8:$src1 , imm:$src2)>;
-def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
-def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
-def : Pat<(add GR64:$src1, i64immSExt32:$src2), (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
-
-// sub reg, reg
-def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr GR8 :$src1, GR8 :$src2)>;
-def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;
-def : Pat<(sub GR64:$src1, GR64:$src2), (SUB64rr GR64:$src1, GR64:$src2)>;
-
-// sub reg, mem
-def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
- (SUB8rm GR8:$src1, addr:$src2)>;
-def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
- (SUB16rm GR16:$src1, addr:$src2)>;
-def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
- (SUB32rm GR32:$src1, addr:$src2)>;
-def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
- (SUB64rm GR64:$src1, addr:$src2)>;
-
-// sub reg, imm
-def : Pat<(sub GR8:$src1, imm:$src2),
- (SUB8ri GR8:$src1, imm:$src2)>;
-def : Pat<(sub GR16:$src1, imm:$src2),
- (SUB16ri GR16:$src1, imm:$src2)>;
-def : Pat<(sub GR32:$src1, imm:$src2),
- (SUB32ri GR32:$src1, imm:$src2)>;
-def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
- (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
-
-// sub 0, reg
-def : Pat<(X86sub_flag 0, GR8 :$src), (NEG8r GR8 :$src)>;
-def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;
-def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
-def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;
-
-// mul reg, reg
-def : Pat<(mul GR16:$src1, GR16:$src2),
- (IMUL16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(mul GR32:$src1, GR32:$src2),
- (IMUL32rr GR32:$src1, GR32:$src2)>;
-def : Pat<(mul GR64:$src1, GR64:$src2),
- (IMUL64rr GR64:$src1, GR64:$src2)>;
-
-// mul reg, mem
-def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
- (IMUL16rm GR16:$src1, addr:$src2)>;
-def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
- (IMUL32rm GR32:$src1, addr:$src2)>;
-def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
- (IMUL64rm GR64:$src1, addr:$src2)>;
+multiclass EFLAGSDefiningPats<string suffix, Predicate p> {
+ let Predicates = [p] in {
+ // add reg, reg
+ def : Pat<(add GR8 :$src1, GR8 :$src2), (!cast<Instruction>(ADD8rr#suffix) GR8 :$src1, GR8 :$src2)>;
+ def : Pat<(add GR16:$src1, GR16:$src2), (!cast<Instruction>(ADD16rr#suffix) GR16:$src1, GR16:$src2)>;
+ def : Pat<(add GR32:$src1, GR32:$src2), (!cast<Instruction>(ADD32rr#suffix) GR32:$src1, GR32:$src2)>;
+ def : Pat<(add GR64:$src1, GR64:$src2), (!cast<Instruction>(ADD64rr#suffix) GR64:$src1, GR64:$src2)>;
+
+ // add reg, mem
+ def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
+ (!cast<Instruction>(ADD8rm#suffix) GR8:$src1, addr:$src2)>;
+ def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
+ (!cast<Instruction>(ADD16rm#suffix) GR16:$src1, addr:$src2)>;
+ def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
+ (!cast<Instruction>(ADD32rm#suffix) GR32:$src1, addr:$src2)>;
+ def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
+ (!cast<Instruction>(ADD64rm#suffix) GR64:$src1, addr:$src2)>;
+
+ // add reg, imm
+ def : Pat<(add GR8 :$src1, imm:$src2), (!cast<Instruction>(ADD8ri#suffix) GR8:$src1 , imm:$src2)>;
+ def : Pat<(add GR16:$src1, imm:$src2), (!cast<Instruction>(ADD16ri#suffix) GR16:$src1, imm:$src2)>;
+ def : Pat<(add GR32:$src1, imm:$src2), (!cast<Instruction>(ADD32ri#suffix) GR32:$src1, imm:$src2)>;
+ def : Pat<(add GR64:$src1, i64immSExt32:$src2), (!cast<Instruction>(ADD64ri32#suffix) GR64:$src1, i64immSExt32:$src2)>;
+
+ // sub reg, reg
+ def : Pat<(sub GR8 :$src1, GR8 :$src2), (!cast<Instruction>(SUB8rr#suffix) GR8 :$src1, GR8 :$src2)>;
+ def : Pat<(sub GR16:$src1, GR16:$src2), (!cast<Instruction>(SUB16rr#suffix) GR16:$src1, GR16:$src2)>;
+ def : Pat<(sub GR32:$src1, GR32:$src2), (!cast<Instruction>(SUB32rr#suffix) GR32:$src1, GR32:$src2)>;
+ def : Pat<(sub GR64:$src1, GR64:$src2), (!cast<Instruction>(SUB64rr#suffix) GR64:$src1, GR64:$src2)>;
+
+ // sub reg, mem
+ def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
+ (!cast<Instruction>(SUB8rm#suffix) GR8:$src1, addr:$src2)>;
+ def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
+ (!cast<Instruction>(SUB16rm#suffix) GR16:$src1, addr:$src2)>;
+ def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
+ (!cast<Instruction>(SUB32rm#suffix) GR32:$src1, addr:$src2)>;
+ def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
+ (!cast<Instruction>(SUB64rm#suffix) GR64:$src1, addr:$src2)>;
+
+ // sub reg, imm
+ def : Pat<(sub GR8:$src1, imm:$src2),
+ (!cast<Instruction>(SUB8ri#suffix) GR8:$src1, imm:$src2)>;
+ def : Pat<(sub GR16:$src1, imm:$src2),
+ (!cast<Instruction>(SUB16ri#suffix) GR16:$src1, imm:$src2)>;
+ def : Pat<(sub GR32:$src1, imm:$src2),
+ (!cast<Instruction>(SUB32ri#suffix) GR32:$src1, imm:$src2)>;
+ def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
+ (!cast<Instruction>(SUB64ri32#suffix) GR64:$src1, i64immSExt32:$src2)>;
+
+ // sub 0, reg
+ def : Pat<(X86sub_flag 0, GR8 :$src), (!cast<Instruction>(NEG8r#suffix) GR8 :$src)>;
+ def : Pat<(X86sub_flag 0, GR16:$src), (!cast<Instruction>(NEG16r#suffix) GR16:$src)>;
+ def : Pat<(X86sub_flag 0, GR32:$src), (!cast<Instruction>(NEG32r#suffix) GR32:$src)>;
+ def : Pat<(X86sub_flag 0, GR64:$src), (!cast<Instruction>(NEG64r#suffix) GR64:$src)>;
+
+ // mul reg, reg
+ def : Pat<(mul GR16:$src1, GR16:$src2),
+ (!cast<Instruction>(IMUL16rr#suffix) GR16:$src1, GR16:$src2)>;
+ def : Pat<(mul GR32:$src1, GR32:$src2),
+ (!cast<Instruction>(IMUL32rr#suffix) GR32:$src1, GR32:$src2)>;
+ def : Pat<(mul GR64:$src1, GR64:$src2),
+ (!cast<Instruction>(IMUL64rr#suffix) GR64:$src1, GR64:$src2)>;
+
+ // mul reg, mem
+ def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
+ (!cast<Instruction>(IMUL16rm#suffix) GR16:$src1, addr:$src2)>;
+ def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
+ (!cast<Instruction>(IMUL32rm#suffix) GR32:$src1, addr:$src2)>;
+ def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
+ (!cast<Instruction>(IMUL64rm#suffix) GR64:$src1, addr:$src2)>;
+
+ // or reg/reg.
+ def : Pat<(or GR8 :$src1, GR8 :$src2), (!cast<Instruction>(OR8rr#suffix) GR8 :$src1, GR8 :$src2)>;
+ def : Pat<(or GR16:$src1, GR16:$src2), (!cast<Instruction>(OR16rr#suffix) GR16:$src1, GR16:$src2)>;
+ def : Pat<(or GR32:$src1, GR32:$src2), (!cast<Instruction>(OR32rr#suffix) GR32:$src1, GR32:$src2)>;
+ def : Pat<(or GR64:$src1, GR64:$src2), (!cast<Instruction>(OR64rr#suffix) GR64:$src1, GR64:$src2)>;
+
+ // or reg/mem
+ def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
+ (!cast<Instruction>(OR8rm#suffix) GR8:$src1, addr:$src2)>;
+ def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
+ (!cast<Instruction>(OR16rm#suffix) GR16:$src1, addr:$src2)>;
+ def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
+ (!cast<Instruction>(OR32rm#suffix) GR32:$src1, addr:$src2)>;
+ def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
+ (!cast<Instruction>(OR64rm#suffix) GR64:$src1, addr:$src2)>;
+
+ // or reg/imm
+ def : Pat<(or GR8:$src1 , imm:$src2), (!cast<Instruction>(OR8ri#suffix) GR8 :$src1, imm:$src2)>;
+ def : Pat<(or GR16:$src1, imm:$src2), (!cast<Instruction>(OR16ri#suffix) GR16:$src1, imm:$src2)>;
+ def : Pat<(or GR32:$src1, imm:$src2), (!cast<Instruction>(OR32ri#suffix) GR32:$src1, imm:$src2)>;
+ def : Pat<(or GR64:$src1, i64immSExt32:$src2),
+ (!cast<Instruction>(OR64ri32#suffix) GR64:$src1, i64immSExt32:$src2)>;
+
+ // xor reg/reg
+ def : Pat<(xor GR8 :$src1, GR8 :$src2), (!cast<Instruction>(XOR8rr#suffix) GR8 :$src1, GR8 :$src2)>;
+ def : Pat<(xor GR16:$src1, GR16:$src2), (!cast<Instruction>(XOR16rr#suffix) GR16:$src1, GR16:$src2)>;
+ def : Pat<(xor GR32:$src1, GR32:$src2), (!cast<Instruction>(XOR32rr#suffix) GR32:$src1, GR32:$src2)>;
+ def : Pat<(xor GR64:$src1, GR64:$src2), (!cast<Instruction>(XOR64rr#suffix) GR64:$src1, GR64:$src2)>;
+
+ // xor reg/mem
+ def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
+ (!cast<Instruction>(XOR8rm#suffix) GR8:$src1, addr:$src2)>;
+ def : Pat<(xor...
[truncated]
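For readers skimming the truncated diff above, here is a minimal sketch of the kind of lowering test these patterns enable. It is illustrative only: the triple, the `+ndd` feature spelling, the function, and the register choices are assumptions, not taken from the patch's own tests.

```llvm
; Hypothetical lowering-test sketch (not from the patch). With the NDD
; ("new data destination") feature enabled, the HasNDD patterns above should
; select the _ND instruction forms, which write their result to a third
; register instead of clobbering a source operand.
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ndd | FileCheck %s

define i64 @add64(i64 %a, i64 %b) {
; CHECK-LABEL: add64:
; CHECK: addq
; Exact registers depend on register allocation; with NDD the add may be
; printed as a three-operand form such as "addq %rsi, %rdi, %rax" rather
; than the destructive two-operand form.
  %r = add i64 %a, %b
  ret i64 %r
}
```

The patterns gated on NoNDD keep the legacy two-operand selection unchanged when the feature is absent.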
LGTM
LGTM.
Thanks @e-kud @phoebewang!
…ic instructions (llvm#78406) llvm#77564 added lowering tests for NDD arithmetic instructions. It would be great to add `--show-mc-encoding` to check that the NDD variant is selected first.
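As a rough illustration of that suggestion, a RUN line with encoding checks could look like the sketch below. The flags shown are standard llc/FileCheck usage; the concrete encoding bytes are deliberately left unspecified since they depend on the exact instruction and registers (NDD forms are encoded with the extended EVEX prefix, so their byte sequence differs from the legacy forms).

```llvm
; Sketch only (assumed, not taken from llvm#78406).
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ndd -show-mc-encoding | FileCheck %s

define i32 @add32(i32 %a, i32 %b) {
; CHECK-LABEL: add32:
; CHECK: addl {{.*}} # encoding:
; Matching on the "encoding:" comment makes the test sensitive to which
; instruction form was actually selected once the expected bytes are filled
; in (e.g. via llvm/utils/update_llc_test_checks.py).
  %r = add i32 %a, %b
  ret i32 %r
}
```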
…T/INC/DEC/IMUL (llvm#77564) We supported encoding/decoding for these instructions in llvm#76319 llvm#76721 llvm#76919
We supported encoding/decoding for APX AND in llvm#76319. This test should have been added in llvm#77564 but was missing.