@@ -759,38 +759,38 @@ defm LXADD : ATOMIC_LOAD_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add",
759
759
* extremely late to prevent them from being accidentally reordered in the backend
760
760
* (see below the RELEASE_MOV* / ACQUIRE_MOV* pseudo-instructions)
761
761
*/
762
// Pseudo-instructions for release-ordered atomic read-modify-write on memory.
// Each pattern matches (atomic_store (op (atomic_load addr), src)) and is
// expanded extremely late (see RELEASE_MOV*/ACQUIRE_MOV*) so the backend
// cannot accidentally reorder it.  Taking the operator as an SDNode (rather
// than a string resolved via !cast<PatFrag>) lets TableGen type-check the
// operator at definition time.
multiclass RELEASE_BINOP_MI<SDNode op> {
  def NAME#8mi : I<0, Pseudo, (outs), (ins i8mem:$dst, i8imm:$src),
                   "#BINOP "#NAME#"8mi PSEUDO!",
                   [(atomic_store_8 addr:$dst, (op
                       (atomic_load_8 addr:$dst), (i8 imm:$src)))]>;
  def NAME#8mr : I<0, Pseudo, (outs), (ins i8mem:$dst, GR8:$src),
                   "#BINOP "#NAME#"8mr PSEUDO!",
                   [(atomic_store_8 addr:$dst, (op
                       (atomic_load_8 addr:$dst), GR8:$src))]>;
  // NAME#16 is not generated as 16-bit arithmetic instructions are considered
  // costly and avoided as far as possible by this backend anyway
  def NAME#32mi : I<0, Pseudo, (outs), (ins i32mem:$dst, i32imm:$src),
                    "#BINOP "#NAME#"32mi PSEUDO!",
                    [(atomic_store_32 addr:$dst, (op
                        (atomic_load_32 addr:$dst), (i32 imm:$src)))]>;
  def NAME#32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),
                    "#BINOP "#NAME#"32mr PSEUDO!",
                    [(atomic_store_32 addr:$dst, (op
                        (atomic_load_32 addr:$dst), GR32:$src))]>;
  // 64-bit immediate form is restricted to sign-extended 32-bit immediates,
  // matching the encodable range of x86-64 ALU instructions.
  def NAME#64mi32 : I<0, Pseudo, (outs), (ins i64mem:$dst, i64i32imm:$src),
                      "#BINOP "#NAME#"64mi32 PSEUDO!",
                      [(atomic_store_64 addr:$dst, (op
                          (atomic_load_64 addr:$dst), (i64immSExt32:$src)))]>;
  def NAME#64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),
                    "#BINOP "#NAME#"64mr PSEUDO!",
                    [(atomic_store_64 addr:$dst, (op
                        (atomic_load_64 addr:$dst), GR64:$src))]>;
}
defm RELEASE_ADD : RELEASE_BINOP_MI<add>;
defm RELEASE_AND : RELEASE_BINOP_MI<and>;
defm RELEASE_OR  : RELEASE_BINOP_MI<or>;
defm RELEASE_XOR : RELEASE_BINOP_MI<xor>;
// Note: we don't deal with sub, because subtractions of constants are
// optimized into additions before this code can run
796
796
@@ -799,21 +799,21 @@ defm RELEASE_XOR : RELEASE_BINOP_MI<"xor">;
799
799
// FIXME: Version that doesn't clobber $src, using AVX's VADDSS.
// FIXME: This could also handle SIMD operations with *ps and *pd instructions.
// Release-ordered atomic floating-point RMW pseudos.  The FP value is moved
// through an integer atomic load/store via bitconvert, since x86 atomics
// operate on integer memory; custom insertion expands the pseudo late.
let usesCustomInserter = 1 in {
multiclass RELEASE_FP_BINOP_MI<SDNode op> {
  def NAME#32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, FR32:$src),
                    "#BINOP "#NAME#"32mr PSEUDO!",
                    [(atomic_store_32 addr:$dst,
                       (i32 (bitconvert (op
                         (f32 (bitconvert (i32 (atomic_load_32 addr:$dst)))),
                         FR32:$src))))]>, Requires<[HasSSE1]>;
  def NAME#64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, FR64:$src),
                    "#BINOP "#NAME#"64mr PSEUDO!",
                    [(atomic_store_64 addr:$dst,
                       (i64 (bitconvert (op
                         (f64 (bitconvert (i64 (atomic_load_64 addr:$dst)))),
                         FR64:$src))))]>, Requires<[HasSSE2]>;
}
defm RELEASE_FADD : RELEASE_FP_BINOP_MI<fadd>;
// FIXME: Add fsub, fmul, fdiv, ...
}
819
819
0 commit comments