@@ -86,3 +86,163 @@ define void @call_volatile_load_store_as_4(ptr addrspace(4) %p1, ptr addrspace(4
   call void @volatile_load_store_as_1(ptr %p2.cast)
   ret void
 }
+
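+; Every call site of @can_infer_cmpxchg (see @foo below) passes a pointer
+; addrspacecast from an addrspace(1) global, so all of the flat cmpxchg
+; operations are expected to be rewritten to operate on addrspace(1).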
+define internal void @can_infer_cmpxchg(ptr %word) {
+; CHECK-LABEL: define internal void @can_infer_cmpxchg(
+; CHECK-SAME: ptr [[WORD:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT: [[CMPXCHG_0:%.*]] = cmpxchg ptr addrspace(1) [[TMP1]], i32 0, i32 4 monotonic monotonic, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT: [[CMPXCHG_1:%.*]] = cmpxchg ptr addrspace(1) [[TMP2]], i32 0, i32 5 acq_rel monotonic, align 4
+; CHECK-NEXT: [[TMP3:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT: [[CMPXCHG_2:%.*]] = cmpxchg ptr addrspace(1) [[TMP3]], i32 0, i32 6 acquire monotonic, align 4
+; CHECK-NEXT: [[TMP4:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT: [[CMPXCHG_3:%.*]] = cmpxchg ptr addrspace(1) [[TMP4]], i32 0, i32 7 release monotonic, align 4
+; CHECK-NEXT: [[TMP5:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT: [[CMPXCHG_4:%.*]] = cmpxchg ptr addrspace(1) [[TMP5]], i32 0, i32 8 seq_cst monotonic, align 4
+; CHECK-NEXT: [[TMP6:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT: [[CMPXCHG_5:%.*]] = cmpxchg weak ptr addrspace(1) [[TMP6]], i32 0, i32 9 seq_cst monotonic, align 4
+; CHECK-NEXT: [[TMP7:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT: [[CMPXCHG_6:%.*]] = cmpxchg volatile ptr addrspace(1) [[TMP7]], i32 0, i32 10 seq_cst monotonic, align 4
+; CHECK-NEXT: [[TMP8:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT: [[CMPXCHG_7:%.*]] = cmpxchg weak volatile ptr addrspace(1) [[TMP8]], i32 0, i32 11 syncscope("singlethread") seq_cst monotonic, align 4
+; CHECK-NEXT: ret void
+;
+  %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic, align 4
+  %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic, align 4
+  %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic, align 4
+  %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic, align 4
+  %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic, align 4
+  %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic, align 4
+  %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic, align 4
+  %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic, align 4
+  ret void
+}
+
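+; Identical cmpxchg operations, but @foo also calls this function with a
+; pointer derived from an addrspace(3) argument, so the operations are
+; expected to stay on flat pointers.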
+define internal void @can_not_infer_cmpxchg(ptr %word) {
+; CHECK-LABEL: define internal void @can_not_infer_cmpxchg(
+; CHECK-SAME: ptr [[WORD:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[CMPXCHG_0:%.*]] = cmpxchg ptr [[WORD]], i32 0, i32 4 monotonic monotonic, align 4
+; CHECK-NEXT: [[CMPXCHG_1:%.*]] = cmpxchg ptr [[WORD]], i32 0, i32 5 acq_rel monotonic, align 4
+; CHECK-NEXT: [[CMPXCHG_2:%.*]] = cmpxchg ptr [[WORD]], i32 0, i32 6 acquire monotonic, align 4
+; CHECK-NEXT: [[CMPXCHG_3:%.*]] = cmpxchg ptr [[WORD]], i32 0, i32 7 release monotonic, align 4
+; CHECK-NEXT: [[CMPXCHG_4:%.*]] = cmpxchg ptr [[WORD]], i32 0, i32 8 seq_cst monotonic, align 4
+; CHECK-NEXT: [[CMPXCHG_5:%.*]] = cmpxchg weak ptr [[WORD]], i32 0, i32 9 seq_cst monotonic, align 4
+; CHECK-NEXT: [[CMPXCHG_6:%.*]] = cmpxchg volatile ptr [[WORD]], i32 0, i32 10 seq_cst monotonic, align 4
+; CHECK-NEXT: [[CMPXCHG_7:%.*]] = cmpxchg weak volatile ptr [[WORD]], i32 0, i32 11 syncscope("singlethread") seq_cst monotonic, align 4
+; CHECK-NEXT: ret void
+;
+  %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic, align 4
+  %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic, align 4
+  %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic, align 4
+  %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic, align 4
+  %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic, align 4
+  %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic, align 4
+  %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic, align 4
+  %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic, align 4
+  ret void
+}
+
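+; Covers every atomicrmw operation; as with @can_infer_cmpxchg, all call
+; sites pass addrspace(1)-derived pointers, so each operation is expected
+; to be rewritten to addrspace(1).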
+define internal void @can_infer_atomicrmw(ptr %word) {
+; CHECK-LABEL: define internal void @can_infer_atomicrmw(
+; CHECK-SAME: ptr [[WORD:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT: [[ATOMICRMW_XCHG:%.*]] = atomicrmw xchg ptr addrspace(1) [[TMP1]], i32 12 monotonic, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT: [[ATOMICRMW_ADD:%.*]] = atomicrmw add ptr addrspace(1) [[TMP2]], i32 13 monotonic, align 4
+; CHECK-NEXT: [[TMP3:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT: [[ATOMICRMW_SUB:%.*]] = atomicrmw sub ptr addrspace(1) [[TMP3]], i32 14 monotonic, align 4
+; CHECK-NEXT: [[TMP4:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT: [[ATOMICRMW_AND:%.*]] = atomicrmw and ptr addrspace(1) [[TMP4]], i32 15 monotonic, align 4
+; CHECK-NEXT: [[TMP5:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT: [[ATOMICRMW_NAND:%.*]] = atomicrmw nand ptr addrspace(1) [[TMP5]], i32 16 monotonic, align 4
+; CHECK-NEXT: [[TMP6:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT: [[ATOMICRMW_OR:%.*]] = atomicrmw or ptr addrspace(1) [[TMP6]], i32 17 monotonic, align 4
+; CHECK-NEXT: [[TMP7:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT: [[ATOMICRMW_XOR:%.*]] = atomicrmw xor ptr addrspace(1) [[TMP7]], i32 18 monotonic, align 4
+; CHECK-NEXT: [[TMP8:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT: [[ATOMICRMW_MAX:%.*]] = atomicrmw max ptr addrspace(1) [[TMP8]], i32 19 monotonic, align 4
+; CHECK-NEXT: [[TMP9:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT: [[ATOMICRMW_MIN:%.*]] = atomicrmw volatile min ptr addrspace(1) [[TMP9]], i32 20 monotonic, align 4
+; CHECK-NEXT: [[TMP10:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT: [[ATOMICRMW_UMAX:%.*]] = atomicrmw umax ptr addrspace(1) [[TMP10]], i32 21 syncscope("singlethread") monotonic, align 4
+; CHECK-NEXT: [[TMP11:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT: [[ATOMICRMW_UMIN:%.*]] = atomicrmw volatile umin ptr addrspace(1) [[TMP11]], i32 22 syncscope("singlethread") monotonic, align 4
+; CHECK-NEXT: ret void
+;
+  %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic, align 4
+  %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic, align 4
+  %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic, align 4
+  %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic, align 4
+  %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic, align 4
+  %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic, align 4
+  %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic, align 4
+  %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic, align 4
+  %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic, align 4
+  %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic, align 4
+  %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic, align 4
+  ret void
+}
+
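+; Same atomicrmw operations, but the addrspace(3)-derived call site in
+; @foo is expected to block the rewrite.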
+define internal void @can_not_infer_atomicrmw(ptr %word) {
+; CHECK-LABEL: define internal void @can_not_infer_atomicrmw(
+; CHECK-SAME: ptr [[WORD:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ATOMICRMW_XCHG:%.*]] = atomicrmw xchg ptr [[WORD]], i32 12 monotonic, align 4
+; CHECK-NEXT: [[ATOMICRMW_ADD:%.*]] = atomicrmw add ptr [[WORD]], i32 13 monotonic, align 4
+; CHECK-NEXT: [[ATOMICRMW_SUB:%.*]] = atomicrmw sub ptr [[WORD]], i32 14 monotonic, align 4
+; CHECK-NEXT: [[ATOMICRMW_AND:%.*]] = atomicrmw and ptr [[WORD]], i32 15 monotonic, align 4
+; CHECK-NEXT: [[ATOMICRMW_NAND:%.*]] = atomicrmw nand ptr [[WORD]], i32 16 monotonic, align 4
+; CHECK-NEXT: [[ATOMICRMW_OR:%.*]] = atomicrmw or ptr [[WORD]], i32 17 monotonic, align 4
+; CHECK-NEXT: [[ATOMICRMW_XOR:%.*]] = atomicrmw xor ptr [[WORD]], i32 18 monotonic, align 4
+; CHECK-NEXT: [[ATOMICRMW_MAX:%.*]] = atomicrmw max ptr [[WORD]], i32 19 monotonic, align 4
+; CHECK-NEXT: [[ATOMICRMW_MIN:%.*]] = atomicrmw volatile min ptr [[WORD]], i32 20 monotonic, align 4
+; CHECK-NEXT: [[ATOMICRMW_UMAX:%.*]] = atomicrmw umax ptr [[WORD]], i32 21 syncscope("singlethread") monotonic, align 4
+; CHECK-NEXT: [[ATOMICRMW_UMIN:%.*]] = atomicrmw volatile umin ptr [[WORD]], i32 22 syncscope("singlethread") monotonic, align 4
+; CHECK-NEXT: ret void
+;
+  %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic, align 4
+  %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic, align 4
+  %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic, align 4
+  %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic, align 4
+  %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic, align 4
+  %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic, align 4
+  %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic, align 4
+  %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic, align 4
+  %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic, align 4
+  %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic, align 4
+  %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic, align 4
+  ret void
+}
+
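+; The caller that determines what the callees above can infer: %g1.cast and
+; %g2.cast originate in addrspace(1) globals, while %val.cast originates in
+; an addrspace(3) argument.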
+define void @foo(ptr addrspace(3) %val) {
+; CHECK-LABEL: define void @foo(
+; CHECK-SAME: ptr addrspace(3) [[VAL:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: [[VAL_CAST:%.*]] = addrspacecast ptr addrspace(3) [[VAL]] to ptr
+; CHECK-NEXT: call void @can_infer_cmpxchg(ptr addrspacecast (ptr addrspace(1) @g1 to ptr))
+; CHECK-NEXT: call void @can_infer_cmpxchg(ptr addrspacecast (ptr addrspace(1) @g2 to ptr))
+; CHECK-NEXT: call void @can_not_infer_cmpxchg(ptr addrspacecast (ptr addrspace(1) @g1 to ptr))
+; CHECK-NEXT: call void @can_not_infer_cmpxchg(ptr addrspacecast (ptr addrspace(1) @g2 to ptr))
+; CHECK-NEXT: call void @can_not_infer_cmpxchg(ptr [[VAL_CAST]])
+; CHECK-NEXT: call void @can_infer_atomicrmw(ptr addrspacecast (ptr addrspace(1) @g1 to ptr))
+; CHECK-NEXT: call void @can_infer_atomicrmw(ptr addrspacecast (ptr addrspace(1) @g2 to ptr))
+; CHECK-NEXT: call void @can_not_infer_atomicrmw(ptr addrspacecast (ptr addrspace(1) @g1 to ptr))
+; CHECK-NEXT: call void @can_not_infer_atomicrmw(ptr addrspacecast (ptr addrspace(1) @g2 to ptr))
+; CHECK-NEXT: call void @can_not_infer_atomicrmw(ptr [[VAL_CAST]])
+; CHECK-NEXT: ret void
+;
+  %g1.cast = addrspacecast ptr addrspace(1) @g1 to ptr
+  %g2.cast = addrspacecast ptr addrspace(1) @g2 to ptr
+  %val.cast = addrspacecast ptr addrspace(3) %val to ptr
+  call void @can_infer_cmpxchg(ptr %g1.cast)
+  call void @can_infer_cmpxchg(ptr %g2.cast)
+  call void @can_not_infer_cmpxchg(ptr %g1.cast)
+  call void @can_not_infer_cmpxchg(ptr %g2.cast)
+  call void @can_not_infer_cmpxchg(ptr %val.cast)
+  call void @can_infer_atomicrmw(ptr %g1.cast)
+  call void @can_infer_atomicrmw(ptr %g2.cast)
+  call void @can_not_infer_atomicrmw(ptr %g1.cast)
+  call void @can_not_infer_atomicrmw(ptr %g2.cast)
+  call void @can_not_infer_atomicrmw(ptr %val.cast)
+  ret void
+}
+