diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td
index 1e77016001e48..d64a66fc9d9cf 100644
--- a/clang/include/clang/Basic/Builtins.td
+++ b/clang/include/clang/Basic/Builtins.td
@@ -1977,16 +1977,16 @@ def AtomicNandFetch : AtomicBuiltin {
   let Prototype = "void(...)";
 }
 
-def AtomicTestAndSet : AtomicBuiltin {
+def AtomicTestAndSet : Builtin {
   let Spellings = ["__atomic_test_and_set"];
-  let Attributes = [NoThrow, CustomTypeChecking];
-  let Prototype = "void(...)";
+  let Attributes = [NoThrow];
+  let Prototype = "bool(void volatile*, int)";
 }
 
-def AtomicClear : AtomicBuiltin {
+def AtomicClear : Builtin {
   let Spellings = ["__atomic_clear"];
-  let Attributes = [NoThrow, CustomTypeChecking];
-  let Prototype = "void(...)";
+  let Attributes = [NoThrow];
+  let Prototype = "void(void volatile*, int)";
 }
 
 def AtomicThreadFence : Builtin {
diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp
index 7e6cb53064ff2..8c8ccdb61dc01 100644
--- a/clang/lib/AST/Expr.cpp
+++ b/clang/lib/AST/Expr.cpp
@@ -5070,8 +5070,6 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
   case AO__opencl_atomic_init:
   case AO__c11_atomic_load:
   case AO__atomic_load_n:
-  case AO__atomic_test_and_set:
-  case AO__atomic_clear:
     return 2;
 
   case AO__scoped_atomic_load_n:
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index 3adb2a7ad207f..f6cb2ad421e90 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -723,24 +723,6 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
   case AtomicExpr::AO__scoped_atomic_fetch_nand:
     Op = llvm::AtomicRMWInst::Nand;
     break;
-
-  case AtomicExpr::AO__atomic_test_and_set: {
-    llvm::AtomicRMWInst *RMWI =
-        CGF.emitAtomicRMWInst(llvm::AtomicRMWInst::Xchg, Ptr,
-                              CGF.Builder.getInt8(1), Order, Scope, E);
-    RMWI->setVolatile(E->isVolatile());
-    llvm::Value *Result = CGF.Builder.CreateIsNotNull(RMWI, "tobool");
-    CGF.Builder.CreateStore(Result, Dest);
-    return;
-  }
-
-  case AtomicExpr::AO__atomic_clear: {
-    llvm::StoreInst *Store =
-        CGF.Builder.CreateStore(CGF.Builder.getInt8(0), Ptr);
-    Store->setAtomic(Order, Scope);
-    Store->setVolatile(E->isVolatile());
-    return;
-  }
   }
 
   llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
@@ -896,8 +878,6 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   case AtomicExpr::AO__c11_atomic_load:
   case AtomicExpr::AO__opencl_atomic_load:
   case AtomicExpr::AO__hip_atomic_load:
-  case AtomicExpr::AO__atomic_test_and_set:
-  case AtomicExpr::AO__atomic_clear:
     break;
 
   case AtomicExpr::AO__atomic_load:
@@ -1220,8 +1200,6 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   case AtomicExpr::AO__opencl_atomic_fetch_max:
   case AtomicExpr::AO__scoped_atomic_fetch_max:
   case AtomicExpr::AO__scoped_atomic_max_fetch:
-  case AtomicExpr::AO__atomic_test_and_set:
-  case AtomicExpr::AO__atomic_clear:
     llvm_unreachable("Integral atomic operations always become atomicrmw!");
   }
 
@@ -1261,8 +1239,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
                  E->getOp() == AtomicExpr::AO__atomic_store ||
                  E->getOp() == AtomicExpr::AO__atomic_store_n ||
                  E->getOp() == AtomicExpr::AO__scoped_atomic_store ||
-                 E->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
-                 E->getOp() == AtomicExpr::AO__atomic_clear;
+                 E->getOp() == AtomicExpr::AO__scoped_atomic_store_n;
   bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                 E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                 E->getOp() == AtomicExpr::AO__hip_atomic_load ||
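The CGBuiltin.cpp lowering restored below dispatches on the raw integer encoding of the C11 memory orderings (its `case 0:` through `case 5:` labels). For reference, a minimal sketch of that encoding, which matches C11 `<stdatomic.h>` and Clang's `llvm::AtomicOrderingCABI`; the `MO_*` names are illustrative only, not from the patch:

```c
/* Illustrative names (not from the patch) for the memory_order ABI values
 * that the switches in CGBuiltin.cpp below match against. */
enum {
  MO_RELAXED = 0, /* memory_order_relaxed -> monotonic          */
  MO_CONSUME = 1, /* memory_order_consume -> lowered as acquire */
  MO_ACQUIRE = 2, /* memory_order_acquire -> acquire            */
  MO_RELEASE = 3, /* memory_order_release -> release            */
  MO_ACQ_REL = 4, /* memory_order_acq_rel -> acq_rel            */
  MO_SEQ_CST = 5  /* memory_order_seq_cst -> seq_cst            */
};
```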
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 0ea2ee4c264ae..4d4b7428abd50 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -5099,6 +5099,147 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
                         ReturnValueSlot(), Args);
   }
 
+  case Builtin::BI__atomic_test_and_set: {
+    // Look at the argument type to determine whether this is a volatile
+    // operation. The parameter type is always volatile.
+    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
+    bool Volatile =
+        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+
+    Address Ptr =
+        EmitPointerWithAlignment(E->getArg(0)).withElementType(Int8Ty);
+
+    Value *NewVal = Builder.getInt8(1);
+    Value *Order = EmitScalarExpr(E->getArg(1));
+    if (isa<llvm::ConstantInt>(Order)) {
+      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+      AtomicRMWInst *Result = nullptr;
+      switch (ord) {
+      case 0: // memory_order_relaxed
+      default: // invalid order
+        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
+                                         llvm::AtomicOrdering::Monotonic);
+        break;
+      case 1: // memory_order_consume
+      case 2: // memory_order_acquire
+        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
+                                         llvm::AtomicOrdering::Acquire);
+        break;
+      case 3: // memory_order_release
+        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
+                                         llvm::AtomicOrdering::Release);
+        break;
+      case 4: // memory_order_acq_rel
+
+        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
+                                         llvm::AtomicOrdering::AcquireRelease);
+        break;
+      case 5: // memory_order_seq_cst
+        Result = Builder.CreateAtomicRMW(
+            llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
+            llvm::AtomicOrdering::SequentiallyConsistent);
+        break;
+      }
+      Result->setVolatile(Volatile);
+      return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
+    }
+
+    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+    llvm::BasicBlock *BBs[5] = {
+        createBasicBlock("monotonic", CurFn),
+        createBasicBlock("acquire", CurFn),
+        createBasicBlock("release", CurFn),
+        createBasicBlock("acqrel", CurFn),
+        createBasicBlock("seqcst", CurFn)
+    };
+    llvm::AtomicOrdering Orders[5] = {
+        llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
+        llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
+        llvm::AtomicOrdering::SequentiallyConsistent};
+
+    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
+
+    Builder.SetInsertPoint(ContBB);
+    PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
+
+    for (unsigned i = 0; i < 5; ++i) {
+      Builder.SetInsertPoint(BBs[i]);
+      AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+                                                   Ptr, NewVal, Orders[i]);
+      RMW->setVolatile(Volatile);
+      Result->addIncoming(RMW, BBs[i]);
+      Builder.CreateBr(ContBB);
+    }
+
+    SI->addCase(Builder.getInt32(0), BBs[0]);
+    SI->addCase(Builder.getInt32(1), BBs[1]);
+    SI->addCase(Builder.getInt32(2), BBs[1]);
+    SI->addCase(Builder.getInt32(3), BBs[2]);
+    SI->addCase(Builder.getInt32(4), BBs[3]);
+    SI->addCase(Builder.getInt32(5), BBs[4]);
+
+    Builder.SetInsertPoint(ContBB);
+    return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
+  }
+
+  case Builtin::BI__atomic_clear: {
+    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
+    bool Volatile =
+        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+
+    Address Ptr = EmitPointerWithAlignment(E->getArg(0));
+    Ptr = Ptr.withElementType(Int8Ty);
+    Value *NewVal = Builder.getInt8(0);
+    Value *Order = EmitScalarExpr(E->getArg(1));
+    if (isa<llvm::ConstantInt>(Order)) {
+      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
+      switch (ord) {
+      case 0: // memory_order_relaxed
+      default: // invalid order
+        Store->setOrdering(llvm::AtomicOrdering::Monotonic);
+        break;
+      case 3: // memory_order_release
+        Store->setOrdering(llvm::AtomicOrdering::Release);
+        break;
+      case 5: // memory_order_seq_cst
+        Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
+        break;
+      }
+      return RValue::get(nullptr);
+    }
+
+    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+    llvm::BasicBlock *BBs[3] = {
+        createBasicBlock("monotonic", CurFn),
+        createBasicBlock("release", CurFn),
+        createBasicBlock("seqcst", CurFn)
+    };
+    llvm::AtomicOrdering Orders[3] = {
+        llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
+        llvm::AtomicOrdering::SequentiallyConsistent};
+
+    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
+
+    for (unsigned i = 0; i < 3; ++i) {
+      Builder.SetInsertPoint(BBs[i]);
+      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
+      Store->setOrdering(Orders[i]);
+      Builder.CreateBr(ContBB);
+    }
+
+    SI->addCase(Builder.getInt32(0), BBs[0]);
+    SI->addCase(Builder.getInt32(3), BBs[1]);
+    SI->addCase(Builder.getInt32(5), BBs[2]);
+
+    Builder.SetInsertPoint(ContBB);
+    return RValue::get(nullptr);
+  }
+
   case Builtin::BI__atomic_thread_fence:
   case Builtin::BI__atomic_signal_fence:
   case Builtin::BI__c11_atomic_thread_fence:
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index f6c4def289255..e703a62ff9cf1 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -3631,7 +3631,6 @@ static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
   case AtomicExpr::AO__atomic_store_n:
   case AtomicExpr::AO__scoped_atomic_store:
   case AtomicExpr::AO__scoped_atomic_store_n:
-  case AtomicExpr::AO__atomic_clear:
     return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
            OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
            OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
@@ -3684,18 +3683,12 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
     C11CmpXchg,
 
     // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
-    GNUCmpXchg,
-
-    // bool __atomic_test_and_set(A *, int)
-    TestAndSet,
-
-    // void __atomic_clear(A *, int)
-    Clear,
+    GNUCmpXchg
   } Form = Init;
 
-  const unsigned NumForm = Clear + 1;
-  const unsigned NumArgs[] = {2, 2, 3, 3, 3, 3, 4, 5, 6, 2, 2};
-  const unsigned NumVals[] = {1, 0, 1, 1, 1, 1, 2, 2, 3, 0, 0};
+  const unsigned NumForm = GNUCmpXchg + 1;
+  const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
+  const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
   // where:
   //  C is an appropriate type,
   //  A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
@@ -3856,14 +3849,6 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
   case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
     Form = GNUCmpXchg;
    break;
-
-  case AtomicExpr::AO__atomic_test_and_set:
-    Form = TestAndSet;
-    break;
-
-  case AtomicExpr::AO__atomic_clear:
-    Form = Clear;
-    break;
   }
 
   unsigned AdjustedNumArgs = NumArgs[Form];
@@ -4009,10 +3994,10 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
   ValType.removeLocalVolatile();
   ValType.removeLocalConst();
   QualType ResultType = ValType;
-  if (Form == Copy || Form == LoadCopy || Form == GNUXchg || Form == Init ||
-      Form == Clear)
+  if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
+      Form == Init)
     ResultType = Context.VoidTy;
-  else if (Form == C11CmpXchg || Form == GNUCmpXchg || Form == TestAndSet)
+  else if (Form == C11CmpXchg || Form == GNUCmpXchg)
     ResultType = Context.BoolTy;
 
   // The type of a parameter passed 'by value'. In the GNU atomics, such
@@ -4057,10 +4042,6 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
       APIOrderedArgs.push_back(Args[1]); // Order
       APIOrderedArgs.push_back(Args[3]); // OrderFail
       break;
-    case TestAndSet:
-    case Clear:
-      APIOrderedArgs.push_back(Args[1]); // Order
-      break;
     }
   } else
     APIOrderedArgs.append(Args.begin(), Args.end());
@@ -4146,8 +4127,6 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
     SubExprs.push_back(APIOrderedArgs[1]); // Val1
     break;
   case Load:
-  case TestAndSet:
-  case Clear:
     SubExprs.push_back(APIOrderedArgs[1]); // Order
     break;
   case LoadCopy:
diff --git a/clang/test/CodeGen/atomic-test-and-set.c b/clang/test/CodeGen/atomic-test-and-set.c
deleted file mode 100644
index bb05623f89755..0000000000000
--- a/clang/test/CodeGen/atomic-test-and-set.c
+++ /dev/null
@@ -1,250 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
-// RUN: %clang_cc1 %s -emit-llvm -o - -triple=aarch64-none-elf | FileCheck %s
-// REQUIRES: aarch64-registered-target
-
-#include <stdatomic.h>
-
-// CHECK-LABEL: define dso_local void @clear_relaxed(
-// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[PTR_ADDR:%.*]] = alloca ptr, align 8
-// CHECK-NEXT:    store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    store atomic i8 0, ptr [[TMP0]] monotonic, align 1
-// CHECK-NEXT:    ret void
-//
-void clear_relaxed(char *ptr) {
-  __atomic_clear(ptr, memory_order_relaxed);
-}
-
-// CHECK-LABEL: define dso_local void @clear_seq_cst(
-// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[PTR_ADDR:%.*]] = alloca ptr, align 8
-// CHECK-NEXT:    store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    store atomic i8 0, ptr [[TMP0]] seq_cst, align 1
-// CHECK-NEXT:    ret void
-//
-void clear_seq_cst(char *ptr) {
-  __atomic_clear(ptr, memory_order_seq_cst);
-}
-
-// CHECK-LABEL: define dso_local void @clear_release(
-// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[PTR_ADDR:%.*]] = alloca ptr, align 8
-// CHECK-NEXT:    store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    store atomic i8 0, ptr [[TMP0]] release, align 1
-// CHECK-NEXT:    ret void
-//
-void clear_release(char *ptr) {
-  __atomic_clear(ptr, memory_order_release);
-}
-
-// CHECK-LABEL: define dso_local void @clear_dynamic(
-// CHECK-SAME: ptr noundef [[PTR:%.*]], i32 noundef [[ORDER:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[PTR_ADDR:%.*]] = alloca ptr, align 8
-// CHECK-NEXT:    [[ORDER_ADDR:%.*]] = alloca i32, align 4
-// CHECK-NEXT:    store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    store i32 [[ORDER]], ptr [[ORDER_ADDR]], align 4
-// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[ORDER_ADDR]], align 4
-// CHECK-NEXT:    switch i32 [[TMP1]], label %[[MONOTONIC:.*]] [
-// CHECK-NEXT:      i32 3, label %[[RELEASE:.*]]
-// CHECK-NEXT:      i32 5, label %[[SEQCST:.*]]
-// CHECK-NEXT:    ]
-// CHECK:       [[MONOTONIC]]:
-// CHECK-NEXT:    store atomic i8 0, ptr [[TMP0]] monotonic, align 1
-// CHECK-NEXT:    br label %[[ATOMIC_CONTINUE:.*]]
-// CHECK:       [[RELEASE]]:
-// CHECK-NEXT:    store atomic i8 0, ptr [[TMP0]] release, align 1
-// CHECK-NEXT:    br label %[[ATOMIC_CONTINUE]]
-// CHECK:       [[SEQCST]]:
-// CHECK-NEXT:    store atomic i8 0, ptr [[TMP0]] seq_cst, align 1
-// CHECK-NEXT:    br label %[[ATOMIC_CONTINUE]]
-// CHECK:       [[ATOMIC_CONTINUE]]:
-// CHECK-NEXT:    ret void
-//
-void clear_dynamic(char *ptr, int order) {
-  __atomic_clear(ptr, order);
-}
-
-// CHECK-LABEL: define dso_local void @test_and_set_relaxed(
-// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[PTR_ADDR:%.*]] = alloca ptr, align 8
-// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
-// CHECK-NEXT:    store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 1
-// CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
-// CHECK-NEXT:    store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
-// CHECK-NEXT:    [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
-// CHECK-NEXT:    [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
-// CHECK-NEXT:    ret void
-//
-void test_and_set_relaxed(char *ptr) {
-  __atomic_test_and_set(ptr, memory_order_relaxed);
-}
-
-// CHECK-LABEL: define dso_local void @test_and_set_consume(
-// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[PTR_ADDR:%.*]] = alloca ptr, align 8
-// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
-// CHECK-NEXT:    store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acquire, align 1
-// CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
-// CHECK-NEXT:    store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
-// CHECK-NEXT:    [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
-// CHECK-NEXT:    [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
-// CHECK-NEXT:    ret void
-//
-void test_and_set_consume(char *ptr) {
-  __atomic_test_and_set(ptr, memory_order_consume);
-}
-
-// CHECK-LABEL: define dso_local void @test_and_set_acquire(
-// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[PTR_ADDR:%.*]] = alloca ptr, align 8
-// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
-// CHECK-NEXT:    store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acquire, align 1
-// CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
-// CHECK-NEXT:    store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
-// CHECK-NEXT:    [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
-// CHECK-NEXT:    [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
-// CHECK-NEXT:    ret void
-//
-void test_and_set_acquire(char *ptr) {
-  __atomic_test_and_set(ptr, memory_order_acquire);
-}
-
-// CHECK-LABEL: define dso_local void @test_and_set_release(
-// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[PTR_ADDR:%.*]] = alloca ptr, align 8
-// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
-// CHECK-NEXT:    store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 release, align 1
-// CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
-// CHECK-NEXT:    store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
-// CHECK-NEXT:    [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
-// CHECK-NEXT:    [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
-// CHECK-NEXT:    ret void
-//
-void test_and_set_release(char *ptr) {
-  __atomic_test_and_set(ptr, memory_order_release);
-}
-
-// CHECK-LABEL: define dso_local void @test_and_set_acq_rel(
-// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[PTR_ADDR:%.*]] = alloca ptr, align 8
-// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
-// CHECK-NEXT:    store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acq_rel, align 1
-// CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
-// CHECK-NEXT:    store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
-// CHECK-NEXT:    [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
-// CHECK-NEXT:    [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
-// CHECK-NEXT:    ret void
-//
-void test_and_set_acq_rel(char *ptr) {
-  __atomic_test_and_set(ptr, memory_order_acq_rel);
-}
-
-// CHECK-LABEL: define dso_local void @test_and_set_seq_cst(
-// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[PTR_ADDR:%.*]] = alloca ptr, align 8
-// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
-// CHECK-NEXT:    store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 seq_cst, align 1
-// CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
-// CHECK-NEXT:    store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
-// CHECK-NEXT:    [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
-// CHECK-NEXT:    [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
-// CHECK-NEXT:    ret void
-//
-void test_and_set_seq_cst(char *ptr) {
-  __atomic_test_and_set(ptr, memory_order_seq_cst);
-}
-
-// CHECK-LABEL: define dso_local void @test_and_set_dynamic(
-// CHECK-SAME: ptr noundef [[PTR:%.*]], i32 noundef [[ORDER:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[PTR_ADDR:%.*]] = alloca ptr, align 8
-// CHECK-NEXT:    [[ORDER_ADDR:%.*]] = alloca i32, align 4
-// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
-// CHECK-NEXT:    store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    store i32 [[ORDER]], ptr [[ORDER_ADDR]], align 4
-// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[ORDER_ADDR]], align 4
-// CHECK-NEXT:    switch i32 [[TMP1]], label %[[MONOTONIC:.*]] [
-// CHECK-NEXT:      i32 1, label %[[ACQUIRE:.*]]
-// CHECK-NEXT:      i32 2, label %[[ACQUIRE]]
-// CHECK-NEXT:      i32 3, label %[[RELEASE:.*]]
-// CHECK-NEXT:      i32 4, label %[[ACQREL:.*]]
-// CHECK-NEXT:      i32 5, label %[[SEQCST:.*]]
-// CHECK-NEXT:    ]
-// CHECK:       [[MONOTONIC]]:
-// CHECK-NEXT:    [[TMP2:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 1
-// CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i8 [[TMP2]], 0
-// CHECK-NEXT:    store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
-// CHECK-NEXT:    br label %[[ATOMIC_CONTINUE:.*]]
-// CHECK:       [[ACQUIRE]]:
-// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acquire, align 1
-// CHECK-NEXT:    [[TOBOOL1:%.*]] = icmp ne i8 [[TMP3]], 0
-// CHECK-NEXT:    store i1 [[TOBOOL1]], ptr [[ATOMIC_TEMP]], align 1
-// CHECK-NEXT:    br label %[[ATOMIC_CONTINUE]]
-// CHECK:       [[RELEASE]]:
-// CHECK-NEXT:    [[TMP4:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 release, align 1
-// CHECK-NEXT:    [[TOBOOL2:%.*]] = icmp ne i8 [[TMP4]], 0
-// CHECK-NEXT:    store i1 [[TOBOOL2]], ptr [[ATOMIC_TEMP]], align 1
-// CHECK-NEXT:    br label %[[ATOMIC_CONTINUE]]
-// CHECK:       [[ACQREL]]:
-// CHECK-NEXT:    [[TMP5:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acq_rel, align 1
-// CHECK-NEXT:    [[TOBOOL3:%.*]] = icmp ne i8 [[TMP5]], 0
-// CHECK-NEXT:    store i1 [[TOBOOL3]], ptr [[ATOMIC_TEMP]], align 1
-// CHECK-NEXT:    br label %[[ATOMIC_CONTINUE]]
-// CHECK:       [[SEQCST]]:
-// CHECK-NEXT:    [[TMP6:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 seq_cst, align 1
-// CHECK-NEXT:    [[TOBOOL4:%.*]] = icmp ne i8 [[TMP6]], 0
-// CHECK-NEXT:    store i1 [[TOBOOL4]], ptr [[ATOMIC_TEMP]], align 1
-// CHECK-NEXT:    br label %[[ATOMIC_CONTINUE]]
-// CHECK:       [[ATOMIC_CONTINUE]]:
-// CHECK-NEXT:    [[TMP7:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
-// CHECK-NEXT:    [[LOADEDV:%.*]] = trunc i8 [[TMP7]] to i1
-// CHECK-NEXT:    ret void
-//
-void test_and_set_dynamic(char *ptr, int order) {
-  __atomic_test_and_set(ptr, order);
-}
-
-// CHECK-LABEL: define dso_local void @test_and_set_array(
-// CHECK-SAME: ) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[X:%.*]] = alloca [10 x i32], align 4
-// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
-// CHECK-NEXT:    [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x i32], ptr [[X]], i64 0, i64 0
-// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw volatile xchg ptr [[ARRAYDECAY]], i8 1 seq_cst, align 4
-// CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i8 [[TMP0]], 0
-// CHECK-NEXT:    store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 4
-// CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 4
-// CHECK-NEXT:    [[LOADEDV:%.*]] = trunc i8 [[TMP1]] to i1
-// CHECK-NEXT:    ret void
-//
-void test_and_set_array() {
-  volatile int x[10];
-  __atomic_test_and_set(x, memory_order_seq_cst);
-}
diff --git a/clang/test/Sema/atomic-ops.c b/clang/test/Sema/atomic-ops.c
index c3837cf865df8..2405f804d0da5 100644
--- a/clang/test/Sema/atomic-ops.c
+++ b/clang/test/Sema/atomic-ops.c
@@ -284,15 +284,11 @@ void f(_Atomic(int) *i, const _Atomic(int) *ci,
   const volatile int flag_k = 0;
   volatile int flag = 0;
-  (void)(int)__atomic_test_and_set(&flag_k, memory_order_seq_cst); // expected-error {{address argument to atomic operation must be a pointer to non-const type ('const volatile int *' invalid)}}
+  (void)(int)__atomic_test_and_set(&flag_k, memory_order_seq_cst); // expected-warning {{passing 'const volatile int *' to parameter of type 'volatile void *'}}
   (void)(int)__atomic_test_and_set(&flag, memory_order_seq_cst);
-  __atomic_clear(&flag_k, memory_order_seq_cst); // expected-error {{address argument to atomic operation must be a pointer to non-const type ('const volatile int *' invalid)}}
+  __atomic_clear(&flag_k, memory_order_seq_cst); // expected-warning {{passing 'const volatile int *' to parameter of type 'volatile void *'}}
   __atomic_clear(&flag, memory_order_seq_cst);
   (int)__atomic_clear(&flag, memory_order_seq_cst); // expected-error {{operand of type 'void'}}
-  __atomic_clear(0x8000, memory_order_seq_cst); // expected-error {{address argument to atomic builtin must be a pointer ('int' invalid)}}
-  __atomic_clear(&flag, memory_order_consume); // expected-warning {{memory order argument to atomic operation is invalid}}
-  __atomic_clear(&flag, memory_order_acquire); // expected-warning {{memory order argument to atomic operation is invalid}}
-  __atomic_clear(&flag, memory_order_acq_rel); // expected-warning {{memory order argument to atomic operation is invalid}}
 
   __c11_atomic_init(ci, 0); // expected-error {{address argument to atomic operation must be a pointer to non-const _Atomic type ('const _Atomic(int) *' invalid)}}
   __c11_atomic_store(ci, 0, memory_order_release); // expected-error {{address argument to atomic operation must be a pointer to non-const _Atomic type ('const _Atomic(int) *' invalid)}}
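With the restored prototypes (`bool(void volatile*, int)` and `void(void volatile*, int)`), the builtins type-check like ordinary C functions: any object pointer converts to `volatile void *` (which is why atomic-ops.c now expects a qualifier warning instead of a hard error), and both operations touch exactly one byte. A minimal usage sketch, assuming GCC's documented semantics that `__atomic_test_and_set` returns the previous "set" state of the byte; the `lock_byte`, `lock`, and `unlock` names are hypothetical:

```c
#include <stdatomic.h> /* memory_order_* constants */
#include <stdbool.h>

static volatile bool lock_byte; /* one-byte flag; starts clear (0) */

static void lock(void) {
  /* Atomically exchange in a nonzero byte; spin while it was already set. */
  while (__atomic_test_and_set(&lock_byte, memory_order_acquire))
    ;
}

static void unlock(void) {
  /* Atomically store a zero byte with release ordering. */
  __atomic_clear(&lock_byte, memory_order_release);
}
```

Under this patch, the `lock` loop lowers to the `atomicrmw xchg ... acquire` pattern and `unlock` to the `store atomic i8 0 ... release` pattern seen in the deleted FileCheck lines above.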