diff --git a/include/swift/ABI/System.h b/include/swift/ABI/System.h index 3bad3622c9877..bd7e84d95c334 100644 --- a/include/swift/ABI/System.h +++ b/include/swift/ABI/System.h @@ -56,16 +56,36 @@ /// ``pointer & SWIFT_ABI_XXX_OBJC_RESERVED_BITS_MASK == 0 && /// pointer & SWIFT_ABI_XXX_SWIFT_SPARE_BITS_MASK != 0``. +// Weak references use a marker to tell when they are controlled by +// the ObjC runtime and when they are controlled by the Swift runtime. +// Non-ObjC platforms don't use this marker. +#define SWIFT_ABI_DEFAULT_OBJC_WEAK_REFERENCE_MARKER_MASK 0 +#define SWIFT_ABI_DEFAULT_OBJC_WEAK_REFERENCE_MARKER_VALUE 0 + /*********************************** i386 *************************************/ // Heap objects are pointer-aligned, so the low two bits are unused. #define SWIFT_ABI_I386_SWIFT_SPARE_BITS_MASK 0x00000003U +// ObjC weak reference discriminator is the LSB. +#define SWIFT_ABI_I386_OBJC_WEAK_REFERENCE_MARKER_MASK \ + (SWIFT_ABI_DEFAULT_OBJC_RESERVED_BITS_MASK | \ + 1< #include #include "swift/Runtime/Config.h" -#include "swift/Runtime/Metadata.h" #include "swift/Runtime/Unreachable.h" #ifdef SWIFT_HAVE_CRASHREPORTERCLIENT @@ -63,6 +62,12 @@ static void CRSetCrashLogMessage(const char *) {} namespace swift { +// Duplicated from Metadata.h. We want to use this header +// in places that cannot themselves include Metadata.h. +struct InProcess; +template struct TargetMetadata; +using Metadata = TargetMetadata; + // swift::crash() halts with a crash log message, // but otherwise tries not to disturb register state. @@ -87,11 +92,6 @@ static inline void _failCorruptType(const Metadata *type) { LLVM_ATTRIBUTE_NORETURN extern void fatalError(uint32_t flags, const char *format, ...); - -struct InProcess; - -template struct TargetMetadata; -using Metadata = TargetMetadata; // swift_dynamicCastFailure halts using fatalError() // with a description of a failed cast's types. @@ -112,6 +112,14 @@ swift_dynamicCastFailure(const void *sourceType, const char *sourceName, SWIFT_RUNTIME_EXPORT void swift_reportError(uint32_t flags, const char *message); +// Halt due to an overflow in swift_retain(). +LLVM_ATTRIBUTE_NORETURN LLVM_ATTRIBUTE_NOINLINE +void swift_abortRetainOverflow(); + +// Halt due to reading an unowned reference to a dead object. +LLVM_ATTRIBUTE_NORETURN LLVM_ATTRIBUTE_NOINLINE +void swift_abortRetainUnowned(const void *object); + // namespace swift } diff --git a/include/swift/Runtime/HeapObject.h b/include/swift/Runtime/HeapObject.h index b40370d5fc642..e21d417e2197d 100644 --- a/include/swift/Runtime/HeapObject.h +++ b/include/swift/Runtime/HeapObject.h @@ -219,18 +219,6 @@ SWIFT_RUNTIME_EXPORT void (*SWIFT_CC(RegisterPreservingCC) _swift_nonatomic_retain_n)(HeapObject *object, uint32_t n); -static inline void _swift_retain_inlined(HeapObject *object) { - if (object) { - object->refCount.increment(); - } -} - -static inline void _swift_nonatomic_retain_inlined(HeapObject *object) { - if (object) { - object->refCount.incrementNonAtomic(); - } -} - /// Atomically increments the reference count of an object, unless it has /// already been destroyed. Returns nil if the object is dead. SWIFT_RT_ENTRY_VISIBILITY @@ -512,22 +500,22 @@ struct UnownedReference { HeapObject *Value; }; -/// Increment the weak/unowned retain count. +/// Increment the unowned retain count. SWIFT_RT_ENTRY_VISIBILITY void swift_unownedRetain(HeapObject *value) SWIFT_CC(RegisterPreservingCC); -/// Decrement the weak/unowned retain count. +/// Decrement the unowned retain count. 
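The marker mask/value pairs introduced above are consumed by the weak-reference code to decide whether a stored weak value is managed by the Objective-C runtime or by the Swift runtime. A minimal sketch of that bit test follows; the helper name is made up here, the real constants are the per-platform SWIFT_ABI_*_OBJC_WEAK_REFERENCE_MARKER_MASK/VALUE pairs defined in this header, and which result means "Swift-native" is decided by the WeakReference implementation, not by this check.

#include <cstdint>

// Illustrative only: compare the marker bits of a weak reference word
// against a platform's marker mask/value pair from System.h.
static inline bool weakBitsMatchObjCMarker(uintptr_t bits,
                                           uintptr_t markerMask,
                                           uintptr_t markerValue) {
  return (bits & markerMask) == markerValue;
}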
SWIFT_RT_ENTRY_VISIBILITY void swift_unownedRelease(HeapObject *value) SWIFT_CC(RegisterPreservingCC); -/// Increment the weak/unowned retain count by n. +/// Increment the unowned retain count by n. SWIFT_RT_ENTRY_VISIBILITY void swift_unownedRetain_n(HeapObject *value, int n) SWIFT_CC(RegisterPreservingCC); -/// Decrement the weak/unowned retain count by n. +/// Decrement the unowned retain count by n. SWIFT_RT_ENTRY_VISIBILITY void swift_unownedRelease_n(HeapObject *value, int n) SWIFT_CC(RegisterPreservingCC); @@ -540,7 +528,7 @@ void swift_unownedRetainStrong(HeapObject *value) /// Increment the strong retain count of an object which may have been /// deallocated, aborting if it has been deallocated, and decrement its -/// weak/unowned reference count. +/// unowned reference count. SWIFT_RT_ENTRY_VISIBILITY void swift_unownedRetainStrongAndRelease(HeapObject *value) SWIFT_CC(RegisterPreservingCC); @@ -614,16 +602,8 @@ static inline void swift_unownedTakeAssign(UnownedReference *dest, /****************************** WEAK REFERENCES ******************************/ /*****************************************************************************/ -/// A weak reference value object. This is ABI. -struct WeakReference { - uintptr_t Value; -}; - -/// Return true if this is a native weak reference -/// -/// \param ref - never null -/// \return true if ref is a native weak reference -bool isNativeSwiftWeakReference(WeakReference *ref); +// Defined in Runtime/WeakReference.h +class WeakReference; /// Initialize a weak reference. /// diff --git a/include/swift/Runtime/Metadata.h b/include/swift/Runtime/Metadata.h index 2e2d5e6820953..555a2b0f0bf3b 100644 --- a/include/swift/Runtime/Metadata.h +++ b/include/swift/Runtime/Metadata.h @@ -136,7 +136,7 @@ using TargetFarRelativeIndirectablePointer = typename Runtime::template FarRelativeIndirectablePointer; struct HeapObject; -struct WeakReference; +class WeakReference; template struct TargetMetadata; using Metadata = TargetMetadata; @@ -1832,17 +1832,6 @@ struct TargetObjCClassWrapperMetadata : public TargetMetadata { using ObjCClassWrapperMetadata = TargetObjCClassWrapperMetadata; -// FIXME: Workaround for rdar://problem/18889711. 'Consume' does not require -// a barrier on ARM64, but LLVM doesn't know that. Although 'relaxed' -// is formally UB by C++11 language rules, we should be OK because neither -// the processor model nor the optimizer can realistically reorder our uses -// of 'consume'. -#if __arm64__ || __arm__ -# define SWIFT_MEMORY_ORDER_CONSUME (std::memory_order_relaxed) -#else -# define SWIFT_MEMORY_ORDER_CONSUME (std::memory_order_consume) -#endif - /// The structure of metadata for foreign types where the source /// language doesn't provide any sort of more interesting metadata for /// us to use. diff --git a/stdlib/public/SwiftShims/HeapObject.h b/stdlib/public/SwiftShims/HeapObject.h index 4ad048dd3480d..4762d024b7f7b 100644 --- a/stdlib/public/SwiftShims/HeapObject.h +++ b/stdlib/public/SwiftShims/HeapObject.h @@ -31,8 +31,7 @@ typedef struct HeapMetadata HeapMetadata; // The members of the HeapObject header that are not shared by a // standard Objective-C instance #define SWIFT_HEAPOBJECT_NON_OBJC_MEMBERS \ - StrongRefCount refCount; \ - WeakRefCount weakRefCount + InlineRefCounts refCounts; /// The Swift heap-object header. struct HeapObject { @@ -48,8 +47,7 @@ struct HeapObject { // Initialize a HeapObject header as appropriate for a newly-allocated object. 
constexpr HeapObject(HeapMetadata const *newMetadata) : metadata(newMetadata) - , refCount(StrongRefCount::Initialized) - , weakRefCount(WeakRefCount::Initialized) + , refCounts(InlineRefCounts::Initialized) { } #endif }; @@ -59,6 +57,11 @@ static_assert(swift::IsTriviallyConstructible::value, "HeapObject must be trivially initializable"); static_assert(std::is_trivially_destructible::value, "HeapObject must be trivially destructible"); +// FIXME: small header for 32-bit +//static_assert(sizeof(HeapObject) == 2*sizeof(void*), +// "HeapObject must be two pointers long"); +static_assert(alignof(HeapObject) == alignof(void*), + "HeapObject must be pointer-aligned"); } // end namespace swift #endif diff --git a/stdlib/public/SwiftShims/RefCount.h b/stdlib/public/SwiftShims/RefCount.h index 5f3ddca5cf8d7..7b59508777409 100644 --- a/stdlib/public/SwiftShims/RefCount.h +++ b/stdlib/public/SwiftShims/RefCount.h @@ -20,24 +20,685 @@ #include "SwiftStdint.h" typedef struct { - __swift_uint32_t refCount __attribute__((__unavailable__)); -} StrongRefCount; - -typedef struct { - __swift_uint32_t weakRefCount __attribute__((__unavailable__)); -} WeakRefCount; + __swift_uint64_t refCounts __attribute__((__unavailable__)); +} InlineRefCounts; // not __cplusplus #else // __cplusplus #include +#include +#include #include #include +#include "llvm/Support/Compiler.h" #include "swift/Basic/type_traits.h" +#include "swift/Runtime/Config.h" +#include "swift/Runtime/Debug.h" + +// FIXME: Workaround for rdar://problem/18889711. 'Consume' does not require +// a barrier on ARM64, but LLVM doesn't know that. Although 'relaxed' +// is formally UB by C++11 language rules, we should be OK because neither +// the processor model nor the optimizer can realistically reorder our uses +// of 'consume'. +#if __arm64__ || __arm__ +# define SWIFT_MEMORY_ORDER_CONSUME (std::memory_order_relaxed) +#else +# define SWIFT_MEMORY_ORDER_CONSUME (std::memory_order_consume) +#endif + +/* + An object conceptually has three refcounts. These refcounts + are stored either "inline" in the field following the isa + or in a "side table entry" pointed to by the field following the isa. + + The strong RC counts strong references to the object. When the strong RC + reaches zero the object is deinited, unowned reference reads become errors, + and weak reference reads become nil. The strong RC is stored as an extra + count: when the physical field is 0 the logical value is 1. + + The unowned RC counts unowned references to the object. The unowned RC + also has an extra +1 on behalf of the strong references; this +1 is + decremented after deinit completes. When the unowned RC reaches zero + the object's allocation is freed. + + The weak RC counts weak references to the object. The weak RC also has an + extra +1 on behalf of the unowned references; this +1 is decremented + after the object's allocation is freed. When the weak RC reaches zero + the object's side table entry is freed. + + Objects initially start with no side table. They can gain a side table when: + * a weak reference is formed + and pending future implementation: + * strong RC or unowned RC overflows (inline RCs will be small on 32-bit) + * associated object storage is needed on an object + * etc + Gaining a side table entry is a one-way operation; an object with a side + table entry never loses it. This prevents some thread races. + + Strong and unowned variables point at the object. + Weak variables point at the object's side table. 
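The comment above defines the strong count as an extra count: a stored value of 0 already means one strong reference, and a freshly allocated object additionally carries one unowned reference held on behalf of all strong references. A tiny standalone model of that convention (toy types, not the real RefCountBitsT):

#include <cassert>
#include <cstdint>

// Strong refcount stored as an "extra" count: physical 0 == logical 1.
struct ToyCounts {
  uint32_t strongExtra; // logical strong count is strongExtra + 1
  uint32_t unowned;     // includes the +1 held on behalf of strong references

  static ToyCounts forNewObject() { return {0, 1}; }
  uint32_t logicalStrongCount() const { return strongExtra + 1; }
};

int main() {
  ToyCounts c = ToyCounts::forNewObject();
  assert(c.logicalStrongCount() == 1); // a new object starts at +1 strong
  assert(c.unowned == 1);              // and +1 unowned for the strong refs
}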
+ + + Storage layout: + + HeapObject { + isa + InlineRefCounts { + atomic { + strong RC + unowned RC + flags + OR + HeapObjectSideTableEntry* + } + } + } + + HeapObjectSideTableEntry { + SideTableRefCounts { + object pointer + atomic { + strong RC + unowned RC + weak RC + flags + } + } + } + + InlineRefCounts and SideTableRefCounts share some implementation + via RefCounts. + + InlineRefCountBits and SideTableRefCountBits share some implementation + via RefCountBitsT. + + In general: The InlineRefCounts implementation tries to perform the + operation inline. If the object has a side table it calls the + HeapObjectSideTableEntry implementation which in turn calls the + SideTableRefCounts implementation. + Downside: this code is a bit twisted. + Upside: this code has less duplication than it might otherwise + + + Object lifecycle state machine: + + LIVE without side table + The object is alive. + Object's refcounts are initialized as 1 strong, 1 unowned, 1 weak. + No side table. No weak RC storage. + Strong variable operations work normally. + Unowned variable operations work normally. + Weak variable load can't happen. + Weak variable store adds the side table, becoming LIVE with side table. + When the strong RC reaches zero deinit() is called and the object + becomes DEINITING. + + LIVE with side table + Weak variable operations work normally. + Everything else is the same as LIVE. + + DEINITING without side table + deinit() is in progress on the object. + Strong variable operations have no effect. + Unowned variable load halts in swift_abortRetainUnowned(). + Unowned variable store works normally. + Weak variable load can't happen. + Weak variable store stores nil. + When deinit() completes, the generated code calls swift_deallocObject. + swift_deallocObject calls canBeFreedNow() checking for the fast path + of no weak or unowned references. + If canBeFreedNow() the object is freed and it becomes DEAD. + Otherwise, it decrements the unowned RC and the object becomes DEINITED. + + DEINITING with side table + Weak variable load returns nil. + Weak variable store stores nil. + canBeFreedNow() is always false, so it never transitions directly to DEAD. + Everything else is the same as DEINITING. + + DEINITED without side table + deinit() has completed but there are unowned references outstanding. + Strong variable operations can't happen. + Unowned variable store can't happen. + Unowned variable load halts in swift_abortRetainUnowned(). + Weak variable operations can't happen. + When the unowned RC reaches zero, the object is freed and it becomes DEAD. + + DEINITED with side table + Weak variable load returns nil. + Weak variable store can't happen. + When the unowned RC reaches zero, the object is freed, the weak RC is + decremented, and the object becomes FREED. + Everything else is the same as DEINITED. + + FREED without side table + This state never happens. + + FREED with side table + The object is freed but there are weak refs to the side table outstanding. + Strong variable operations can't happen. + Unowned variable operations can't happen. + Weak variable load returns nil. + Weak variable store can't happen. + When the weak RC reaches zero, the side table entry is freed and + the object becomes DEAD. + + DEAD + The object and its side table are gone. 
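The same life cycle, condensed into a sketch for quick reference. The enum and the transition list below only restate the states described above; they are not part of the runtime.

// Life-cycle states from the comment above, with the events that move an
// object between them. A side-table-less FREED state never occurs.
enum class ObjectState {
  Live,      // strong >= 1; deinit not started
  Deiniting, // strong hit 0; deinit() running
  Deinited,  // deinit done; unowned references still outstanding
  Freed,     // memory freed; weak references to the side table outstanding
  Dead       // object and side table both gone
};

// Illustrative transition summary (not real runtime code):
//   Live      -> Deiniting : last strong release
//   Deiniting -> Dead      : deinit done and canBeFreedNow()
//   Deiniting -> Deinited  : deinit done, unowned references remain
//   Deinited  -> Dead      : last unowned release, no side table
//   Deinited  -> Freed     : last unowned release, side table present
//   Freed     -> Dead      : last weak release frees the side table entry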
+*/ + +namespace swift { + struct HeapObject; + class HeapObjectSideTableEntry; +} + +// FIXME: HACK: copied from HeapObject.cpp +extern "C" LLVM_LIBRARY_VISIBILITY void +_swift_release_dealloc(swift::HeapObject *object) + SWIFT_CC(RegisterPreservingCC_IMPL) + __attribute__((__noinline__, __used__)); + +namespace swift { + +// RefCountIsInline: refcount stored in an object +// RefCountNotInline: refcount stored in an object's side table entry +enum RefCountInlinedness { RefCountNotInline = false, RefCountIsInline = true }; + +enum ClearPinnedFlag { DontClearPinnedFlag = false, DoClearPinnedFlag = true }; + +enum PerformDeinit { DontPerformDeinit = false, DoPerformDeinit = true }; + + +// Raw storage of refcount bits, depending on pointer size and inlinedness. +// 32-bit inline refcount is 32-bits. All others are 64-bits. + +template +struct RefCountBitsInt; + +// 64-bit inline +// 64-bit out of line +template +struct RefCountBitsInt { + typedef uint64_t Type; + typedef int64_t SignedType; +}; + +// 32-bit out of line +template <> +struct RefCountBitsInt { + typedef uint64_t Type; + typedef int64_t SignedType; +}; + +// 32-bit inline +template <> +struct RefCountBitsInt { + typedef uint32_t Type; + typedef int32_t SignedType; +}; + + +// Layout of refcount bits. +// field value = (bits & mask) >> shift +// FIXME: redo this abstraction more cleanly + +# define maskForField(name) (((uint64_t(1)< +struct RefCountBitOffsets; + +// 64-bit inline +// 64-bit out of line +// 32-bit out of line +template <> +struct RefCountBitOffsets<8> { + static const size_t IsPinnedShift = 0; + static const size_t IsPinnedBitCount = 1; + static const uint64_t IsPinnedMask = maskForField(IsPinned); + + static const size_t UnownedRefCountShift = shiftAfterField(IsPinned); + static const size_t UnownedRefCountBitCount = 31; + static const uint64_t UnownedRefCountMask = maskForField(UnownedRefCount); + + static const size_t IsDeinitingShift = shiftAfterField(UnownedRefCount); + static const size_t IsDeinitingBitCount = 1; + static const uint64_t IsDeinitingMask = maskForField(IsDeiniting); + + static const size_t StrongExtraRefCountShift = shiftAfterField(IsDeiniting); + static const size_t StrongExtraRefCountBitCount = 30; + static const uint64_t StrongExtraRefCountMask = maskForField(StrongExtraRefCount); + + static const size_t UseSlowRCShift = shiftAfterField(StrongExtraRefCount); + static const size_t UseSlowRCBitCount = 1; + static const uint64_t UseSlowRCMask = maskForField(UseSlowRC); + + static const size_t SideTableShift = 0; + static const size_t SideTableBitCount = 62; + static const uint64_t SideTableMask = maskForField(SideTable); + static const size_t SideTableUnusedLowBits = 3; + + static const size_t SideTableMarkShift = SideTableBitCount; + static const size_t SideTableMarkBitCount = 1; + static const uint64_t SideTableMarkMask = maskForField(SideTableMark); +}; + +// 32-bit inline +template <> +struct RefCountBitOffsets<4> { + static const size_t IsPinnedShift = 0; + static const size_t IsPinnedBitCount = 1; + static const uint32_t IsPinnedMask = maskForField(IsPinned); + + static const size_t UnownedRefCountShift = shiftAfterField(IsPinned); + static const size_t UnownedRefCountBitCount = 7; + static const uint32_t UnownedRefCountMask = maskForField(UnownedRefCount); + + static const size_t IsDeinitingShift = shiftAfterField(UnownedRefCount); + static const size_t IsDeinitingBitCount = 1; + static const uint32_t IsDeinitingMask = maskForField(IsDeiniting); + + static const size_t 
StrongExtraRefCountShift = shiftAfterField(IsDeiniting); + static const size_t StrongExtraRefCountBitCount = 22; + static const uint32_t StrongExtraRefCountMask = maskForField(StrongExtraRefCount); + + static const size_t UseSlowRCShift = shiftAfterField(StrongExtraRefCount); + static const size_t UseSlowRCBitCount = 1; + static const uint32_t UseSlowRCMask = maskForField(UseSlowRC); + + static const size_t SideTableShift = 0; + static const size_t SideTableBitCount = 30; + static const uint32_t SideTableMask = maskForField(SideTable); + static const size_t SideTableUnusedLowBits = 2; + + static const size_t SideTableMarkShift = SideTableBitCount; + static const size_t SideTableMarkBitCount = 1; + static const uint32_t SideTableMarkMask = maskForField(SideTableMark); +}; + + +/* + FIXME: reinstate these assertions + static_assert(StrongExtraRefCountShift == IsDeinitingShift + 1, + "IsDeiniting must be LSB-wards of StrongExtraRefCount"); + static_assert(UseSlowRCShift + UseSlowRCBitCount == sizeof(bits)*8, + "UseSlowRC must be MSB"); + static_assert(SideTableBitCount + SideTableMarkBitCount + + UseSlowRCBitCount == sizeof(bits)*8, + "wrong bit count for RefCountBits side table encoding"); + static_assert(UnownedRefCountBitCount + IsPinnedBitCount + + IsDeinitingBitCount + StrongExtraRefCountBitCount + + UseSlowRCBitCount == sizeof(bits)*8, + "wrong bit count for RefCountBits refcount encoding"); +*/ + + +// Basic encoding of refcount and flag data into the object's header. +template +class RefCountBitsT { + + friend class RefCountBitsT; + friend class RefCountBitsT; + + static const RefCountInlinedness Inlinedness = refcountIsInline; + + typedef typename RefCountBitsInt::Type + BitsType; + typedef typename RefCountBitsInt::SignedType + SignedBitsType; + typedef RefCountBitOffsets + Offsets; + + BitsType bits; + + // "Bitfield" accessors. + +# define getFieldIn(bits, offsets, name) \ + ((bits & offsets::name##Mask) >> offsets::name##Shift) +# define setFieldIn(bits, offsets, name, val) \ + bits = ((bits & ~offsets::name##Mask) | \ + (((BitsType(val) << offsets::name##Shift) & offsets::name##Mask))) + +# define getField(name) getFieldIn(bits, Offsets, name) +# define setField(name, val) setFieldIn(bits, Offsets, name, val) +# define copyFieldFrom(src, name) \ + setFieldIn(bits, Offsets, name, \ + getFieldIn(src.bits, decltype(src)::Offsets, name)) + + // RefCountBits uses always_inline everywhere + // to improve performance of debug builds. + + private: + LLVM_ATTRIBUTE_ALWAYS_INLINE + bool getUseSlowRC() const { + return bool(getField(UseSlowRC)); + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + void setUseSlowRC(bool value) { + setField(UseSlowRC, value); + } + + + // Returns true if the decrement is a fast-path result. + // Returns false if the decrement should fall back to some slow path + // (for example, because UseSlowRC is set + // or because the refcount is now zero and should deinit). + template + LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE + bool doDecrementStrongExtraRefCount(uint32_t dec) { +#ifndef NDEBUG + if (!hasSideTable()) { + // Can't check these assertions with side table present. + + // clearPinnedFlag assumes the flag is already set. 
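The field positions above are easier to sanity-check with the masks and shifts written out. A standalone re-derivation of the 64-bit inline layout (names local to this sketch), confirming that StrongExtraRefCount ends at bit 62 and UseSlowRC occupies the sign bit:

#include <cstddef>
#include <cstdint>

// Standalone restatement of the 64-bit inline field positions
// (IsPinned, UnownedRefCount, IsDeiniting, StrongExtraRefCount, UseSlowRC).
namespace layout64 {
  constexpr size_t IsPinnedShift = 0,  IsPinnedBits = 1;
  constexpr size_t UnownedShift  = IsPinnedShift + IsPinnedBits, UnownedBits = 31;
  constexpr size_t DeinitShift   = UnownedShift + UnownedBits,   DeinitBits  = 1;
  constexpr size_t StrongShift   = DeinitShift + DeinitBits,     StrongBits  = 30;
  constexpr size_t SlowShift     = StrongShift + StrongBits,     SlowBits    = 1;

  constexpr uint64_t mask(size_t shift, size_t bits) {
    return ((uint64_t(1) << bits) - 1) << shift;
  }

  static_assert(DeinitShift == 32 && StrongShift == 33, "field positions");
  static_assert(SlowShift == 63 && SlowShift + SlowBits == 64,
                "UseSlowRC occupies the sign bit of the 64-bit word");
  static_assert(mask(StrongShift, StrongBits) == 0x7FFFFFFE00000000ull,
                "StrongExtraRefCount mask");
}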
+ if (clearPinnedFlag) + assert(getIsPinned() && "unpinning reference that was not pinned"); + + if (getIsDeiniting()) + assert(getStrongExtraRefCount() >= dec && + "releasing reference whose refcount is already zero"); + else + assert(getStrongExtraRefCount() + 1 >= dec && + "releasing reference whose refcount is already zero"); + } +#endif + + BitsType unpin = (clearPinnedFlag + ? (BitsType(1) << Offsets::IsPinnedShift) + : 0); + // This deliberately underflows by borrowing from the UseSlowRC field. + bits -= unpin + (BitsType(dec) << Offsets::StrongExtraRefCountShift); + return (SignedBitsType(bits) >= 0); + } + + public: + + LLVM_ATTRIBUTE_ALWAYS_INLINE + RefCountBitsT() = default; + + LLVM_ATTRIBUTE_ALWAYS_INLINE + constexpr + RefCountBitsT(uint32_t strongExtraCount, uint32_t unownedCount) + : bits((BitsType(strongExtraCount) << Offsets::StrongExtraRefCountShift) | + (BitsType(unownedCount) << Offsets::UnownedRefCountShift)) + { } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + RefCountBitsT(HeapObjectSideTableEntry* side) + : bits((reinterpret_cast(side) >> Offsets::SideTableUnusedLowBits) + | (BitsType(1) << Offsets::UseSlowRCShift) + | (BitsType(1) << Offsets::SideTableMarkShift)) + { + assert(refcountIsInline); + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + RefCountBitsT(RefCountBitsT newbits) { + bits = 0; + + if (refcountIsInline || sizeof(newbits) == sizeof(*this)) { + // this and newbits are both inline + // OR this is out-of-line but the same layout as inline. + // (FIXME: use something cleaner than sizeof for same-layout test) + // Copy the bits directly. + bits = newbits.bits; + } + else { + // this is out-of-line and not the same layout as inline newbits. + // Copy field-by-field. + copyFieldFrom(newbits, UnownedRefCount); + copyFieldFrom(newbits, IsPinned); + copyFieldFrom(newbits, IsDeiniting); + copyFieldFrom(newbits, StrongExtraRefCount); + copyFieldFrom(newbits, UseSlowRC); + } + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + bool hasSideTable() const { + // FIXME: change this when introducing immutable RC objects + bool hasSide = getUseSlowRC(); + + // Side table refcount must not point to another side table. + assert((refcountIsInline || !hasSide) && + "side table refcount must not have a side table entry of its own"); + + return hasSide; + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + HeapObjectSideTableEntry *getSideTable() const { + assert(hasSideTable()); + + // Stored value is a shifted pointer. + return reinterpret_cast + (uintptr_t(getField(SideTable)) << Offsets::SideTableUnusedLowBits); + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + uint32_t getUnownedRefCount() const { + assert(!hasSideTable()); + return uint32_t(getField(UnownedRefCount)); + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + bool getIsPinned() const { + assert(!hasSideTable()); + return bool(getField(IsPinned)); + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + bool getIsDeiniting() const { + assert(!hasSideTable()); + return bool(getField(IsDeiniting)); + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + uint32_t getStrongExtraRefCount() const { + assert(!hasSideTable()); + return uint32_t(getField(StrongExtraRefCount)); + } + + + LLVM_ATTRIBUTE_ALWAYS_INLINE + void setHasSideTable(bool value) { + bits = 0; + setUseSlowRC(value); + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + void setSideTable(HeapObjectSideTableEntry *side) { + assert(hasSideTable()); + // Stored value is a shifted pointer. 
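incrementStrongExtraRefCount() and doDecrementStrongExtraRefCount() above depend on StrongExtraRefCount sitting directly below UseSlowRC: an overflow or borrow spills into the top bit, so a single signed comparison catches every slow-path case. A standalone 64-bit demonstration of that carry, using the same shifts as the inline layout:

#include <cassert>
#include <cstdint>

int main() {
  // 64-bit inline layout: StrongExtraRefCount is bits 33..62, UseSlowRC is bit 63.
  const uint64_t strongShift = 33;
  const uint64_t strongMax   = (uint64_t(1) << 30) - 1; // 30-bit field

  // Saturated strong field, everything else zero.
  uint64_t bits = strongMax << strongShift;
  assert(int64_t(bits) >= 0);            // fast path: sign bit clear

  bits += uint64_t(1) << strongShift;    // increment overflows the field...
  assert(int64_t(bits) < 0);             // ...into UseSlowRC: take the slow path

  // A decrement below zero borrows the same way.
  uint64_t zeroStrong = 0;               // strong extra count == 0 (logical 1)
  zeroStrong -= uint64_t(1) << strongShift;
  assert(int64_t(zeroStrong) < 0);       // underflow caught by the sign test
}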
+ uintptr_t value = reinterpret_cast(side); + uintptr_t storedValue = value >> Offsets::SideTableUnusedLowBits; + assert(storedValue << Offsets::SideTableUnusedLowBits == value); + setField(SideTable, storedValue); + setField(SideTableMark, 1); + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + void setUnownedRefCount(uint32_t value) { + assert(!hasSideTable()); + setField(UnownedRefCount, value); + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + void setIsPinned(bool value) { + assert(!hasSideTable()); + setField(IsPinned, value); + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + void setIsDeiniting(bool value) { + assert(!hasSideTable()); + setField(IsDeiniting, value); + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + void setStrongExtraRefCount(uint32_t value) { + assert(!hasSideTable()); + setField(StrongExtraRefCount, value); + } + + + // Returns true if the increment is a fast-path result. + // Returns false if the increment should fall back to some slow path + // (for example, because UseSlowRC is set or because the refcount overflowed). + LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE + bool incrementStrongExtraRefCount(uint32_t inc) { + // This deliberately overflows into the UseSlowRC field. + bits += BitsType(inc) << Offsets::StrongExtraRefCountShift; + return (SignedBitsType(bits) >= 0); + } + + // FIXME: I don't understand why I can't make clearPinned a template argument + // (compiler balks at calls from class RefCounts that way) + LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE + bool decrementStrongExtraRefCount(uint32_t dec, bool clearPinned = false) { + if (clearPinned) + return doDecrementStrongExtraRefCount(dec); + else + return doDecrementStrongExtraRefCount(dec); + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + void incrementUnownedRefCount(uint32_t inc) { + setUnownedRefCount(getUnownedRefCount() + inc); + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + void decrementUnownedRefCount(uint32_t dec) { + setUnownedRefCount(getUnownedRefCount() - dec); + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + bool isUniquelyReferenced() { + static_assert(Offsets::IsPinnedBitCount + + Offsets::UnownedRefCountBitCount + + Offsets::IsDeinitingBitCount + + Offsets::StrongExtraRefCountBitCount + + Offsets::UseSlowRCBitCount == sizeof(bits)*8, + "inspect isUniquelyReferenced after adding fields"); + + // isPinned: don't care + // Unowned: don't care (FIXME: should care and redo initForNotFreeing) + // IsDeiniting: false + // StrongExtra: 0 + // UseSlowRC: false + + // Compiler is clever enough to optimize this. + return + !getUseSlowRC() && !getIsDeiniting() && getStrongExtraRefCount() == 0; + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + bool isUniquelyReferencedOrPinned() { + static_assert(Offsets::IsPinnedBitCount + + Offsets::UnownedRefCountBitCount + + Offsets::IsDeinitingBitCount + + Offsets::StrongExtraRefCountBitCount + + Offsets::UseSlowRCBitCount == sizeof(bits)*8, + "inspect isUniquelyReferencedOrPinned after adding fields"); + + // isPinned: don't care + // Unowned: don't care (FIXME: should care and redo initForNotFreeing) + // IsDeiniting: false + // isPinned/StrongExtra: true/any OR false/0 + // UseSlowRC: false + + // Compiler is not clever enough to optimize this. + // return (isUniquelyReferenced() || + // (!getUseSlowRC() && !getIsDeiniting() && getIsPinned())); + + // Bit twiddling solution: + // 1. Define the fields in this order: + // bits that must be zero when not pinned | bits to ignore | IsPinned + // 2. Rotate IsPinned into the sign bit: + // IsPinned | bits that must be zero when not pinned | bits to ignore + // 3. 
Perform a signed comparison against X = (1 << count of ignored bits). + // IsPinned makes the value negative and thus less than X. + // Zero in the must-be-zero bits makes the value less than X. + // Non-zero and not pinned makes the value greater or equal to X. + + // Count the ignored fields. + constexpr auto ignoredBitsCount = + Offsets::UnownedRefCountBitCount + Offsets::IsDeinitingBitCount; + // Make sure all fields are positioned as expected. + // -1 compensates for the rotation. + static_assert(Offsets::IsPinnedShift == 0, "IsPinned must be the LSB bit"); + static_assert( + shiftAfterField(Offsets::UnownedRefCount)-1 <= ignoredBitsCount && + shiftAfterField(Offsets::IsDeiniting)-1 <= ignoredBitsCount && + Offsets::StrongExtraRefCountShift-1 >= ignoredBitsCount && + Offsets::UseSlowRCShift-1 >= ignoredBitsCount, + "refcount bit layout incorrect for isUniquelyReferencedOrPinned"); + + BitsType X = BitsType(1) << ignoredBitsCount; + BitsType rotatedBits = ((bits >> 1) | (bits << (8*sizeof(bits) - 1))); + return SignedBitsType(rotatedBits) < SignedBitsType(X); + } + +# undef getFieldIn +# undef setFieldIn +# undef getField +# undef setField +# undef copyFieldFrom +}; + +# undef maskForField +# undef shiftAfterField + +typedef RefCountBitsT InlineRefCountBits; + +class SideTableRefCountBits : public RefCountBitsT +{ + uint32_t weakBits; + + public: + LLVM_ATTRIBUTE_ALWAYS_INLINE + SideTableRefCountBits() = default; + + LLVM_ATTRIBUTE_ALWAYS_INLINE + constexpr + SideTableRefCountBits(uint32_t strongExtraCount, uint32_t unownedCount) + : RefCountBitsT(strongExtraCount, unownedCount) + // weak refcount starts at 1 on behalf of the unowned count + , weakBits(1) + { } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + SideTableRefCountBits(HeapObjectSideTableEntry* side) = delete; + + LLVM_ATTRIBUTE_ALWAYS_INLINE + SideTableRefCountBits(InlineRefCountBits newbits) + : RefCountBitsT(newbits), weakBits(1) + { } + + + LLVM_ATTRIBUTE_ALWAYS_INLINE + void incrementWeakRefCount() { + weakBits++; + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + bool decrementWeakRefCount() { + assert(weakBits > 0); + weakBits--; + return weakBits == 0; + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + uint32_t getWeakRefCount() { + return weakBits; + } + + // Side table ref count never has a side table of its own. + LLVM_ATTRIBUTE_ALWAYS_INLINE + bool hasSideTable() { + return false; + } +}; -// Strong reference count. // Barriers // @@ -46,70 +707,94 @@ typedef struct { // Strong refcount decrement is a release operation with respect to other // memory locations. When an object's reference count becomes zero, // an acquire fence is performed before beginning Swift deinit or ObjC -// dealloc code. This ensures that the deinit code sees all modifications +// -dealloc code. This ensures that the deinit code sees all modifications // of the object's contents that were made before the object was released. +// +// Unowned and weak increment and decrement are all unordered. +// There is no deinit equivalent for these counts so no fence is needed. +// +// Accessing the side table requires that refCounts be accessed with +// a load-consume. Only code that is guaranteed not to try dereferencing +// the side table may perform a load-relaxed of refCounts. +// Similarly, storing the new side table pointer into refCounts is a +// store-release, but most other stores into refCounts are store-relaxed. 
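For reference, the side-table encoding used by RefCountBitsT's pointer constructor and getSideTable() above, worked through by hand: the entry pointer is stored shifted right by its alignment bits, with SideTableMark and UseSlowRC set. A standalone round trip with an illustrative address:

#include <cassert>
#include <cstdint>

int main() {
  // 64-bit inline encoding of a side table pointer (see RefCountBitsT above):
  // low 62 bits hold (pointer >> 3), bit 62 is SideTableMark, bit 63 is UseSlowRC.
  const uint64_t sideTableMask = (uint64_t(1) << 62) - 1;
  const uint64_t markBit       = uint64_t(1) << 62;
  const uint64_t useSlowRCBit  = uint64_t(1) << 63;
  const unsigned unusedLowBits = 3;      // side table entries are 8-byte aligned

  uintptr_t entryAddress = 0x10020030;   // illustrative, 8-byte aligned
  uint64_t bits = (uint64_t(entryAddress) >> unusedLowBits)
                  | useSlowRCBit | markBit;

  // Decoding reverses the shift after masking off the flag bits.
  uintptr_t decoded = uintptr_t((bits & sideTableMask) << unusedLowBits);
  assert(decoded == entryAddress);
  assert((bits & useSlowRCBit) != 0);    // hasSideTable() would report true
}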
+ +template +class RefCounts { + std::atomic refCounts; +#if !__LP64__ + // FIXME: hack - something somewhere is assuming a 3-word header on 32-bit + // See also other fixmes marked "small header for 32-bit" + uintptr_t unused __attribute__((unavailable)); +#endif -class StrongRefCount { - uint32_t refCount; + // Out-of-line slow paths. + + LLVM_ATTRIBUTE_NOINLINE + void incrementSlow(RefCountBits oldbits, uint32_t inc); - // The low bit is the pinned marker. - // The next bit is the deallocating marker. - // The remaining bits are the reference count. - // refCount == RC_ONE means reference count == 1. - enum : uint32_t { - RC_PINNED_FLAG = 0x1, - RC_DEALLOCATING_FLAG = 0x2, + LLVM_ATTRIBUTE_NOINLINE + void incrementNonAtomicSlow(RefCountBits oldbits, uint32_t inc); - RC_FLAGS_COUNT = 2, - RC_FLAGS_MASK = 3, - RC_COUNT_MASK = ~RC_FLAGS_MASK, + LLVM_ATTRIBUTE_NOINLINE + bool tryIncrementAndPinSlow(RefCountBits oldbits); - RC_ONE = RC_FLAGS_MASK + 1 - }; + LLVM_ATTRIBUTE_NOINLINE + bool tryIncrementAndPinNonAtomicSlow(RefCountBits); - static_assert(RC_ONE == RC_DEALLOCATING_FLAG << 1, - "deallocating bit must be adjacent to refcount bits"); - static_assert(RC_ONE == 1 << RC_FLAGS_COUNT, - "inconsistent refcount flags"); - static_assert(RC_ONE == 1 + RC_FLAGS_MASK, - "inconsistent refcount flags"); + LLVM_ATTRIBUTE_NOINLINE + bool tryIncrementSlow(RefCountBits oldbits); - public: + public: enum Initialized_t { Initialized }; - // StrongRefCount must be trivially constructible to avoid ObjC++ - // destruction overhead at runtime. Use StrongRefCount(Initialized) to produce - // an initialized instance. - StrongRefCount() = default; + // RefCounts must be trivially constructible to avoid ObjC++ + // destruction overhead at runtime. Use RefCounts(Initialized) + // to produce an initialized instance. + RefCounts() = default; // Refcount of a new object is 1. - constexpr StrongRefCount(Initialized_t) - : refCount(RC_ONE) { } + constexpr RefCounts(Initialized_t) + : refCounts(RefCountBits(0, 1)) { } void init() { - refCount = RC_ONE; + refCounts.store(RefCountBits(0, 1), std::memory_order_relaxed); } - // Increment the reference count. - void increment() { - __atomic_fetch_add(&refCount, RC_ONE, __ATOMIC_RELAXED); + // Initialize for a stack promoted object. This prevents that the final + // release frees the memory of the object. + // FIXME: need to mark these and assert they never get a side table, + // because the extra unowned ref will keep the side table alive forever + void initForNotFreeing() { + refCounts.store(RefCountBits(0, 2), std::memory_order_relaxed); } - void incrementNonAtomic() { - uint32_t val = __atomic_load_n(&refCount, __ATOMIC_RELAXED); - val += RC_ONE; - __atomic_store_n(&refCount, val, __ATOMIC_RELAXED); + // Initialize from another refcount bits. + // Only inline -> out-of-line is allowed (used for new side table entries). + void init(InlineRefCountBits newBits) { + refCounts.store(newBits, std::memory_order_relaxed); } - // Increment the reference count by n. - void increment(uint32_t n) { - __atomic_fetch_add(&refCount, n << RC_FLAGS_COUNT, __ATOMIC_RELAXED); + // Increment the reference count. 
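The fast paths that follow (increment, tryIncrement, doDecrement, and friends) all share one shape: load the packed bits with load-consume, update a local copy, publish it with compare_exchange_weak, and bail out to a slow path when the bit arithmetic says so, as increment() just below shows. A minimal standalone sketch of that loop over a plain uint64_t payload; the update and slow-path callables are placeholders, not the real refcount logic:

#include <atomic>
#include <cstdint>

// Generic shape of the fast paths: consume-load, speculatively update,
// publish with compare_exchange_weak, retry on contention, and defer to a
// slow path when the packed-bits update reports trouble.
template <typename Update, typename SlowPath>
bool updateBits(std::atomic<uint64_t> &refCounts, Update update, SlowPath slow) {
  uint64_t oldbits = refCounts.load(std::memory_order_consume);
  uint64_t newbits;
  do {
    newbits = oldbits;
    if (!update(newbits))          // e.g. incrementStrongExtraRefCount(...)
      return slow(oldbits);        // e.g. side table present or overflow
  } while (!refCounts.compare_exchange_weak(oldbits, newbits,
                                            std::memory_order_relaxed));
  return true;
}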
+ void increment(uint32_t inc = 1) { + auto oldbits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME); + RefCountBits newbits; + do { + newbits = oldbits; + bool fast = newbits.incrementStrongExtraRefCount(inc); + if (!fast) + return incrementSlow(oldbits, inc); + } while (!refCounts.compare_exchange_weak(oldbits, newbits, + std::memory_order_relaxed)); } - void incrementNonAtomic(uint32_t n) { - uint32_t val = __atomic_load_n(&refCount, __ATOMIC_RELAXED); - val += n << RC_FLAGS_COUNT; - __atomic_store_n(&refCount, val, __ATOMIC_RELAXED); + void incrementNonAtomic(uint32_t inc = 1) { + auto oldbits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME); + auto newbits = oldbits; + bool fast = newbits.incrementStrongExtraRefCount(inc); + if (!fast) + return incrementNonAtomicSlow(oldbits, inc); + refCounts.store(newbits, std::memory_order_relaxed); } // Try to simultaneously set the pinned flag and increment the @@ -122,370 +807,585 @@ class StrongRefCount { // // Postcondition: the flag is set. bool tryIncrementAndPin() { - uint32_t oldval = __atomic_load_n(&refCount, __ATOMIC_RELAXED); - while (true) { + auto oldbits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME); + RefCountBits newbits; + do { // If the flag is already set, just fail. - if (oldval & RC_PINNED_FLAG) { + if (!oldbits.hasSideTable() && oldbits.getIsPinned()) return false; - } // Try to simultaneously set the flag and increment the reference count. - uint32_t newval = oldval + (RC_PINNED_FLAG + RC_ONE); - if (__atomic_compare_exchange(&refCount, &oldval, &newval, 0, - __ATOMIC_RELAXED, __ATOMIC_RELAXED)) { - return true; - } - - // Try again; oldval has been updated with the value we saw. - } + newbits = oldbits; + newbits.setIsPinned(true); + bool fast = newbits.incrementStrongExtraRefCount(1); + if (!fast) + return tryIncrementAndPinSlow(oldbits); + } while (!refCounts.compare_exchange_weak(oldbits, newbits, + std::memory_order_relaxed)); + return true; } bool tryIncrementAndPinNonAtomic() { - uint32_t oldval = __atomic_load_n(&refCount, __ATOMIC_RELAXED); + auto oldbits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME); + // If the flag is already set, just fail. - if (oldval & RC_PINNED_FLAG) { + if (!oldbits.hasSideTable() && oldbits.getIsPinned()) return false; - } // Try to simultaneously set the flag and increment the reference count. - uint32_t newval = oldval + (RC_PINNED_FLAG + RC_ONE); - __atomic_store_n(&refCount, newval, __ATOMIC_RELAXED); + auto newbits = oldbits; + newbits.setIsPinned(true); + bool fast = newbits.incrementStrongExtraRefCount(1); + if (!fast) + return tryIncrementAndPinNonAtomicSlow(oldbits); + refCounts.store(newbits, std::memory_order_relaxed); return true; } - // Increment the reference count, unless the object is deallocating. + // Increment the reference count, unless the object is deiniting. 
bool tryIncrement() { - // FIXME: this could be better on LL/SC architectures like arm64 - uint32_t oldval = __atomic_fetch_add(&refCount, RC_ONE, __ATOMIC_RELAXED); - if (oldval & RC_DEALLOCATING_FLAG) { - __atomic_fetch_sub(&refCount, RC_ONE, __ATOMIC_RELAXED); - return false; - } else { - return true; - } + auto oldbits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME); + RefCountBits newbits; + do { + if (!oldbits.hasSideTable() && oldbits.getIsDeiniting()) + return false; + + newbits = oldbits; + bool fast = newbits.incrementStrongExtraRefCount(1); + if (!fast) + return tryIncrementSlow(oldbits); + } while (!refCounts.compare_exchange_weak(oldbits, newbits, + std::memory_order_relaxed)); + return true; } // Simultaneously clear the pinned flag and decrement the reference - // count. + // count. Call _swift_release_dealloc() if the reference count goes to zero. // // Precondition: the pinned flag is set. - bool decrementAndUnpinShouldDeallocate() { - return doDecrementShouldDeallocate(); + LLVM_ATTRIBUTE_ALWAYS_INLINE + void decrementAndUnpinAndMaybeDeinit() { + doDecrement(1); } - bool decrementAndUnpinShouldDeallocateNonAtomic() { - return doDecrementShouldDeallocateNonAtomic(); + LLVM_ATTRIBUTE_ALWAYS_INLINE + void decrementAndUnpinAndMaybeDeinitNonAtomic() { + doDecrementNonAtomic(1); } // Decrement the reference count. - // Return true if the caller should now deallocate the object. - bool decrementShouldDeallocate() { - return doDecrementShouldDeallocate(); + // Return true if the caller should now deinit the object. + LLVM_ATTRIBUTE_ALWAYS_INLINE + bool decrementShouldDeinit(uint32_t dec) { + return doDecrement(dec); } - bool decrementShouldDeallocateNonAtomic() { - return doDecrementShouldDeallocateNonAtomic(); + LLVM_ATTRIBUTE_ALWAYS_INLINE + void decrementAndMaybeDeinit(uint32_t dec) { + doDecrement(dec); } - bool decrementShouldDeallocateN(uint32_t n) { - return doDecrementShouldDeallocateN(n); + LLVM_ATTRIBUTE_ALWAYS_INLINE + void decrementAndMaybeDeinitNonAtomic(uint32_t dec) { + doDecrementNonAtomic(dec); } - // Set the RC_DEALLOCATING_FLAG flag non-atomically. + // Non-atomically release the last strong reference and mark the + // object as deiniting. // // Precondition: the reference count must be 1 - void decrementFromOneAndDeallocateNonAtomic() { - assert(refCount == RC_ONE && "Expect a count of 1"); - __atomic_store_n(&refCount, RC_DEALLOCATING_FLAG, __ATOMIC_RELAXED); - } - - bool decrementShouldDeallocateNNonAtomic(uint32_t n) { - return doDecrementShouldDeallocateNNonAtomic(n); + void decrementFromOneNonAtomic() { + auto bits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME); + if (bits.hasSideTable()) + return bits.getSideTable()->decrementFromOneNonAtomic(); + + assert(!bits.getIsDeiniting()); + assert(bits.getStrongExtraRefCount() == 0 && "Expect a refcount of 1"); + bits.setStrongExtraRefCount(0); + bits.setIsDeiniting(true); + refCounts.store(bits, std::memory_order_relaxed); } // Return the reference count. - // During deallocation the reference count is undefined. + // Once deinit begins the reference count is undefined. uint32_t getCount() const { - return __atomic_load_n(&refCount, __ATOMIC_RELAXED) >> RC_FLAGS_COUNT; + auto bits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME); + if (bits.hasSideTable()) + return bits.getSideTable()->getCount(); + + assert(!bits.getIsDeiniting()); // FIXME: can we assert this? + return bits.getStrongExtraRefCount() + 1; } // Return whether the reference count is exactly 1. - // During deallocation the reference count is undefined. 
+ // Once deinit begins the reference count is undefined. bool isUniquelyReferenced() const { - return getCount() == 1; + auto bits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME); + if (bits.hasSideTable()) + return false; // FIXME: implement side table path if useful + + assert(!bits.getIsDeiniting()); + return bits.isUniquelyReferenced(); } // Return whether the reference count is exactly 1 or the pin flag - // is set. During deallocation the reference count is undefined. + // is set. Once deinit begins the reference count is undefined. bool isUniquelyReferencedOrPinned() const { - auto value = __atomic_load_n(&refCount, __ATOMIC_RELAXED); - // Rotating right by one sets the sign bit to the pinned bit. After - // rotation, the dealloc flag is the least significant bit followed by the - // reference count. A reference count of two or higher means that our value - // is bigger than 3 if the pinned bit is not set. If the pinned bit is set - // the value is negative. - // Note: Because we are using the sign bit for testing pinnedness it - // is important to do a signed comparison below. - static_assert(RC_PINNED_FLAG == 1, - "The pinned flag must be the lowest bit"); - auto rotateRightByOne = ((value >> 1) | (value << 31)); - return (int32_t)rotateRightByOne < (int32_t)RC_ONE; - } - - // Return true if the object is inside deallocation. - bool isDeallocating() const { - return __atomic_load_n(&refCount, __ATOMIC_RELAXED) & RC_DEALLOCATING_FLAG; - } - -private: - template - bool doDecrementShouldDeallocate() { - // If we're being asked to clear the pinned flag, we can assume - // it's already set. - constexpr uint32_t quantum = - (ClearPinnedFlag ? RC_ONE + RC_PINNED_FLAG : RC_ONE); - uint32_t newval = __atomic_sub_fetch(&refCount, quantum, __ATOMIC_RELEASE); - - assert((!ClearPinnedFlag || !(newval & RC_PINNED_FLAG)) && - "unpinning reference that was not pinned"); - assert(newval + quantum >= RC_ONE && - "releasing reference with a refcount of zero"); - - // If we didn't drop the reference count to zero, or if the - // deallocating flag is already set, we're done; don't start - // deallocation. We can assume that the pinned flag isn't set - // unless the refcount is nonzero, and or'ing it in gives us a - // more efficient mask: the check just becomes "is newval nonzero". - if ((newval & (RC_COUNT_MASK | RC_PINNED_FLAG | RC_DEALLOCATING_FLAG)) - != 0) { - // Refcount is not zero. We definitely do not need to deallocate. + auto bits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME); + // FIXME: implement side table path if useful + // In the meantime we don't check it here. + // bits.isUniquelyReferencedOrPinned() checks it too, + // and the compiler optimizer does better if this check is not here. + // if (bits.hasSideTable()) + // return false; + + assert(!bits.getIsDeiniting()); + + // bits.isUniquelyReferencedOrPinned() also checks the side table bit + // and this path is optimized better if we don't check it here first. + if (bits.isUniquelyReferencedOrPinned()) return true; + if (!bits.hasSideTable()) return false; - } + return bits.getSideTable()->isUniquelyReferencedOrPinned(); + } - // Refcount is now 0 and is not already deallocating. Try to set - // the deallocating flag. This must be atomic because it can race - // with weak retains. - // - // This also performs the before-deinit acquire barrier if we set the flag. 
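isUniquelyReferencedOrPinned() above leans on the rotate-and-compare test from RefCountBitsT. For the 64-bit inline layout the trick rotates IsPinned into the sign bit and compares against 2^32, the width of the ignored Unowned and IsDeiniting fields. A standalone check of the interesting cases:

#include <cassert>
#include <cstdint>

// The rotate-and-compare test from RefCountBitsT::isUniquelyReferencedOrPinned,
// spelled out for the 64-bit inline layout (IsPinned = bit 0, Unowned = bits
// 1..31, IsDeiniting = bit 32, StrongExtra = bits 33..62, UseSlowRC = bit 63).
static bool uniquelyReferencedOrPinned(uint64_t bits) {
  const uint64_t X = uint64_t(1) << 32;            // 32 ignored low bits
  uint64_t rotated = (bits >> 1) | (bits << 63);   // IsPinned -> sign bit
  return int64_t(rotated) < int64_t(X);
}

int main() {
  const uint64_t unownedOne = uint64_t(1) << 1;    // unowned == 1 (ignored)
  const uint64_t pinned     = uint64_t(1) << 0;
  const uint64_t oneExtra   = uint64_t(1) << 33;   // strong extra == 1

  assert(uniquelyReferencedOrPinned(unownedOne));                     // unique
  assert(uniquelyReferencedOrPinned(unownedOne | pinned));            // pinned
  assert(uniquelyReferencedOrPinned(unownedOne | oneExtra | pinned)); // pinned wins
  assert(!uniquelyReferencedOrPinned(unownedOne | oneExtra));         // 2+ strong refs
}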
- static_assert(RC_FLAGS_COUNT == 2, - "fix decrementShouldDeallocate() if you add more flags"); - uint32_t oldval = 0; - newval = RC_DEALLOCATING_FLAG; - return __atomic_compare_exchange(&refCount, &oldval, &newval, 0, - __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); - } - - template - bool doDecrementShouldDeallocateNonAtomic() { - // If we're being asked to clear the pinned flag, we can assume - // it's already set. - constexpr uint32_t quantum = - (ClearPinnedFlag ? RC_ONE + RC_PINNED_FLAG : RC_ONE); - uint32_t val = __atomic_load_n(&refCount, __ATOMIC_RELAXED); - val -= quantum; - __atomic_store_n(&refCount, val, __ATOMIC_RELEASE); - uint32_t newval = refCount; - - assert((!ClearPinnedFlag || !(newval & RC_PINNED_FLAG)) && - "unpinning reference that was not pinned"); - assert(newval + quantum >= RC_ONE && - "releasing reference with a refcount of zero"); - - // If we didn't drop the reference count to zero, or if the - // deallocating flag is already set, we're done; don't start - // deallocation. We can assume that the pinned flag isn't set - // unless the refcount is nonzero, and or'ing it in gives us a - // more efficient mask: the check just becomes "is newval nonzero". - if ((newval & (RC_COUNT_MASK | RC_PINNED_FLAG | RC_DEALLOCATING_FLAG)) - != 0) { - // Refcount is not zero. We definitely do not need to deallocate. - return false; - } + // Return true if the object has started deiniting. + bool isDeiniting() const { + auto bits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME); + if (bits.hasSideTable()) + return bits.getSideTable()->isDeiniting(); + else + return bits.getIsDeiniting(); + } - // Refcount is now 0 and is not already deallocating. Try to set - // the deallocating flag. This must be atomic because it can race - // with weak retains. - // - // This also performs the before-deinit acquire barrier if we set the flag. - static_assert(RC_FLAGS_COUNT == 2, - "fix decrementShouldDeallocate() if you add more flags"); - uint32_t oldval = 0; - newval = RC_DEALLOCATING_FLAG; - return __atomic_compare_exchange(&refCount, &oldval, &newval, 0, - __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); - } - - template - bool doDecrementShouldDeallocateN(uint32_t n) { - // If we're being asked to clear the pinned flag, we can assume - // it's already set. - uint32_t delta = (n << RC_FLAGS_COUNT) + (ClearPinnedFlag ? RC_PINNED_FLAG : 0); - uint32_t newval = __atomic_sub_fetch(&refCount, delta, __ATOMIC_RELEASE); - - assert((!ClearPinnedFlag || !(newval & RC_PINNED_FLAG)) && - "unpinning reference that was not pinned"); - assert(newval + delta >= RC_ONE && - "releasing reference with a refcount of zero"); - - // If we didn't drop the reference count to zero, or if the - // deallocating flag is already set, we're done; don't start - // deallocation. We can assume that the pinned flag isn't set - // unless the refcount is nonzero, and or'ing it in gives us a - // more efficient mask: the check just becomes "is newval nonzero". - if ((newval & (RC_COUNT_MASK | RC_PINNED_FLAG | RC_DEALLOCATING_FLAG)) - != 0) { - // Refcount is not zero. We definitely do not need to deallocate. - return false; - } + /// Return true if the object can be freed directly right now. + /// (transition DEINITING -> DEAD) + /// This is used in swift_deallocObject(). + /// Can be freed now means: + /// no side table + /// unowned reference count is 1 + /// The object is assumed to be deiniting with no strong references already. 
+ bool canBeFreedNow() const { + auto bits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME); + return (!bits.hasSideTable() && + bits.getIsDeiniting() && + bits.getStrongExtraRefCount() == 0 && + bits.getUnownedRefCount() == 1); + } - // Refcount is now 0 and is not already deallocating. Try to set - // the deallocating flag. This must be atomic because it can race - // with weak retains. - // - // This also performs the before-deinit acquire barrier if we set the flag. - static_assert(RC_FLAGS_COUNT == 2, - "fix decrementShouldDeallocate() if you add more flags"); - uint32_t oldval = 0; - newval = RC_DEALLOCATING_FLAG; - return __atomic_compare_exchange(&refCount, &oldval, &newval, 0, - __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); - } - - template - bool doDecrementShouldDeallocateNNonAtomic(uint32_t n) { - // If we're being asked to clear the pinned flag, we can assume - // it's already set. - uint32_t delta = (n << RC_FLAGS_COUNT) + (ClearPinnedFlag ? RC_PINNED_FLAG : 0); - uint32_t val = __atomic_load_n(&refCount, __ATOMIC_RELAXED); - val -= delta; - __atomic_store_n(&refCount, val, __ATOMIC_RELEASE); - uint32_t newval = val; - - assert((!ClearPinnedFlag || !(newval & RC_PINNED_FLAG)) && - "unpinning reference that was not pinned"); - assert(newval + delta >= RC_ONE && - "releasing reference with a refcount of zero"); - - // If we didn't drop the reference count to zero, or if the - // deallocating flag is already set, we're done; don't start - // deallocation. We can assume that the pinned flag isn't set - // unless the refcount is nonzero, and or'ing it in gives us a - // more efficient mask: the check just becomes "is newval nonzero". - if ((newval & (RC_COUNT_MASK | RC_PINNED_FLAG | RC_DEALLOCATING_FLAG)) - != 0) { - // Refcount is not zero. We definitely do not need to deallocate. - return false; + private: + + // Second slow path of doDecrement, where the + // object may have a side table entry. + template + bool doDecrementSideTable(RefCountBits oldbits, uint32_t dec); + + // First slow path of doDecrement, where the object may need to be deinited. + // Side table is handled in the second slow path, doDecrementSideTable(). + template + bool doDecrementSlow(RefCountBits oldbits, uint32_t dec) { + RefCountBits newbits; + + bool deinitNow; + do { + newbits = oldbits; + + bool fast = newbits.decrementStrongExtraRefCount(dec, clearPinnedFlag); + if (fast) { + // Decrement completed normally. New refcount is not zero. + deinitNow = false; + } + else if (oldbits.hasSideTable()) { + // Decrement failed because we're on some other slow path. + return doDecrementSideTable(oldbits, dec); + } + else { + // Decrement underflowed. Begin deinit. + // LIVE -> DEINITING + deinitNow = true; + assert(!oldbits.getIsDeiniting()); // FIXME: make this an error? + newbits = oldbits; // Undo failed decrement of newbits. + newbits.setStrongExtraRefCount(0); + newbits.setIsDeiniting(true); + if (clearPinnedFlag) + newbits.setIsPinned(false); + } + } while (!refCounts.compare_exchange_weak(oldbits, newbits, + std::memory_order_release, + std::memory_order_relaxed)); + if (performDeinit && deinitNow) { + std::atomic_thread_fence(std::memory_order_acquire); + _swift_release_dealloc(getHeapObject()); } - // Refcount is now 0 and is not already deallocating. Try to set - // the deallocating flag. This must be atomic because it can race - // with weak retains. - // - // This also performs the before-deinit acquire barrier if we set the flag. 
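doDecrementSlow() above pairs release ordering on the decrement with an acquire fence before calling _swift_release_dealloc(), which is the barrier rule described earlier: deinit must observe every write made while the object was still strongly referenced. A minimal standalone sketch of that pattern with a toy flat counter instead of the packed bits:

#include <atomic>
#include <cstdint>

// Ordering sketch: releasing threads publish their writes with a release
// decrement; the thread that takes the count to zero issues an acquire fence
// before running deinit, so deinit sees all of those writes.
struct ToyObject {
  std::atomic<uint64_t> refCounts{1};
  int payload = 0;               // ordinary (non-atomic) object contents
};

void releaseStrong(ToyObject &obj, void (*deinit)(ToyObject &)) {
  if (obj.refCounts.fetch_sub(1, std::memory_order_release) == 1) {
    // Pairs with the release decrements performed by other threads.
    std::atomic_thread_fence(std::memory_order_acquire);
    deinit(obj);
  }
}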
- static_assert(RC_FLAGS_COUNT == 2, - "fix decrementShouldDeallocate() if you add more flags"); - uint32_t oldval = 0; - newval = RC_DEALLOCATING_FLAG; - return __atomic_compare_exchange(&refCount, &oldval, &newval, 0, - __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); + return deinitNow; } -}; + + public: // FIXME: access control hack + + // Fast path of atomic strong decrement. + // + // Deinit is optionally handled directly instead of always deferring to + // the caller because the compiler can optimize this arrangement better. + template + bool doDecrement(uint32_t dec) { + auto oldbits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME); + RefCountBits newbits; + + do { + newbits = oldbits; + bool fast = newbits.decrementStrongExtraRefCount(dec, clearPinnedFlag); + if (!fast) + // Slow paths include side table; deinit; underflow + return doDecrementSlow(oldbits, dec); + } while (!refCounts.compare_exchange_weak(oldbits, newbits, + std::memory_order_release, + std::memory_order_relaxed)); + + return false; // don't deinit + } + + private: + // This is independently specialized below for inline and out-of-line use. + template + bool doDecrementNonAtomic(uint32_t dec); -// Weak reference count. -class WeakRefCount { - uint32_t refCount; + // UNOWNED + + public: + // Increment the unowned reference count. + void incrementUnowned(uint32_t inc) { + auto oldbits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME); + RefCountBits newbits; + do { + if (oldbits.hasSideTable()) + return oldbits.getSideTable()->incrementUnowned(inc); + + newbits = oldbits; + assert(newbits.getUnownedRefCount() != 0); + newbits.incrementUnownedRefCount(inc); + // FIXME: overflow check? + } while (!refCounts.compare_exchange_weak(oldbits, newbits, + std::memory_order_relaxed)); + } - enum : uint32_t { - // There isn't really a flag here. - RC_UNUSED_FLAG = 1, + // Decrement the unowned reference count. + // Return true if the caller should free the object. + bool decrementUnownedShouldFree(uint32_t dec) { + auto oldbits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME); + RefCountBits newbits; + + bool performFree; + do { + if (oldbits.hasSideTable()) + return oldbits.getSideTable()->decrementUnownedShouldFree(dec); + + newbits = oldbits; + newbits.decrementUnownedRefCount(dec); + if (newbits.getUnownedRefCount() == 0) { + // DEINITED -> FREED or DEINITED -> DEAD + // Caller will free the object. Weak decrement is handled by + // HeapObjectSideTableEntry::decrementUnownedShouldFree. + assert(newbits.getIsDeiniting()); + performFree = true; + } else { + performFree = false; + } + // FIXME: underflow check? + } while (!refCounts.compare_exchange_weak(oldbits, newbits, + std::memory_order_relaxed)); + return performFree; + } - RC_FLAGS_COUNT = 1, - RC_FLAGS_MASK = 1, - RC_COUNT_MASK = ~RC_FLAGS_MASK, + // Return unowned reference count. + // Note that this is not equal to the number of outstanding unowned pointers. + uint32_t getUnownedCount() const { + auto bits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME); + if (bits.hasSideTable()) + return bits.getSideTable()->getUnownedCount(); + else + return bits.getUnownedRefCount(); + } - RC_ONE = RC_FLAGS_MASK + 1 - }; - static_assert(RC_ONE == 1 << RC_FLAGS_COUNT, - "inconsistent refcount flags"); - static_assert(RC_ONE == 1 + RC_FLAGS_MASK, - "inconsistent refcount flags"); + // WEAK + + public: + // Returns the object's side table entry (creating it if necessary) with + // its weak ref count incremented. + // Returns nullptr if the object is already deiniting. 
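canBeFreedNow() and decrementUnownedShouldFree() above are the two halves of the deallocation decision described in the life-cycle comment: free immediately if nothing else holds the allocation, otherwise drop the +1 unowned count held on behalf of the strong references and free once it reaches zero. This is a conceptual sketch only, not the real swift_deallocObject; the include and the freeMemory stub are assumptions:

#include "swift/Runtime/HeapObject.h" // assumed to provide HeapObject with refCounts

// Stand-in for the real allocator call.
static void freeMemory(swift::HeapObject *) { /* platform free would go here */ }

static void deallocObjectSketch(swift::HeapObject *object) {
  if (object->refCounts.canBeFreedNow()) {
    // DEINITING -> DEAD: no side table and no extra unowned references.
    freeMemory(object);
  } else if (object->refCounts.decrementUnownedShouldFree(1)) {
    // The +1 unowned held for the strong references was the last one.
    freeMemory(object);
  }
}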
+ // Use this when creating a new weak reference to an object. + HeapObjectSideTableEntry* formWeakReference(); + + // Increment the weak reference count. + void incrementWeak() { + auto oldbits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME); + RefCountBits newbits; + do { + newbits = oldbits; + assert(newbits.getWeakRefCount() != 0); + newbits.incrementWeakRefCount(); + // FIXME: overflow check + } while (!refCounts.compare_exchange_weak(oldbits, newbits, + std::memory_order_relaxed)); + } + + bool decrementWeakShouldCleanUp() { + auto oldbits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME); + RefCountBits newbits; + + bool performFree; + do { + newbits = oldbits; + performFree = newbits.decrementWeakRefCount(); + } while (!refCounts.compare_exchange_weak(oldbits, newbits, + std::memory_order_relaxed)); + + return performFree; + } + + // Return weak reference count. + // Note that this is not equal to the number of outstanding weak pointers. + uint32_t getWeakCount() const { + auto bits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME); + if (bits.hasSideTable()) { + return bits.getSideTable()->getWeakCount(); + } else { + // No weak refcount storage. Return only the weak increment held + // on behalf of the unowned count. + return bits.getUnownedRefCount() ? 1 : 0; + } + } - public: - enum Initialized_t { Initialized }; - // WeakRefCount must be trivially constructible to avoid ObjC++ - // destruction overhead at runtime. Use WeakRefCount(Initialized) to produce - // an initialized instance. - WeakRefCount() = default; + private: + HeapObject *getHeapObject() const; - // Weak refcount of a new object is 1. - constexpr WeakRefCount(Initialized_t) - : refCount(RC_ONE) { } + HeapObjectSideTableEntry* allocateSideTable(); +}; - void init() { - refCount = RC_ONE; +typedef RefCounts InlineRefCounts; +typedef RefCounts SideTableRefCounts; + +static_assert(swift::IsTriviallyConstructible::value, + "InlineRefCounts must be trivially initializable"); +static_assert(std::is_trivially_destructible::value, + "InlineRefCounts must be trivially destructible"); + +/* FIXME: small header for 32-bit +static_assert(sizeof(InlineRefCounts) == sizeof(uintptr_t), + "InlineRefCounts must be pointer-sized"); +static_assert(alignof(InlineRefCounts) == alignof(uintptr_t), +"InlineRefCounts must be pointer-aligned"); +*/ + + +class HeapObjectSideTableEntry { + // FIXME: does object need to be atomic? + std::atomic object; + SideTableRefCounts refCounts; + + public: + HeapObjectSideTableEntry(HeapObject *newObject) + : object(newObject), refCounts() + { } + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Winvalid-offsetof" + static ptrdiff_t refCountsOffset() { + return offsetof(HeapObjectSideTableEntry, refCounts); } +#pragma clang diagnostic pop - /// Initialize for a stack promoted object. This prevents that the final - /// release frees the memory of the object. - void initForNotDeallocating() { - refCount = RC_ONE + RC_ONE; + HeapObject* tryRetain() { + if (refCounts.tryIncrement()) + return object.load(std::memory_order_relaxed); + else + return nullptr; } - // Increment the weak reference count. - void increment() { - uint32_t newval = __atomic_add_fetch(&refCount, RC_ONE, __ATOMIC_RELAXED); - assert(newval >= RC_ONE && "weak refcount overflow"); - (void)newval; + void initRefCounts(InlineRefCountBits newbits) { + refCounts.init(newbits); } - /// Increment the weak reference count by n. 
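formWeakReference() and HeapObjectSideTableEntry::tryRetain() above are enough to express the two basic weak-variable operations: forming a reference and loading a strong reference out of it. A conceptual sketch of that mapping, not the real swift_weakInit/swift_weakLoadStrong lowering; the include is an assumption:

#include "swift/Runtime/HeapObject.h" // assumed to pull in the RefCount.h above

// Forming a weak reference creates the side table on demand and takes a
// +1 weak count, or yields nullptr if the object is already deiniting.
swift::HeapObjectSideTableEntry *weakInitSketch(swift::HeapObject *object) {
  return object->refCounts.formWeakReference();
}

// Loading a strong reference succeeds only while the object is not deiniting.
swift::HeapObject *weakLoadStrongSketch(swift::HeapObjectSideTableEntry *side) {
  if (!side)
    return nullptr;          // reference was formed on a deiniting object
  return side->tryRetain();  // new strong reference, or nullptr after deinit
}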
- void increment(uint32_t n) { - uint32_t addval = (n << RC_FLAGS_COUNT); - uint32_t newval = __atomic_add_fetch(&refCount, addval, __ATOMIC_RELAXED); - assert(newval >= addval && "weak refcount overflow"); - (void)newval; + HeapObject *unsafeGetObject() const { + return object.load(std::memory_order_relaxed); } - // Decrement the weak reference count. - // Return true if the caller should deallocate the object. - bool decrementShouldDeallocate() { - uint32_t oldval = __atomic_fetch_sub(&refCount, RC_ONE, __ATOMIC_RELAXED); - assert(oldval >= RC_ONE && "weak refcount underflow"); + // STRONG + + void incrementStrong(uint32_t inc) { + refCounts.increment(inc); + } - // Should dealloc if count was 1 before decrementing (i.e. it is zero now) - return (oldval & RC_COUNT_MASK) == RC_ONE; + template + bool decrementStrong(uint32_t dec) { + return refCounts.doDecrement(dec); } - /// Decrement the weak reference count. - /// Return true if the caller should deallocate the object. - bool decrementShouldDeallocateN(uint32_t n) { - uint32_t subval = (n << RC_FLAGS_COUNT); - uint32_t oldval = __atomic_fetch_sub(&refCount, subval, __ATOMIC_RELAXED); - assert(oldval >= subval && "weak refcount underflow"); + void decrementFromOneNonAtomic() { + // FIXME: can there be a non-atomic implementation? + decrementStrong(1); + } + + bool isDeiniting() const { + return refCounts.isDeiniting(); + } - // Should dealloc if count was subval before decrementing (i.e. it is zero now) - return (oldval & RC_COUNT_MASK) == subval; + bool tryIncrement() { + return refCounts.tryIncrement(); + } + + bool tryIncrementAndPin() { + return refCounts.tryIncrementAndPin(); } - // Return weak reference count. - // Note that this is not equal to the number of outstanding weak pointers. uint32_t getCount() const { - return __atomic_load_n(&refCount, __ATOMIC_RELAXED) >> RC_FLAGS_COUNT; + return refCounts.getCount(); + } + + bool isUniquelyReferencedOrPinned() const { + return refCounts.isUniquelyReferencedOrPinned(); + } + + // UNOWNED + + void incrementUnowned(uint32_t inc) { + return refCounts.incrementUnowned(inc); + } + + bool decrementUnownedShouldFree(uint32_t dec) { + bool shouldFree = refCounts.decrementUnownedShouldFree(dec); + if (shouldFree) { + // DEINITED -> FREED + // Caller will free the object. + decrementWeak(); + } + + return shouldFree; + } + + uint32_t getUnownedCount() const { + return refCounts.getUnownedCount(); + } + + + // WEAK + + LLVM_NODISCARD + HeapObjectSideTableEntry* incrementWeak() { + // incrementWeak need not be atomic w.r.t. concurrent deinit initiation. + // The client can't actually get a reference to the object without + // going through tryRetain(). tryRetain is the one that needs to be + // atomic w.r.t. concurrent deinit initiation. + // The check here is merely an optimization. + if (refCounts.isDeiniting()) + return nullptr; + refCounts.incrementWeak(); + return this; + } + + void decrementWeak() { + // FIXME: assertions + // FIXME: optimize barriers + bool cleanup = refCounts.decrementWeakShouldCleanUp(); + if (!cleanup) + return; + + // Weak ref count is now zero. Delete the side table entry. 
+ // FREED -> DEAD + assert(refCounts.getUnownedCount() == 0); + delete this; + } + + uint32_t getWeakCount() const { + return refCounts.getWeakCount(); } }; -static_assert(swift::IsTriviallyConstructible::value, - "StrongRefCount must be trivially initializable"); -static_assert(swift::IsTriviallyConstructible::value, - "WeakRefCount must be trivially initializable"); -static_assert(std::is_trivially_destructible::value, - "StrongRefCount must be trivially destructible"); -static_assert(std::is_trivially_destructible::value, - "WeakRefCount must be trivially destructible"); + +// Inline version of non-atomic strong decrement. +// This version can actually be non-atomic. +template <> +template +LLVM_ATTRIBUTE_ALWAYS_INLINE +inline bool RefCounts::doDecrementNonAtomic(uint32_t dec) { + + // We can get away without atomicity here. + // The caller claims that there are no other threads with strong references + // to this object. + // We can non-atomically check that there are no outstanding unowned or + // weak references, and if nobody else has a strong reference then + // nobody else can form a new unowned or weak reference. + // Therefore there is no other thread that can be concurrently + // manipulating this object's retain counts. + + auto oldbits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME); + + // Use slow path if we can't guarantee atomicity. + if (oldbits.hasSideTable() || oldbits.getUnownedRefCount() != 1) + return doDecrementSlow(oldbits, dec); + + auto newbits = oldbits; + bool fast = newbits.decrementStrongExtraRefCount(dec, clearPinnedFlag); + if (!fast) + return doDecrementSlow(oldbits, dec); + + refCounts.store(newbits, std::memory_order_relaxed); + return false; // don't deinit +} + +// Out-of-line version of non-atomic strong decrement. +// This version needs to be atomic because of the +// threat of concurrent read of a weak reference. +template <> +template +inline bool RefCounts:: +doDecrementNonAtomic(uint32_t dec) { + return doDecrement(dec); +} + + +template <> +template +inline bool RefCounts:: +doDecrementSideTable(InlineRefCountBits oldbits, uint32_t dec) { + auto side = oldbits.getSideTable(); + return side->decrementStrong(dec); +} + +template <> +template +inline bool RefCounts:: +doDecrementSideTable(SideTableRefCountBits oldbits, uint32_t dec) { + swift::crash("side table refcount must not have " + "a side table entry of its own"); +} + + +template <> inline +HeapObject* RefCounts::getHeapObject() const { + auto offset = sizeof(void *); + auto prefix = ((char *)this - offset); + return (HeapObject *)prefix; +} + +template <> inline +HeapObject* RefCounts::getHeapObject() const { + auto offset = HeapObjectSideTableEntry::refCountsOffset(); + auto prefix = ((char *)this - offset); + return *(HeapObject **)prefix; +} + + +// namespace swift +} + +// for use by SWIFT_HEAPOBJECT_NON_OBJC_MEMBERS +typedef swift::InlineRefCounts InlineRefCounts; // __cplusplus #endif diff --git a/stdlib/public/runtime/CMakeLists.txt b/stdlib/public/runtime/CMakeLists.txt index 0d967adfb5f83..19bb36147fa98 100644 --- a/stdlib/public/runtime/CMakeLists.txt +++ b/stdlib/public/runtime/CMakeLists.txt @@ -53,6 +53,7 @@ set(swift_runtime_sources Once.cpp Portability.cpp ProtocolConformance.cpp + RefCount.cpp RuntimeEntrySymbols.cpp) # Acknowledge that the following sources are known. 
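The inline fast paths added above (doDecrement, incrementUnowned, decrementUnownedShouldFree, incrementWeak) all share one shape: load the packed bits, mutate a local copy, publish it with compare_exchange_weak, and fall out to a slow path whenever the bits carry a side table pointer or the count would overflow or underflow. The following is a minimal, self-contained sketch of that shape only; it uses a bare 32-bit strong count rather than the real InlineRefCountBits layout, and every name in it is illustrative.

#include <atomic>
#include <cassert>
#include <cstdint>

struct ToyRefCount {
  std::atomic<uint32_t> strong{1};

  // Fast path: pure count adjustment. Anything interesting (reaching zero,
  // underflow, or, in the real implementation, a side table) is deferred to
  // the slow path, mirroring doDecrement() above.
  bool decrement(uint32_t dec) {
    uint32_t oldval = strong.load(std::memory_order_relaxed);
    uint32_t newval;
    do {
      if (oldval <= dec)                       // would reach zero or underflow
        return decrementSlow(dec);
      newval = oldval - dec;
    } while (!strong.compare_exchange_weak(oldval, newval,
                                           std::memory_order_release,
                                           std::memory_order_relaxed));
    return false;                              // no deinit needed
  }

  // Stand-in for a slow path like doDecrementSlow(): performs the decrement
  // and reports whether the caller should run deinit.
  bool decrementSlow(uint32_t dec) {
    uint32_t prev = strong.fetch_sub(dec, std::memory_order_release);
    assert(prev >= dec && "refcount underflow");
    return prev == dec;                        // count reached zero
  }
};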
diff --git a/stdlib/public/runtime/Errors.cpp b/stdlib/public/runtime/Errors.cpp index c50ad13e5d602..b7f0a5094026d 100644 --- a/stdlib/public/runtime/Errors.cpp +++ b/stdlib/public/runtime/Errors.cpp @@ -272,3 +272,25 @@ swift_deletedMethodError() { swift::fatalError(/* flags = */ 0, "fatal error: call of deleted method\n"); } + + +// Crash due to a retain count overflow. +// FIXME: can't pass the object's address from InlineRefCounts without hacks +void swift::swift_abortRetainOverflow() { + swift::fatalError(FatalErrorFlags::ReportBacktrace, + "fatal error: object was retained too many times"); +} + +// Crash due to retain of a dead unowned reference. +// FIXME: can't pass the object's address from InlineRefCounts without hacks +void swift::swift_abortRetainUnowned(const void *object) { + if (object) { + swift::fatalError(FatalErrorFlags::ReportBacktrace, + "fatal error: attempted to read an unowned reference but " + "object %p was already deallocated", object); + } else { + swift::fatalError(FatalErrorFlags::ReportBacktrace, + "fatal error: attempted to read an unowned reference but " + "the object was already deallocated"); + } +} diff --git a/stdlib/public/runtime/HeapObject.cpp b/stdlib/public/runtime/HeapObject.cpp index 75f2fb4d9ff1b..402c9d4a1f6c9 100644 --- a/stdlib/public/runtime/HeapObject.cpp +++ b/stdlib/public/runtime/HeapObject.cpp @@ -23,6 +23,7 @@ #include "llvm/Support/MathExtras.h" #include "MetadataCache.h" #include "Private.h" +#include "WeakReference.h" #include "swift/Runtime/Debug.h" #include #include @@ -63,8 +64,7 @@ SWIFT_RT_ENTRY_IMPL(swift_allocObject)(HeapMetadata const *metadata, requiredAlignmentMask)); // FIXME: this should be a placement new but that adds a null check object->metadata = metadata; - object->refCount.init(); - object->weakRefCount.init(); + object->refCounts.init(); // If leak tracking is enabled, start tracking this object. 
SWIFT_LEAKS_START_TRACKING_OBJECT(object); @@ -76,8 +76,7 @@ HeapObject * swift::swift_initStackObject(HeapMetadata const *metadata, HeapObject *object) { object->metadata = metadata; - object->refCount.init(); - object->weakRefCount.initForNotDeallocating(); + object->refCounts.initForNotFreeing(); return object; @@ -85,13 +84,17 @@ swift::swift_initStackObject(HeapMetadata const *metadata, void swift::swift_verifyEndOfLifetime(HeapObject *object) { - if (object->refCount.getCount() != 0) + if (object->refCounts.getCount() != 0) swift::fatalError(/* flags = */ 0, "fatal error: stack object escaped\n"); - if (object->weakRefCount.getCount() != 1) + if (object->refCounts.getUnownedCount() != 1) swift::fatalError(/* flags = */ 0, - "fatal error: weak/unowned reference to stack object\n"); + "fatal error: unowned reference to stack object\n"); + + if (object->refCounts.getWeakCount() != 0) + swift::fatalError(/* flags = */ 0, + "fatal error: weak reference to stack object\n"); } /// \brief Allocate a reference-counted object on the heap that @@ -203,7 +206,8 @@ void swift::swift_nonatomic_retain(HeapObject *object) { SWIFT_RT_ENTRY_IMPL_VISIBILITY extern "C" void SWIFT_RT_ENTRY_IMPL(swift_nonatomic_retain)(HeapObject *object) { - _swift_nonatomic_retain_inlined(object); + if (object) + object->refCounts.incrementNonAtomic(1); } void swift::swift_nonatomic_release(HeapObject *object) { @@ -213,17 +217,16 @@ void swift::swift_nonatomic_release(HeapObject *object) { SWIFT_RT_ENTRY_IMPL_VISIBILITY extern "C" void SWIFT_RT_ENTRY_IMPL(swift_nonatomic_release)(HeapObject *object) { - if (object && object->refCount.decrementShouldDeallocateNonAtomic()) { - // TODO: Use non-atomic _swift_release_dealloc? - _swift_release_dealloc(object); - } + if (object) + object->refCounts.decrementAndMaybeDeinitNonAtomic(1); } SWIFT_RT_ENTRY_IMPL_VISIBILITY extern "C" void SWIFT_RT_ENTRY_IMPL(swift_retain)(HeapObject *object) SWIFT_CC(RegisterPreservingCC_IMPL) { - _swift_retain_inlined(object); + if (object) + object->refCounts.increment(1); } void swift::swift_retain_n(HeapObject *object, uint32_t n) @@ -235,9 +238,8 @@ SWIFT_RT_ENTRY_IMPL_VISIBILITY extern "C" void SWIFT_RT_ENTRY_IMPL(swift_retain_n)(HeapObject *object, uint32_t n) SWIFT_CC(RegisterPreservingCC_IMPL) { - if (object) { - object->refCount.increment(n); - } + if (object) + object->refCounts.increment(n); } void swift::swift_nonatomic_retain_n(HeapObject *object, uint32_t n) @@ -249,9 +251,8 @@ SWIFT_RT_ENTRY_IMPL_VISIBILITY extern "C" void SWIFT_RT_ENTRY_IMPL(swift_nonatomic_retain_n)(HeapObject *object, uint32_t n) SWIFT_CC(RegisterPreservingCC_IMPL) { - if (object) { - object->refCount.incrementNonAtomic(n); - } + if (object) + object->refCounts.incrementNonAtomic(n); } void swift::swift_release(HeapObject *object) @@ -263,9 +264,8 @@ SWIFT_RT_ENTRY_IMPL_VISIBILITY extern "C" void SWIFT_RT_ENTRY_IMPL(swift_release)(HeapObject *object) SWIFT_CC(RegisterPreservingCC_IMPL) { - if (object && object->refCount.decrementShouldDeallocate()) { - _swift_release_dealloc(object); - } + if (object) + object->refCounts.decrementAndMaybeDeinit(1); } void swift::swift_release_n(HeapObject *object, uint32_t n) @@ -277,13 +277,12 @@ SWIFT_RT_ENTRY_IMPL_VISIBILITY extern "C" void SWIFT_RT_ENTRY_IMPL(swift_release_n)(HeapObject *object, uint32_t n) SWIFT_CC(RegisterPreservingCC_IMPL) { - if (object && object->refCount.decrementShouldDeallocateN(n)) { - _swift_release_dealloc(object); - } + if (object) + object->refCounts.decrementAndMaybeDeinit(n); } void 
swift::swift_setDeallocating(HeapObject *object) { - object->refCount.decrementFromOneAndDeallocateNonAtomic(); + object->refCounts.decrementFromOneNonAtomic(); } void swift::swift_nonatomic_release_n(HeapObject *object, uint32_t n) @@ -295,17 +294,16 @@ SWIFT_RT_ENTRY_IMPL_VISIBILITY extern "C" void SWIFT_RT_ENTRY_IMPL(swift_nonatomic_release_n)(HeapObject *object, uint32_t n) SWIFT_CC(RegisterPreservingCC_IMPL) { - if (object && object->refCount.decrementShouldDeallocateNNonAtomic(n)) { - _swift_release_dealloc(object); - } + if (object) + object->refCounts.decrementAndMaybeDeinitNonAtomic(n); } size_t swift::swift_retainCount(HeapObject *object) { - return object->refCount.getCount(); + return object->refCounts.getCount(); } size_t swift::swift_unownedRetainCount(HeapObject *object) { - return object->weakRefCount.getCount(); + return object->refCounts.getUnownedCount(); } void swift::swift_unownedRetain(HeapObject *object) @@ -313,7 +311,7 @@ void swift::swift_unownedRetain(HeapObject *object) if (!object) return; - object->weakRefCount.increment(); + object->refCounts.incrementUnowned(1); } void swift::swift_unownedRelease(HeapObject *object) @@ -321,12 +319,13 @@ void swift::swift_unownedRelease(HeapObject *object) if (!object) return; - if (object->weakRefCount.decrementShouldDeallocate()) { - // Only class objects can be weak-retained and weak-released. - auto metadata = object->metadata; - assert(metadata->isClassObject()); - auto classMetadata = static_cast(metadata); - assert(classMetadata->isTypeMetadata()); + // Only class objects can be unowned-retained and unowned-released. + assert(object->metadata->isClassObject()); + assert(static_cast(object->metadata)->isTypeMetadata()); + + if (object->refCounts.decrementUnownedShouldFree(1)) { + auto classMetadata = static_cast(object->metadata); + SWIFT_RT_ENTRY_CALL(swift_slowDealloc) (object, classMetadata->getInstanceSize(), classMetadata->getInstanceAlignMask()); @@ -338,7 +337,7 @@ void swift::swift_unownedRetain_n(HeapObject *object, int n) if (!object) return; - object->weakRefCount.increment(n); + object->refCounts.incrementUnowned(n); } void swift::swift_unownedRelease_n(HeapObject *object, int n) @@ -346,12 +345,12 @@ void swift::swift_unownedRelease_n(HeapObject *object, int n) if (!object) return; - if (object->weakRefCount.decrementShouldDeallocateN(n)) { - // Only class objects can be weak-retained and weak-released. - auto metadata = object->metadata; - assert(metadata->isClassObject()); - auto classMetadata = static_cast(metadata); - assert(classMetadata->isTypeMetadata()); + // Only class objects can be unowned-retained and unowned-released. + assert(object->metadata->isClassObject()); + assert(static_cast(object->metadata)->isTypeMetadata()); + + if (object->refCounts.decrementUnownedShouldFree(n)) { + auto classMetadata = static_cast(object->metadata); SWIFT_RT_ENTRY_CALL(swift_slowDealloc) (object, classMetadata->getInstanceSize(), classMetadata->getInstanceAlignMask()); @@ -364,9 +363,8 @@ HeapObject *swift::swift_tryPin(HeapObject *object) // Try to set the flag. If this succeeds, the caller will be // responsible for clearing it. - if (object->refCount.tryIncrementAndPin()) { + if (object->refCounts.tryIncrementAndPin()) return object; - } // If setting the flag failed, it's because it was already set. // Return nil so that the object will be deallocated later. 
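One convention in the hunks above is easy to miss: an object with outstanding strong references also holds one unowned reference on their behalf, which is why getUnownedCount() is documented as not equal to the number of outstanding unowned pointers, and why swift_unownedRelease() is what ultimately frees the memory after deinit. Below is a toy model of that bookkeeping; it assumes only the +1 convention described in the comments and none of the real bit layout.

#include <cassert>
#include <cstdint>

struct ToyObject {
  uint32_t strong  = 1;    // a freshly allocated object starts at +1 strong...
  uint32_t unowned = 1;    // ...plus the +1 unowned held for the strong side
  bool deinited = false;
  bool freed    = false;

  void unownedRetain() { ++unowned; }

  void unownedRelease() {
    assert(unowned > 0);
    if (--unowned == 0)
      freed = true;                  // memory is returned to the allocator
  }

  void release() {
    assert(strong > 0 && !deinited);
    if (--strong == 0) {
      deinited = true;               // deinit runs here
      unownedRelease();              // drop the implicit +1; may free
    }
  }
};

// Example: an outstanding unowned reference keeps the allocation (not the
// object's contents) alive across deinit.
//   ToyObject o;         // strong 1, unowned 1
//   o.unownedRetain();   // strong 1, unowned 2
//   o.release();         // deinit runs; strong 0, unowned 1, not freed yet
//   o.unownedRelease();  // unowned 0, freed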
@@ -375,9 +373,8 @@ HeapObject *swift::swift_tryPin(HeapObject *object) void swift::swift_unpin(HeapObject *object) SWIFT_CC(RegisterPreservingCC_IMPL) { - if (object && object->refCount.decrementAndUnpinShouldDeallocate()) { - _swift_release_dealloc(object); - } + if (object) + object->refCounts.decrementAndUnpinAndMaybeDeinit(); } HeapObject *swift::swift_tryRetain(HeapObject *object) @@ -391,9 +388,8 @@ HeapObject *swift::swift_nonatomic_tryPin(HeapObject *object) // Try to set the flag. If this succeeds, the caller will be // responsible for clearing it. - if (object->refCount.tryIncrementAndPinNonAtomic()) { + if (object->refCounts.tryIncrementAndPinNonAtomic()) return object; - } // If setting the flag failed, it's because it was already set. // Return nil so that the object will be deallocated later. @@ -402,9 +398,8 @@ HeapObject *swift::swift_nonatomic_tryPin(HeapObject *object) void swift::swift_nonatomic_unpin(HeapObject *object) SWIFT_CC(RegisterPreservingCC_IMPL) { - if (object && object->refCount.decrementAndUnpinShouldDeallocateNonAtomic()) { - _swift_release_dealloc(object); - } + if (object) + object->refCounts.decrementAndUnpinAndMaybeDeinitNonAtomic(); } SWIFT_RT_ENTRY_IMPL_VISIBILITY @@ -414,7 +409,7 @@ HeapObject *SWIFT_RT_ENTRY_IMPL(swift_tryRetain)(HeapObject *object) if (!object) return nullptr; - if (object->refCount.tryIncrement()) return object; + if (object->refCounts.tryIncrement()) return object; else return nullptr; } @@ -426,47 +421,48 @@ bool swift_isDeallocating(HeapObject *object) { SWIFT_RT_ENTRY_IMPL_VISIBILITY extern "C" bool SWIFT_RT_ENTRY_IMPL(swift_isDeallocating)(HeapObject *object) { - if (!object) return false; - return object->refCount.isDeallocating(); + if (!object) + return false; + return object->refCounts.isDeiniting(); } void swift::swift_unownedRetainStrong(HeapObject *object) SWIFT_CC(RegisterPreservingCC_IMPL) { if (!object) return; - assert(object->weakRefCount.getCount() && - "object is not currently weakly retained"); + assert(object->refCounts.getUnownedCount() && + "object is not currently unowned-retained"); - if (! object->refCount.tryIncrement()) - _swift_abortRetainUnowned(object); + if (! object->refCounts.tryIncrement()) + swift::swift_abortRetainUnowned(object); } void swift::swift_unownedRetainStrongAndRelease(HeapObject *object) SWIFT_CC(RegisterPreservingCC_IMPL) { if (!object) return; - assert(object->weakRefCount.getCount() && - "object is not currently weakly retained"); + assert(object->refCounts.getUnownedCount() && + "object is not currently unowned-retained"); - if (! object->refCount.tryIncrement()) - _swift_abortRetainUnowned(object); + if (! object->refCounts.tryIncrement()) + swift::swift_abortRetainUnowned(object); // This should never cause a deallocation. - bool dealloc = object->weakRefCount.decrementShouldDeallocate(); + bool dealloc = object->refCounts.decrementUnownedShouldFree(1); assert(!dealloc && "retain-strong-and-release caused dealloc?"); (void) dealloc; } void swift::swift_unownedCheck(HeapObject *object) { if (!object) return; - assert(object->weakRefCount.getCount() && - "object is not currently weakly retained"); + assert(object->refCounts.getUnownedCount() && + "object is not currently unowned-retained"); - if (object->refCount.isDeallocating()) - _swift_abortRetainUnowned(object); + if (object->refCounts.isDeiniting()) + swift::swift_abortRetainUnowned(object); } -// Declared extern "C" LLVM_LIBRARY_VISIBILITY above. 
+// Declared extern "C" LLVM_LIBRARY_VISIBILITY in RefCount.h void _swift_release_dealloc(HeapObject *object) SWIFT_CC(RegisterPreservingCC_IMPL) { asFullMetadata(object->metadata)->destroy(object); @@ -550,7 +546,7 @@ void swift_deallocPartialClassInstance(HeapObject *object, #endif // The strong reference count should be +1 -- tear down the object - bool shouldDeallocate = object->refCount.decrementShouldDeallocate(); + bool shouldDeallocate = object->refCounts.decrementShouldDeinit(1); assert(shouldDeallocate); (void) shouldDeallocate; swift_deallocClassInstance(object, allocatedSize, allocatedAlignMask); @@ -572,7 +568,7 @@ void swift::swift_deallocObject(HeapObject *object, size_t allocatedSize, size_t allocatedAlignMask) SWIFT_CC(RegisterPreservingCC_IMPL) { assert(isAlignmentMask(allocatedAlignMask)); - assert(object->refCount.isDeallocating()); + assert(object->refCounts.isDeiniting()); #ifdef SWIFT_RUNTIME_CLOBBER_FREED_OBJECTS memset_pattern8((uint8_t *)object + sizeof(HeapObject), "\xAB\xAD\x1D\xEA\xF4\xEE\xD0\bB9", @@ -582,6 +578,7 @@ void swift::swift_deallocObject(HeapObject *object, size_t allocatedSize, // If we are tracking leaks, stop tracking this object. SWIFT_LEAKS_STOP_TRACKING_OBJECT(object); + // Drop the initial weak retain of the object. // // If the outstanding weak retain count is 1 (i.e. only the initial @@ -647,157 +644,56 @@ void swift::swift_deallocObject(HeapObject *object, size_t allocatedSize, // release, we will fall back on swift_unownedRelease, which does an // atomic decrement (and has the ability to reconstruct // allocatedSize and allocatedAlignMask). - if (object->weakRefCount.getCount() == 1) { + // + // Note: This shortcut is NOT an optimization. + // Some allocations passed to swift_deallocObject() are not compatible + // with swift_unownedRelease() because they do not have ClassMetadata. 
+ + if (object->refCounts.canBeFreedNow()) { + // object state DEINITING -> DEAD SWIFT_RT_ENTRY_CALL(swift_slowDealloc) (object, allocatedSize, allocatedAlignMask); } else { + // object state DEINITING -> DEINITED SWIFT_RT_ENTRY_CALL(swift_unownedRelease)(object); } } -enum: uintptr_t { - WR_NATIVE = 1<<(swift::heap_object_abi::ObjCReservedLowBits), - WR_READING = 1<<(swift::heap_object_abi::ObjCReservedLowBits+1), - - WR_NATIVEMASK = WR_NATIVE | swift::heap_object_abi::ObjCReservedBitsMask, -}; - -static_assert(WR_READING < alignof(void*), - "weakref lock bit mustn't interfere with real pointer bits"); - -enum: short { - WR_SPINLIMIT = 64, -}; - -bool swift::isNativeSwiftWeakReference(WeakReference *ref) { - return (ref->Value & WR_NATIVEMASK) == WR_NATIVE; -} void swift::swift_weakInit(WeakReference *ref, HeapObject *value) { - ref->Value = (uintptr_t)value | WR_NATIVE; - SWIFT_RT_ENTRY_CALL(swift_unownedRetain)(value); + ref->nativeInit(value); } -void swift::swift_weakAssign(WeakReference *ref, HeapObject *newValue) { - SWIFT_RT_ENTRY_CALL(swift_unownedRetain)(newValue); - auto oldValue = (HeapObject*) (ref->Value & ~WR_NATIVE); - ref->Value = (uintptr_t)newValue | WR_NATIVE; - SWIFT_RT_ENTRY_CALL(swift_unownedRelease)(oldValue); +void swift::swift_weakAssign(WeakReference *ref, HeapObject *value) { + ref->nativeAssign(value); } HeapObject *swift::swift_weakLoadStrong(WeakReference *ref) { - if (ref->Value == (uintptr_t)nullptr) { - return nullptr; - } - - // ref might be visible to other threads - auto ptr = __atomic_fetch_or(&ref->Value, WR_READING, __ATOMIC_RELAXED); - while (ptr & WR_READING) { - short c = 0; - while (__atomic_load_n(&ref->Value, __ATOMIC_RELAXED) & WR_READING) { - if (++c == WR_SPINLIMIT) { - std::this_thread::yield(); - c -= 1; - } - } - ptr = __atomic_fetch_or(&ref->Value, WR_READING, __ATOMIC_RELAXED); - } - - auto object = (HeapObject*)(ptr & ~WR_NATIVE); - if (object == nullptr) { - __atomic_store_n(&ref->Value, (uintptr_t)nullptr, __ATOMIC_RELAXED); - return nullptr; - } - if (object->refCount.isDeallocating()) { - __atomic_store_n(&ref->Value, (uintptr_t)nullptr, __ATOMIC_RELAXED); - SWIFT_RT_ENTRY_CALL(swift_unownedRelease)(object); - return nullptr; - } - auto result = swift_tryRetain(object); - __atomic_store_n(&ref->Value, ptr, __ATOMIC_RELAXED); - return result; + return ref->nativeLoadStrong(); } HeapObject *swift::swift_weakTakeStrong(WeakReference *ref) { - auto object = (HeapObject*) (ref->Value & ~WR_NATIVE); - if (object == nullptr) return nullptr; - auto result = swift_tryRetain(object); - ref->Value = (uintptr_t)nullptr; - swift_unownedRelease(object); - return result; + return ref->nativeTakeStrong(); } void swift::swift_weakDestroy(WeakReference *ref) { - auto tmp = (HeapObject*) (ref->Value & ~WR_NATIVE); - ref->Value = (uintptr_t)nullptr; - SWIFT_RT_ENTRY_CALL(swift_unownedRelease)(tmp); + ref->nativeDestroy(); } void swift::swift_weakCopyInit(WeakReference *dest, WeakReference *src) { - if (src->Value == (uintptr_t)nullptr) { - dest->Value = (uintptr_t)nullptr; - return; - } - - // src might be visible to other threads - auto ptr = __atomic_fetch_or(&src->Value, WR_READING, __ATOMIC_RELAXED); - while (ptr & WR_READING) { - short c = 0; - while (__atomic_load_n(&src->Value, __ATOMIC_RELAXED) & WR_READING) { - if (++c == WR_SPINLIMIT) { - std::this_thread::yield(); - c -= 1; - } - } - ptr = __atomic_fetch_or(&src->Value, WR_READING, __ATOMIC_RELAXED); - } - - auto object = (HeapObject*)(ptr & ~WR_NATIVE); - if (object == nullptr) { - 
__atomic_store_n(&src->Value, (uintptr_t)nullptr, __ATOMIC_RELAXED); - dest->Value = (uintptr_t)nullptr; - } else if (object->refCount.isDeallocating()) { - __atomic_store_n(&src->Value, (uintptr_t)nullptr, __ATOMIC_RELAXED); - SWIFT_RT_ENTRY_CALL(swift_unownedRelease)(object); - dest->Value = (uintptr_t)nullptr; - } else { - SWIFT_RT_ENTRY_CALL(swift_unownedRetain)(object); - __atomic_store_n(&src->Value, ptr, __ATOMIC_RELAXED); - dest->Value = (uintptr_t)object | WR_NATIVE; - } + dest->nativeCopyInit(src); } void swift::swift_weakTakeInit(WeakReference *dest, WeakReference *src) { - auto object = (HeapObject*) (src->Value & ~WR_NATIVE); - if (object == nullptr) { - dest->Value = (uintptr_t)nullptr; - } else if (object->refCount.isDeallocating()) { - dest->Value = (uintptr_t)nullptr; - SWIFT_RT_ENTRY_CALL(swift_unownedRelease)(object); - } else { - dest->Value = (uintptr_t)object | WR_NATIVE; - } - src->Value = (uintptr_t)nullptr; + dest->nativeTakeInit(src); } void swift::swift_weakCopyAssign(WeakReference *dest, WeakReference *src) { - if (dest->Value) { - auto object = (HeapObject*) (dest->Value & ~WR_NATIVE); - SWIFT_RT_ENTRY_CALL(swift_unownedRelease)(object); - } - swift_weakCopyInit(dest, src); + dest->nativeCopyAssign(src); } void swift::swift_weakTakeAssign(WeakReference *dest, WeakReference *src) { - if (dest->Value) { - auto object = (HeapObject*) (dest->Value & ~WR_NATIVE); - SWIFT_RT_ENTRY_CALL(swift_unownedRelease)(object); - } - swift_weakTakeInit(dest, src); + dest->nativeTakeAssign(src); } -void swift::_swift_abortRetainUnowned(const void *object) { - (void)object; - swift::crash("attempted to retain deallocated object"); -} diff --git a/stdlib/public/runtime/MetadataImpl.h b/stdlib/public/runtime/MetadataImpl.h index 95ffa3073a744..28d79f1c65309 100644 --- a/stdlib/public/runtime/MetadataImpl.h +++ b/stdlib/public/runtime/MetadataImpl.h @@ -47,6 +47,9 @@ #if SWIFT_OBJC_INTEROP #include "swift/Runtime/ObjCBridge.h" #endif + +#include "WeakReference.h" + #include #include diff --git a/stdlib/public/runtime/Private.h b/stdlib/public/runtime/Private.h index d1398e583202d..555c6d802fb82 100644 --- a/stdlib/public/runtime/Private.h +++ b/stdlib/public/runtime/Private.h @@ -42,9 +42,6 @@ namespace swift { const ProtocolDescriptor *theProtocol); #endif - extern "C" LLVM_LIBRARY_VISIBILITY LLVM_ATTRIBUTE_NORETURN - void _swift_abortRetainUnowned(const void *object); - /// Is the given value a valid alignment mask? static inline bool isAlignmentMask(size_t mask) { // mask == xyz01111... @@ -127,6 +124,23 @@ namespace swift { LLVM_LIBRARY_VISIBILITY bool usesNativeSwiftReferenceCounting(const ClassMetadata *theClass); + static inline + bool objectUsesNativeSwiftReferenceCounting(const void *object) { + assert(!isObjCTaggedPointerOrNull(object)); +#if SWIFT_HAS_OPAQUE_ISAS + // Fast path for opaque ISAs. We don't want to call + // _swift_getClassOfAllocated as that will call object_getClass. + // Instead we can look at the bits in the ISA and tell if its a + // non-pointer opaque ISA which means it is definitely an ObjC + // object and doesn't use native swift reference counting. + if (_swift_isNonPointerIsaObjCClass(object)) + return false; + return usesNativeSwiftReferenceCounting(_swift_getClassOfAllocatedFromPointer(object)); +#else + return usesNativeSwiftReferenceCounting(_swift_getClassOfAllocated(object)); +#endif + } + /// Get the superclass pointer value used for Swift root classes. 
/// Note that this function may return a nullptr on non-objc platforms, /// where there is no common root class. rdar://problem/18987058 diff --git a/stdlib/public/runtime/RefCount.cpp b/stdlib/public/runtime/RefCount.cpp new file mode 100644 index 0000000000000..920df042660a1 --- /dev/null +++ b/stdlib/public/runtime/RefCount.cpp @@ -0,0 +1,137 @@ +//===--- RefCount.cpp -----------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +#include "swift/Runtime/HeapObject.h" + +namespace swift { + +template +void RefCounts::incrementSlow(RefCountBits oldbits, + uint32_t n) { + if (oldbits.hasSideTable()) { + // Out-of-line slow path. + auto side = oldbits.getSideTable(); + side->incrementStrong(n); + } + else { + // Retain count overflow. + swift::swift_abortRetainOverflow(); + } +} +template void RefCounts::incrementSlow(InlineRefCountBits oldbits, uint32_t n); +template void RefCounts::incrementSlow(SideTableRefCountBits oldbits, uint32_t n); + +template +void RefCounts::incrementNonAtomicSlow(RefCountBits oldbits, + uint32_t n) { + if (oldbits.hasSideTable()) { + // Out-of-line slow path. + auto side = oldbits.getSideTable(); + side->incrementStrong(n); // FIXME: can there be a nonatomic impl? + } else { + swift::swift_abortRetainOverflow(); + } +} +template void RefCounts::incrementNonAtomicSlow(InlineRefCountBits oldbits, uint32_t n); +template void RefCounts::incrementNonAtomicSlow(SideTableRefCountBits oldbits, uint32_t n); + +template +bool RefCounts::tryIncrementSlow(RefCountBits oldbits) { + if (oldbits.hasSideTable()) + return oldbits.getSideTable()->tryIncrement(); + else + swift::swift_abortRetainOverflow(); +} +template bool RefCounts::tryIncrementSlow(InlineRefCountBits oldbits); +template bool RefCounts::tryIncrementSlow(SideTableRefCountBits oldbits); + +template +bool RefCounts::tryIncrementAndPinSlow(RefCountBits oldbits) { + if (oldbits.hasSideTable()) + return oldbits.getSideTable()->tryIncrementAndPin(); + else + swift::swift_abortRetainOverflow(); +} +template bool RefCounts::tryIncrementAndPinSlow(InlineRefCountBits oldbits); +template bool RefCounts::tryIncrementAndPinSlow(SideTableRefCountBits oldbits); + +template +bool RefCounts::tryIncrementAndPinNonAtomicSlow(RefCountBits oldbits) { + // No nonatomic implementation provided. + return tryIncrementAndPinSlow(oldbits); +} +template bool RefCounts::tryIncrementAndPinNonAtomicSlow(InlineRefCountBits oldbits); +template bool RefCounts::tryIncrementAndPinNonAtomicSlow(SideTableRefCountBits oldbits); + + +// Return an object's side table, allocating it if necessary. +// Returns null if the object is deiniting. +// SideTableRefCountBits specialization intentionally does not exist. +template <> +HeapObjectSideTableEntry* RefCounts::allocateSideTable() +{ + auto oldbits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME); + + // Preflight failures before allocating a new side table. + if (oldbits.hasSideTable()) { + // Already have a side table. Return it. + return oldbits.getSideTable(); + } + else if (oldbits.getIsDeiniting()) { + // Already past the start of deinit. Do nothing. 
+ return nullptr; + } + + // Preflight passed. Allocate a side table. + + // FIXME: custom side table allocator + HeapObjectSideTableEntry *side = new HeapObjectSideTableEntry(getHeapObject()); + + auto newbits = InlineRefCountBits(side); + + do { + if (oldbits.hasSideTable()) { + // Already have a side table. Return it and delete ours. + // Read before delete to streamline barriers. + auto result = oldbits.getSideTable(); + delete side; + return result; + } + else if (oldbits.getIsDeiniting()) { + // Already past the start of deinit. Do nothing. + return nullptr; + } + + side->initRefCounts(oldbits); + + } while (! refCounts.compare_exchange_weak(oldbits, newbits, + std::memory_order_release, + std::memory_order_relaxed)); + return side; +} + + +// SideTableRefCountBits specialization intentionally does not exist. +template <> +HeapObjectSideTableEntry* RefCounts::formWeakReference() +{ + auto side = allocateSideTable(); + if (side) + return side->incrementWeak(); + else + return nullptr; +} + +// namespace swift +} + + diff --git a/stdlib/public/runtime/Reflection.mm b/stdlib/public/runtime/Reflection.mm index 9045de5322beb..ebeed8c9dc04b 100644 --- a/stdlib/public/runtime/Reflection.mm +++ b/stdlib/public/runtime/Reflection.mm @@ -20,6 +20,7 @@ #include "swift/Runtime/Debug.h" #include "swift/Runtime/Portability.h" #include "Private.h" +#include "WeakReference.h" #include "llvm/Support/Compiler.h" #include #include diff --git a/stdlib/public/runtime/SwiftObject.mm b/stdlib/public/runtime/SwiftObject.mm index e2b483c26d9c4..81facfbc1da03 100644 --- a/stdlib/public/runtime/SwiftObject.mm +++ b/stdlib/public/runtime/SwiftObject.mm @@ -35,6 +35,7 @@ #include "../SwiftShims/RuntimeShims.h" #include "Private.h" #include "SwiftObject.h" +#include "WeakReference.h" #include "swift/Runtime/Debug.h" #if SWIFT_OBJC_INTEROP #include @@ -420,24 +421,10 @@ - (BOOL)isNSValue__ { return NO; } static uintptr_t const objectPointerIsObjCBit = 0x00000002U; #endif -static bool usesNativeSwiftReferenceCounting_allocated(const void *object) { - assert(!isObjCTaggedPointerOrNull(object)); -#if SWIFT_HAS_OPAQUE_ISAS - // Fast path for opaque ISAs. We don't want to call _swift_getClassOfAllocated - // as that will call object_getClass. Instead we can look at the bits in the - // ISA and tell if its a non-pointer opaque ISA which means it is definitely - // an ObjC object and doesn't use native swift reference counting. 
- if (_swift_isNonPointerIsaObjCClass(object)) - return false; - return usesNativeSwiftReferenceCounting(_swift_getClassOfAllocatedFromPointer(object)); -#endif - return usesNativeSwiftReferenceCounting(_swift_getClassOfAllocated(object)); -} - void swift::swift_unknownRetain_n(void *object, int n) SWIFT_CC(DefaultCC_IMPL) { if (isObjCTaggedPointerOrNull(object)) return; - if (usesNativeSwiftReferenceCounting_allocated(object)) { + if (objectUsesNativeSwiftReferenceCounting(object)) { swift_retain_n(static_cast(object), n); return; } @@ -448,7 +435,7 @@ static bool usesNativeSwiftReferenceCounting_allocated(const void *object) { void swift::swift_unknownRelease_n(void *object, int n) SWIFT_CC(DefaultCC_IMPL) { if (isObjCTaggedPointerOrNull(object)) return; - if (usesNativeSwiftReferenceCounting_allocated(object)) + if (objectUsesNativeSwiftReferenceCounting(object)) return swift_release_n(static_cast(object), n); for (int i = 0; i < n; ++i) objc_release(static_cast(object)); @@ -457,7 +444,7 @@ static bool usesNativeSwiftReferenceCounting_allocated(const void *object) { void swift::swift_unknownRetain(void *object) SWIFT_CC(DefaultCC_IMPL) { if (isObjCTaggedPointerOrNull(object)) return; - if (usesNativeSwiftReferenceCounting_allocated(object)) { + if (objectUsesNativeSwiftReferenceCounting(object)) { swift_retain(static_cast(object)); return; } @@ -467,7 +454,7 @@ static bool usesNativeSwiftReferenceCounting_allocated(const void *object) { void swift::swift_unknownRelease(void *object) SWIFT_CC(DefaultCC_IMPL) { if (isObjCTaggedPointerOrNull(object)) return; - if (usesNativeSwiftReferenceCounting_allocated(object)) + if (objectUsesNativeSwiftReferenceCounting(object)) return SWIFT_RT_ENTRY_CALL(swift_release)(static_cast(object)); return objc_release(static_cast(object)); } @@ -475,7 +462,7 @@ static bool usesNativeSwiftReferenceCounting_allocated(const void *object) { void swift::swift_nonatomic_unknownRetain_n(void *object, int n) SWIFT_CC(DefaultCC_IMPL) { if (isObjCTaggedPointerOrNull(object)) return; - if (usesNativeSwiftReferenceCounting_allocated(object)) { + if (objectUsesNativeSwiftReferenceCounting(object)) { swift_nonatomic_retain_n(static_cast(object), n); return; } @@ -486,7 +473,7 @@ static bool usesNativeSwiftReferenceCounting_allocated(const void *object) { void swift::swift_nonatomic_unknownRelease_n(void *object, int n) SWIFT_CC(DefaultCC_IMPL) { if (isObjCTaggedPointerOrNull(object)) return; - if (usesNativeSwiftReferenceCounting_allocated(object)) + if (objectUsesNativeSwiftReferenceCounting(object)) return swift_nonatomic_release_n(static_cast(object), n); for (int i = 0; i < n; ++i) objc_release(static_cast(object)); @@ -495,7 +482,7 @@ static bool usesNativeSwiftReferenceCounting_allocated(const void *object) { void swift::swift_nonatomic_unknownRetain(void *object) SWIFT_CC(DefaultCC_IMPL) { if (isObjCTaggedPointerOrNull(object)) return; - if (usesNativeSwiftReferenceCounting_allocated(object)) { + if (objectUsesNativeSwiftReferenceCounting(object)) { swift_nonatomic_retain(static_cast(object)); return; } @@ -505,7 +492,7 @@ static bool usesNativeSwiftReferenceCounting_allocated(const void *object) { void swift::swift_nonatomic_unknownRelease(void *object) SWIFT_CC(DefaultCC_IMPL) { if (isObjCTaggedPointerOrNull(object)) return; - if (usesNativeSwiftReferenceCounting_allocated(object)) + if (objectUsesNativeSwiftReferenceCounting(object)) return SWIFT_RT_ENTRY_CALL(swift_release)(static_cast(object)); return objc_release(static_cast(object)); } @@ -701,7 +688,7 @@ 
static bool isNonNative_unTagged_bridgeObject(void *object) { #if SWIFT_OBJC_INTEROP /*****************************************************************************/ -/**************************** UNOWNED REFERENCES *****************************/ +/************************ UNKNOWN UNOWNED REFERENCES *************************/ /*****************************************************************************/ // Swift's native unowned references are implemented purely with @@ -808,7 +795,7 @@ static bool classof(const UnownedReference *ref) { static bool isObjCForUnownedReference(void *value) { return (isObjCTaggedPointer(value) || - !usesNativeSwiftReferenceCounting_allocated(value)); + !objectUsesNativeSwiftReferenceCounting(value)); } void swift::swift_unknownUnownedInit(UnownedReference *dest, void *value) { @@ -848,7 +835,7 @@ static bool isObjCForUnownedReference(void *value) { } else if (auto objcRef = dyn_cast(ref)) { auto result = (void*) objc_loadWeakRetained(&objcRef->storage()->WeakRef); if (result == nullptr) { - _swift_abortRetainUnowned(nullptr); + swift::swift_abortRetainUnowned(nullptr); } return result; } else { @@ -863,7 +850,7 @@ static bool isObjCForUnownedReference(void *value) { auto storage = objcRef->storage(); auto result = (void*) objc_loadWeakRetained(&objcRef->storage()->WeakRef); if (result == nullptr) { - _swift_abortRetainUnowned(nullptr); + swift::swift_abortRetainUnowned(nullptr); } delete storage; return result; @@ -936,138 +923,45 @@ static bool isObjCForUnownedReference(void *value) { } /*****************************************************************************/ -/****************************** WEAK REFERENCES ******************************/ +/************************** UNKNOWN WEAK REFERENCES **************************/ /*****************************************************************************/ -// FIXME: these are not really valid implementations; they assume too -// much about the implementation of ObjC weak references, and the -// loads from ->Value can race with clears by the runtime. - -static void doWeakInit(WeakReference *addr, void *value, bool valueIsNative) { - assert(value != nullptr); - if (valueIsNative) { - swift_weakInit(addr, (HeapObject*) value); - } else { - objc_initWeak((id*) &addr->Value, (id) value); - } -} - -static void doWeakDestroy(WeakReference *addr, bool valueIsNative) { - if (valueIsNative) { - swift_weakDestroy(addr); - } else { - objc_destroyWeak((id*) &addr->Value); - } -} - -void swift::swift_unknownWeakInit(WeakReference *addr, void *value) { - if (isObjCTaggedPointerOrNull(value)) { - addr->Value = (uintptr_t) value; - return; - } - doWeakInit(addr, value, usesNativeSwiftReferenceCounting_allocated(value)); +void swift::swift_unknownWeakInit(WeakReference *ref, void *value) { + return ref->unknownInit(value); } -void swift::swift_unknownWeakAssign(WeakReference *addr, void *newValue) { - // If the incoming value is not allocated, this is just a destroy - // and re-initialize. - if (isObjCTaggedPointerOrNull(newValue)) { - swift_unknownWeakDestroy(addr); - addr->Value = (uintptr_t) newValue; - return; - } - - bool newIsNative = usesNativeSwiftReferenceCounting_allocated(newValue); - - // If the existing value is not allocated, this is just an initialize. - void *oldValue = (void*) addr->Value; - if (isObjCTaggedPointerOrNull(oldValue)) - return doWeakInit(addr, newValue, newIsNative); - - bool oldIsNative = isNativeSwiftWeakReference(addr); - - // If they're both native, we can use the native function. 
- if (oldIsNative && newIsNative) - return swift_weakAssign(addr, (HeapObject*) newValue); - - // If neither is native, we can use the ObjC function. - if (!oldIsNative && !newIsNative) - return (void) objc_storeWeak((id*) &addr->Value, (id) newValue); - - // Otherwise, destroy according to one set of semantics and - // re-initialize with the other. - doWeakDestroy(addr, oldIsNative); - doWeakInit(addr, newValue, newIsNative); +void swift::swift_unknownWeakAssign(WeakReference *ref, void *value) { + return ref->unknownAssign(value); } -void *swift::swift_unknownWeakLoadStrong(WeakReference *addr) { - if (isNativeSwiftWeakReference(addr)) { - return swift_weakLoadStrong(addr); - } - - void *value = (void*) addr->Value; - if (isObjCTaggedPointerOrNull(value)) return value; - - return (void*) objc_loadWeakRetained((id*) &addr->Value); +void *swift::swift_unknownWeakLoadStrong(WeakReference *ref) { + return ref->unknownLoadStrong(); } -void *swift::swift_unknownWeakTakeStrong(WeakReference *addr) { - if (isNativeSwiftWeakReference(addr)) { - return swift_weakTakeStrong(addr); - } - - void *value = (void*) addr->Value; - if (isObjCTaggedPointerOrNull(value)) return value; - - void *result = (void*) objc_loadWeakRetained((id*) &addr->Value); - objc_destroyWeak((id*) &addr->Value); - return result; +void *swift::swift_unknownWeakTakeStrong(WeakReference *ref) { + return ref->unknownTakeStrong(); } -void swift::swift_unknownWeakDestroy(WeakReference *addr) { - if (isNativeSwiftWeakReference(addr)) { - return swift_weakDestroy(addr); - } - - id object = (id) addr->Value; - if (isObjCTaggedPointerOrNull(object)) return; - objc_destroyWeak((id*) &addr->Value); +void swift::swift_unknownWeakDestroy(WeakReference *ref) { + ref->unknownDestroy(); } void swift::swift_unknownWeakCopyInit(WeakReference *dest, WeakReference *src) { - if (isNativeSwiftWeakReference(src)) { - return swift_weakCopyInit(dest, src); - } - - id object = (id) src->Value; - if (isObjCTaggedPointerOrNull(object)) { - dest->Value = (uintptr_t) object; - } else { - objc_copyWeak((id*) &dest->Value, (id*) src); - } + dest->unknownCopyInit(src); } void swift::swift_unknownWeakTakeInit(WeakReference *dest, WeakReference *src) { - if (isNativeSwiftWeakReference(src)) { - return swift_weakTakeInit(dest, src); - } - - id object = (id) src->Value; - if (isObjCTaggedPointerOrNull(object)) { - dest->Value = (uintptr_t) object; - } else { - objc_moveWeak((id*) &dest->Value, (id*) &src->Value); - } + dest->unknownTakeInit(src); } -void swift::swift_unknownWeakCopyAssign(WeakReference *dest, WeakReference *src) { - if (dest == src) return; - swift_unknownWeakDestroy(dest); - swift_unknownWeakCopyInit(dest, src); +void swift::swift_unknownWeakCopyAssign(WeakReference *dest, + WeakReference *src) { + dest->unknownCopyAssign(src); } -void swift::swift_unknownWeakTakeAssign(WeakReference *dest, WeakReference *src) { - if (dest == src) return; - swift_unknownWeakDestroy(dest); - swift_unknownWeakTakeInit(dest, src); +void swift::swift_unknownWeakTakeAssign(WeakReference *dest, + WeakReference *src) { + dest->unknownTakeAssign(src); } + +// SWIFT_OBJC_INTEROP #endif /*****************************************************************************/ @@ -1323,15 +1217,15 @@ static bool usesNativeSwiftReferenceCounting_nonNull( ) { assert(object != nullptr); return !isObjCTaggedPointer(object) && - usesNativeSwiftReferenceCounting_allocated(object); + objectUsesNativeSwiftReferenceCounting(object); } #endif bool 
swift::swift_isUniquelyReferenced_nonNull_native(const HeapObject *object) SWIFT_CC(RegisterPreservingCC_IMPL) { assert(object != nullptr); - assert(!object->refCount.isDeallocating()); - return object->refCount.isUniquelyReferenced(); + assert(!object->refCounts.isDeiniting()); + return object->refCounts.isUniquelyReferenced(); } bool swift::swift_isUniquelyReferenced_native(const HeapObject* object) { @@ -1436,8 +1330,8 @@ static bool usesNativeSwiftReferenceCounting_nonNull( bool swift::swift_isUniquelyReferencedOrPinned_nonNull_native( const HeapObject *object) SWIFT_CC(RegisterPreservingCC_IMPL) { assert(object != nullptr); - assert(!object->refCount.isDeallocating()); - return object->refCount.isUniquelyReferencedOrPinned(); + assert(!object->refCounts.isDeiniting()); + return object->refCounts.isUniquelyReferencedOrPinned(); } using ClassExtents = TwoWordPair; diff --git a/stdlib/public/runtime/WeakReference.h b/stdlib/public/runtime/WeakReference.h new file mode 100644 index 0000000000000..aa8f4f41d2000 --- /dev/null +++ b/stdlib/public/runtime/WeakReference.h @@ -0,0 +1,388 @@ +//===--- WeakReference.h - Swift weak references ----------------*- C++ -*-===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +// Swift weak reference implementation. +// +//===----------------------------------------------------------------------===// + +#ifndef SWIFT_RUNTIME_WEAKREFERENCE_H +#define SWIFT_RUNTIME_WEAKREFERENCE_H + +#include "swift/Runtime/Config.h" +#include "swift/Runtime/HeapObject.h" +#include "swift/Runtime/Metadata.h" + +#if SWIFT_OBJC_INTEROP +#include "swift/Runtime/ObjCBridge.h" +#endif + +#include "Private.h" + +#include + +namespace swift { + +// Note: This implementation of unknown weak makes several assumptions +// about ObjC's weak variables implementation: +// * Nil is stored verbatim. +// * Tagged pointer objects are stored verbatim with no side table entry. +// * Ordinary objects are stored with the LSB two bits (64-bit) or +// one bit (32-bit) all clear. The stored value otherwise need not be +// the pointed-to object. +// +// The Swift 3 implementation of unknown weak makes the following +// additional assumptions: +// * Ordinary objects are stored *verbatim* with the LSB *three* bits (64-bit) +// or *two* bits (32-bit) all clear. 
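Those storage assumptions are what make the native/ObjC split possible: because ObjC leaves the low bits of a stored weak value clear, the Swift runtime can claim one of them as a marker, and classifying a weak slot becomes a single mask-and-compare, as the WeakReferenceBits class below encodes. The sketch here shows the classification only, with placeholder constants; the real values come from the SWIFT_ABI_*_OBJC_WEAK_REFERENCE_MARKER_MASK / _VALUE definitions in ABI/System.h.

#include <cstdint>

// Placeholder constants for illustration; the runtime takes the real ones
// from ABI/System.h for the current architecture.
constexpr uintptr_t kMarkerMask  = 0x7;  // ObjC-reserved low bits plus one Swift bit
constexpr uintptr_t kMarkerValue = 0x4;  // Swift bit set, ObjC bits clear

// A weak slot is Swift-managed when it is nil or carries the native marker;
// anything else is handed to the ObjC weak machinery.
inline bool isNativeOrNull(uintptr_t storedBits) {
  return storedBits == 0 || (storedBits & kMarkerMask) == kMarkerValue;
}

// For a native value, stripping the marker recovers the side table pointer.
inline void *nativeSideTablePointer(uintptr_t storedBits) {
  return reinterpret_cast<void *>(storedBits & ~kMarkerMask);
}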
+ +// Thread-safety: +// +// Reading a weak reference must be thread-safe with respect to: +// * concurrent readers +// * concurrent weak reference zeroing due to deallocation of the +// pointed-to object +// * concurrent ObjC readers or zeroing (for non-native weak storage) +// +// Reading a weak reference is NOT thread-safe with respect to: +// * concurrent writes to the weak variable other than zeroing +// * concurrent destruction of the weak variable +// +// Writing a weak reference must be thread-safe with respect to: +// * concurrent weak reference zeroing due to deallocation of the +// pointed-to object +// * concurrent ObjC zeroing (for non-native weak storage) +// +// Writing a weak reference is NOT thread-safe with respect to: +// * concurrent reads +// * concurrent writes other than zeroing + +class WeakReferenceBits { + // On ObjC platforms, a weak variable may be controlled by the ObjC + // runtime or by the Swift runtime. NativeMarkerMask and NativeMarkerValue + // are used to distinguish them. + // if ((ptr & NativeMarkerMask) == NativeMarkerValue) it's Swift + // else it's ObjC + // NativeMarkerMask incorporates the ObjC tagged pointer bits + // plus one more bit that is set in Swift-controlled weak pointer values. + // Non-ObjC platforms don't use any markers. + enum : uintptr_t { +#if !SWIFT_OBJC_INTEROP + NativeMarkerMask = 0, + NativeMarkerValue = 0 +#elif __x86_64__ + NativeMarkerMask = SWIFT_ABI_X86_64_OBJC_WEAK_REFERENCE_MARKER_MASK, + NativeMarkerValue = SWIFT_ABI_X86_64_OBJC_WEAK_REFERENCE_MARKER_VALUE +#elif __i386__ + NativeMarkerMask = SWIFT_ABI_I386_OBJC_WEAK_REFERENCE_MARKER_MASK, + NativeMarkerValue = SWIFT_ABI_I386_OBJC_WEAK_REFERENCE_MARKER_VALUE +#elif __arm__ + NativeMarkerMask = SWIFT_ABI_ARM_OBJC_WEAK_REFERENCE_MARKER_MASK, + NativeMarkerValue = SWIFT_ABI_ARM_OBJC_WEAK_REFERENCE_MARKER_VALUE +#elif __arm64__ + NativeMarkerMask = SWIFT_ABI_ARM64_OBJC_WEAK_REFERENCE_MARKER_MASK, + NativeMarkerValue = SWIFT_ABI_ARM64_OBJC_WEAK_REFERENCE_MARKER_VALUE +#else + #error unknown architecture +#endif + }; + + static_assert((NativeMarkerMask & NativeMarkerValue) == NativeMarkerValue, + "native marker value must fall within native marker mask"); + static_assert((NativeMarkerMask & heap_object_abi::SwiftSpareBitsMask) + == NativeMarkerMask, + "native marker mask must fall within Swift spare bits"); +#if SWIFT_OBJC_INTEROP + static_assert((NativeMarkerMask & heap_object_abi::ObjCReservedBitsMask) + == heap_object_abi::ObjCReservedBitsMask, + "native marker mask must contain all ObjC tagged pointer bits"); + static_assert((NativeMarkerValue & heap_object_abi::ObjCReservedBitsMask) + == 0, + "native marker value must not interfere with ObjC bits"); +#endif + + uintptr_t bits; + + public: + LLVM_ATTRIBUTE_ALWAYS_INLINE + WeakReferenceBits() { } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + WeakReferenceBits(HeapObjectSideTableEntry *newValue) { + setNativeOrNull(newValue); + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + bool isNativeOrNull() const { + return bits == 0 || (bits & NativeMarkerMask) == NativeMarkerValue; + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + HeapObjectSideTableEntry *getNativeOrNull() const { + assert(isNativeOrNull()); + if (bits == 0) + return nullptr; + else + return + reinterpret_cast(bits & ~NativeMarkerMask); + } + + LLVM_ATTRIBUTE_ALWAYS_INLINE + void setNativeOrNull(HeapObjectSideTableEntry *newValue) { + assert((uintptr_t(newValue) & NativeMarkerMask) == 0); + if (newValue) + bits = uintptr_t(newValue) | NativeMarkerValue; + else + bits = 0; + } +}; + + +class 
WeakReference { + union { + std::atomic nativeValue; +#if SWIFT_OBJC_INTEROP + id nonnativeValue; +#endif + }; + + void destroyOldNativeBits(WeakReferenceBits oldBits) { + auto oldSide = oldBits.getNativeOrNull(); + if (oldSide) + oldSide->decrementWeak(); + } + + HeapObject *nativeLoadStrongFromBits(WeakReferenceBits bits) { + auto side = bits.getNativeOrNull(); + return side ? side->tryRetain() : nullptr; + } + + HeapObject *nativeTakeStrongFromBits(WeakReferenceBits bits) { + auto side = bits.getNativeOrNull(); + if (side) { + side->decrementWeak(); + return side->tryRetain(); + } else { + return nullptr; + } + } + + void nativeCopyInitFromBits(WeakReferenceBits srcBits) { + auto side = srcBits.getNativeOrNull(); + if (side) + side = side->incrementWeak(); + + nativeValue.store(WeakReferenceBits(side), std::memory_order_relaxed); + } + + public: + + WeakReference() = default; + + WeakReference(std::nullptr_t) + : nativeValue(WeakReferenceBits(nullptr)) { } + + WeakReference(const WeakReference& rhs) = delete; + + + void nativeInit(HeapObject *object) { + auto side = object ? object->refCounts.formWeakReference() : nullptr; + nativeValue.store(WeakReferenceBits(side), std::memory_order_relaxed); + } + + void nativeDestroy() { + auto oldBits = nativeValue.load(std::memory_order_relaxed); + nativeValue.store(nullptr, std::memory_order_relaxed); + destroyOldNativeBits(oldBits); + } + + void nativeAssign(HeapObject *newObject) { + if (newObject) { + assert(objectUsesNativeSwiftReferenceCounting(newObject) && + "weak assign native with non-native new object"); + } + + auto newSide = + newObject ? newObject->refCounts.formWeakReference() : nullptr; + auto newBits = WeakReferenceBits(newSide); + + auto oldBits = nativeValue.load(std::memory_order_relaxed); + nativeValue.store(newBits, std::memory_order_relaxed); + + assert(oldBits.isNativeOrNull() && + "weak assign native with non-native old object"); + destroyOldNativeBits(oldBits); + } + + HeapObject *nativeLoadStrong() { + auto bits = nativeValue.load(std::memory_order_relaxed); + return nativeLoadStrongFromBits(bits); + } + + HeapObject *nativeTakeStrong() { + auto bits = nativeValue.load(std::memory_order_relaxed); + nativeValue.store(nullptr, std::memory_order_relaxed); + return nativeTakeStrongFromBits(bits); + } + + void nativeCopyInit(WeakReference *src) { + auto srcBits = src->nativeValue.load(std::memory_order_relaxed); + return nativeCopyInitFromBits(srcBits); + } + + void nativeTakeInit(WeakReference *src) { + auto srcBits = src->nativeValue.load(std::memory_order_relaxed); + assert(srcBits.isNativeOrNull()); + src->nativeValue.store(nullptr, std::memory_order_relaxed); + nativeValue.store(srcBits, std::memory_order_relaxed); + } + + void nativeCopyAssign(WeakReference *src) { + if (this == src) return; + nativeDestroy(); + nativeCopyInit(src); + } + + void nativeTakeAssign(WeakReference *src) { + if (this == src) return; + nativeDestroy(); + nativeTakeInit(src); + } + +#if SWIFT_OBJC_INTEROP + private: + void nonnativeInit(id object) { + objc_initWeak(&nonnativeValue, object); + } + + void initWithNativeness(void *object, bool isNative) { + if (isNative) + nativeInit(static_cast(object)); + else + nonnativeInit(static_cast(object)); + } + + void nonnativeDestroy() { + objc_destroyWeak(&nonnativeValue); + } + + void destroyWithNativeness(bool isNative) { + if (isNative) + nativeDestroy(); + else + nonnativeDestroy(); + } + + public: + + void unknownInit(void *object) { + if (isObjCTaggedPointerOrNull(object)) { + nonnativeValue = 
static_cast(object); + } else { + bool isNative = objectUsesNativeSwiftReferenceCounting(object); + initWithNativeness(object, isNative); + } + } + + void unknownDestroy() { + auto oldBits = nativeValue.load(std::memory_order_relaxed); + destroyWithNativeness(oldBits.isNativeOrNull()); + } + + void unknownAssign(void *newObject) { + // If the new value is not allocated, simply destroy any old value. + if (isObjCTaggedPointerOrNull(newObject)) { + unknownDestroy(); + nonnativeValue = static_cast(newObject); + return; + } + + bool newIsNative = objectUsesNativeSwiftReferenceCounting(newObject); + + auto oldBits = nativeValue.load(std::memory_order_relaxed); + bool oldIsNative = oldBits.isNativeOrNull(); + + // If they're both native, use the native function. + if (oldIsNative && newIsNative) + return nativeAssign(static_cast(newObject)); + + // If neither is native, use ObjC. + if (!oldIsNative && !newIsNative) + return (void) objc_storeWeak(&nonnativeValue, static_cast(newObject)); + + // They don't match. Destroy and re-initialize. + destroyWithNativeness(oldIsNative); + initWithNativeness(newObject, newIsNative); + } + + void *unknownLoadStrong() { + auto bits = nativeValue.load(std::memory_order_relaxed); + if (bits.isNativeOrNull()) + return nativeLoadStrongFromBits(bits); + else + return objc_loadWeakRetained(&nonnativeValue); + } + + void *unknownTakeStrong() { + auto bits = nativeValue.load(std::memory_order_relaxed); + if (bits.isNativeOrNull()) { + nativeValue.store(nullptr, std::memory_order_relaxed); + return nativeTakeStrongFromBits(bits); + } + else { + id result = objc_loadWeakRetained(&nonnativeValue); + objc_destroyWeak(&nonnativeValue); + return result; + } + } + + void unknownCopyInit(WeakReference *src) { + auto srcBits = src->nativeValue.load(std::memory_order_relaxed); + if (srcBits.isNativeOrNull()) + nativeCopyInitFromBits(srcBits); + else + objc_copyWeak(&nonnativeValue, &src->nonnativeValue); + } + + void unknownTakeInit(WeakReference *src) { + auto srcBits = src->nativeValue.load(std::memory_order_relaxed); + if (srcBits.isNativeOrNull()) + nativeTakeInit(src); + else + objc_moveWeak(&nonnativeValue, &src->nonnativeValue); + } + + void unknownCopyAssign(WeakReference *src) { + if (this == src) return; + unknownDestroy(); + unknownCopyInit(src); + } + + void unknownTakeAssign(WeakReference *src) { + if (this == src) return; + unknownDestroy(); + unknownTakeInit(src); + } + +// SWIFT_OBJC_INTEROP +#endif + +}; + +static_assert(sizeof(WeakReference) == sizeof(void*), + "incorrect WeakReference size"); +static_assert(alignof(WeakReference) == alignof(void*), + "incorrect WeakReference alignment"); + +// namespace swift +} + +#endif diff --git a/unittests/runtime/CMakeLists.txt b/unittests/runtime/CMakeLists.txt index 797bbf0a6809f..347746e6faa46 100644 --- a/unittests/runtime/CMakeLists.txt +++ b/unittests/runtime/CMakeLists.txt @@ -22,6 +22,8 @@ if(("${SWIFT_HOST_VARIANT_SDK}" STREQUAL "${SWIFT_PRIMARY_VARIANT_SDK}") AND ${FOUNDATION_LIBRARY} swiftStdlibUnittest${SWIFT_PRIMARY_VARIANT_SUFFIX} ) + elseif(SWIFT_HOST_VARIANT STREQUAL "linux") + list(APPEND PLATFORM_TARGET_LINK_LIBRARIES "atomic") elseif(SWIFT_HOST_VARIANT STREQUAL "freebsd") find_library(EXECINFO_LIBRARY execinfo) list(APPEND PLATFORM_TARGET_LINK_LIBRARIES diff --git a/unittests/runtime/LongTests/CMakeLists.txt b/unittests/runtime/LongTests/CMakeLists.txt index 54befa11b67f6..5c28a79f07813 100644 --- a/unittests/runtime/LongTests/CMakeLists.txt +++ b/unittests/runtime/LongTests/CMakeLists.txt @@ -16,6 
 #      ${FOUNDATION_LIBRARY}
 #      swiftStdlibUnittest${SWIFT_PRIMARY_VARIANT_SUFFIX}
 #      )
+  elseif(SWIFT_HOST_VARIANT STREQUAL "linux")
+    list(APPEND PLATFORM_TARGET_LINK_LIBRARIES
+      "atomic"
+      )
   elseif(SWIFT_HOST_VARIANT STREQUAL "freebsd")
     find_library(EXECINFO_LIBRARY execinfo)
     list(APPEND PLATFORM_TARGET_LINK_LIBRARIES
diff --git a/unittests/runtime/LongTests/LongRefcounting.cpp b/unittests/runtime/LongTests/LongRefcounting.cpp
index 3abe49702a5e7..82a1df63c465e 100644
--- a/unittests/runtime/LongTests/LongRefcounting.cpp
+++ b/unittests/runtime/LongTests/LongRefcounting.cpp
@@ -15,23 +15,89 @@
 #include "swift/Basic/ManglingMacros.h"
 #include "gtest/gtest.h"
 
+#ifdef __APPLE__
+// FIXME: is EXPECT_UNALLOCATED reliable enough for CI?
+// EXPECT_ALLOCATED may fail falsely if the memory is re-allocated.
+# include <malloc/malloc.h>
+# define EXPECT_ALLOCATED(p) EXPECT_NE(0u, malloc_size(p))
+# define EXPECT_UNALLOCATED(p) EXPECT_EQ(0u, malloc_size(p))
+#else
+// FIXME: heap assertion for other platforms?
+# define EXPECT_ALLOCATED(p) do {} while (0)
+# define EXPECT_UNALLOCATED(p) do {} while (0)
+#endif
+
 using namespace swift;
 
 struct TestObject : HeapObject {
+  // *Addr = Value during deinit
   size_t *Addr;
   size_t Value;
+
+  // Check lifecycle state DEINITING during deinit
+  bool CheckLifecycle;
+
+  // Weak variable to check in CheckLifecycle. nullptr skips the check.
+  // On entry to deinit: must point to object
+  // On exit from deinit: is destroyed
+  WeakReference *WeakRef;
+
+  TestObject(size_t *addr, size_t value)
+    : Addr(addr), Value(value), CheckLifecycle(false), WeakRef(nullptr)
+  { }
 };
 
-static SWIFT_CC(swift) void destroyTestObject(SWIFT_CONTEXT HeapObject *_object) {
+static SWIFT_CC(swift) void deinitTestObject(SWIFT_CONTEXT HeapObject *_object) {
   auto object = static_cast<TestObject *>(_object);
   assert(object->Addr && "object already deallocated");
+
+  if (object->CheckLifecycle) {
+    // RC ok
+    swift_retain(object);
+    swift_retain(object);
+    swift_release(object);
+    swift_release(object);
+    // FIXME: RC underflow during deinit?
+
+    // URC load crashes
+    // URC increment OK
+    // URC decrement OK
+    ASSERT_DEATH(swift_unownedCheck(object),
+                 "attempted to read an unowned reference");
+    swift_unownedRetain(object);
+    swift_unownedRetain(object);
+    swift_unownedRelease(object);
+    swift_unownedRelease(object);
+
+    if (object->WeakRef) {
+      // WRC load is nil
+      // WRC increment is nil
+      // WRC decrement OK
+
+      // WRC -1
+      auto weak_value = swift_weakLoadStrong(object->WeakRef);
+      EXPECT_EQ(nullptr, weak_value);
+      swift_weakDestroy(object->WeakRef);
+
+      // WRC no change
+      swift_weakInit(object->WeakRef, object);
+      weak_value = swift_weakLoadStrong(object->WeakRef);
+      EXPECT_EQ(nullptr, weak_value);
+
+      // WRC no change
+      swift_weakInit(object->WeakRef, object);
+      weak_value = swift_weakLoadStrong(object->WeakRef);
+      EXPECT_EQ(nullptr, weak_value);
+    }
+  }
+
   *object->Addr = object->Value;
   object->Addr = nullptr;
   swift_deallocObject(object, sizeof(TestObject), alignof(TestObject) - 1);
 }
 
 static const FullMetadata<ClassMetadata> TestClassObjectMetadata = {
-  { { &destroyTestObject }, { &VALUE_WITNESS_SYM(Bo) } },
+  { { &deinitTestObject }, { &VALUE_WITNESS_SYM(Bo) } },
   { { { MetadataKind::Class } }, 0, /*rodata*/ 1,
   ClassFlags::UsesSwift1Refcounting, nullptr, 0, 0, 0, 0, 0 }
 };
@@ -39,13 +105,11 @@ static const FullMetadata<ClassMetadata> TestClassObjectMetadata = {
 /// Create an object that, when deallocated, stores the given value to
 /// the given pointer.
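The helper documented here is used by every test below; the lifecycle tests additionally opt into the deinit-time checks in deinitTestObject(). The following is an editorial sketch of that pattern, not part of the patch: it assumes this file's includes, the runtime entry points declared in swift/Runtime/HeapObject.h, and the in-file WeakReference stub defined further down, and the counts are illustrative only.

  TEST(LongRefcountingTest, lifecycle_sketch_DeathTest) {
    ::testing::FLAGS_gtest_death_test_style = "threadsafe";

    size_t deinited = 0;
    auto object = allocTestObject(&deinited, 1);
    object->CheckLifecycle = true;       // run the DEINITING-state checks

    WeakReference w_deinit;
    swift_weakInit(&w_deinit, object);   // destroyed inside deinitTestObject()
    object->WeakRef = &w_deinit;

    swift_release(object);               // strong count hits 0; deinit runs
    EXPECT_EQ(1u, deinited);
  }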
static TestObject *allocTestObject(size_t *addr, size_t value) { - auto result = - static_cast(swift_allocObject(&TestClassObjectMetadata, - sizeof(TestObject), - alignof(TestObject) - 1)); - result->Addr = addr; - result->Value = value; - return result; + auto buf = swift_allocObject(&TestClassObjectMetadata, + sizeof(TestObject), + alignof(TestObject) - 1); + + return new (buf) TestObject(addr, value); } @@ -53,7 +117,6 @@ static TestObject *allocTestObject(size_t *addr, size_t value) { // Max retain count and overflow checking // //////////////////////////////////////////// - template static void retainALot(TestObject *object, size_t &deallocated, uint64_t count) { @@ -74,8 +137,9 @@ static void releaseALot(TestObject *object, size_t &deallocated, } } -// 32-2 bits of retain count. -const uint64_t maxRC = (1ULL << (32 - 2)) - 1; +// Maximum legal retain count. +// 32-2 bits of extra retain count, plus 1 for the implicit retain. +const uint64_t maxRC = 1ULL << (32 - 2); TEST(LongRefcountingTest, retain_max) { size_t deallocated = 0; @@ -84,14 +148,23 @@ TEST(LongRefcountingTest, retain_max) { // RC is 1. // Retain to maxRC, release back to 1, then release and verify deallocation. retainALot(object, deallocated, maxRC - 1); - EXPECT_EQ(swift_retainCount(object), maxRC); releaseALot(object, deallocated, maxRC - 1); - EXPECT_EQ(swift_retainCount(object), 1u); EXPECT_EQ(0u, deallocated); swift_release(object); EXPECT_EQ(1u, deallocated); } +TEST(LongRefcountingTest, retain_overflow_DeathTest) { + size_t deallocated = 0; + auto object = allocTestObject(&deallocated, 1); + + // RC is 1. Retain to maxRC, then retain again and verify overflow error. + retainALot(object, deallocated, maxRC - 1); + EXPECT_EQ(0u, deallocated); + ASSERT_DEATH(swift_retain(object), + "object was retained too many times"); +} + TEST(LongRefcountingTest, nonatomic_retain_max) { size_t deallocated = 0; auto object = allocTestObject(&deallocated, 1); @@ -99,42 +172,632 @@ TEST(LongRefcountingTest, nonatomic_retain_max) { // RC is 1. // Retain to maxRC, release back to 1, then release and verify deallocation. retainALot(object, deallocated, maxRC - 1); - EXPECT_EQ(swift_retainCount(object), maxRC); releaseALot(object, deallocated, maxRC - 1); - EXPECT_EQ(swift_retainCount(object), 1u); EXPECT_EQ(0u, deallocated); swift_nonatomic_release(object); EXPECT_EQ(1u, deallocated); } -TEST(RefcountingTest, retain_overflow) { +TEST(LongRefcountingTest, nonatomic_retain_overflow_DeathTest) { size_t deallocated = 0; auto object = allocTestObject(&deallocated, 1); - // RC is 1. Retain to maxRC, then retain again and verify overflow. - retainALot(object, deallocated, maxRC - 1); - EXPECT_EQ(swift_retainCount(object), maxRC); + // RC is 1. Retain to maxRC, then retain again and verify overflow error. + retainALot(object, deallocated, maxRC - 1); EXPECT_EQ(0u, deallocated); + ASSERT_DEATH(swift_nonatomic_retain(object), + "object was retained too many times"); +} + + +////////////////////// +// Object lifecycle // +////////////////////// + +// FIXME: use the real WeakReference definition +namespace swift { + +class WeakReference { + uintptr_t value; + + public: + void *getSideTable() { + return (void*)(value & ~3ULL); + } +}; + +} + +// Lifecycle paths. One test each. 
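Each path listed next is driven with the same side-table scaffolding: the stub WeakReference above masks off the low flag bits of the stored value to recover the side-table allocation, which the tests then track with EXPECT_ALLOCATED/EXPECT_UNALLOCATED. A sketch of that scaffolding (editorial, not part of the patch; the 2-bit flag mask is taken from the stub above):

  size_t deinited = 0;
  auto object = allocTestObject(&deinited, 1);

  WeakReference w;
  swift_weakInit(&w, object);        // first weak reference allocates the side table
  void *side = w.getSideTable();     // stub: strip the low flag bits
  EXPECT_ALLOCATED(side);

  // ... drive the object through the lifecycle path under test ...

  EXPECT_UNALLOCATED(side);          // side table is freed only at the end of the path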
+// +// LIVE -> DEINITING -> DEAD, no side table +// LIVE -> DEINITING -> DEINITED -> DEAD, no side table +// +// LIVE -> DEINITING -> DEAD, with side table +// LIVE -> DEINITING -> DEINITED -> DEAD, with side table +// LIVE -> DEINITING -> FREED -> DEAD, with side table +// LIVE -> DEINITING -> DEINITED -> FREED -> DEAD, with side table + + +// LIVE -> DEINITING -> DEAD, no side table +TEST(LongRefcountingTest, lifecycle_live_deiniting_no_side_DeathTest) { + ::testing::FLAGS_gtest_death_test_style = "threadsafe"; + + size_t deinited = 0; + auto object = allocTestObject(&deinited, 1); + object->CheckLifecycle = true; + + // Object is LIVE + + EXPECT_ALLOCATED(object); + // RC tested elsewhere + + // URC load OK + // URC increment OK + // URC decrement OK + swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + + // WRC load can't happen + // WRC increment adds side table which is tested elsewhere + // WRC decrement can't happen + + // RC == 1 + // URC == 1 + // WRC == 1 + + swift_release(object); // DEINITING is in here + + // Object is DEAD + // RC == 0 + // URC == 0 + // WRC == 0 + + EXPECT_UNALLOCATED(object); +} + + +// LIVE -> DEINITING -> DEINITED -> DEAD, no side table +TEST(LongRefcountingTest, lifecycle_live_deiniting_deinited_no_side_DeathTest) { + ::testing::FLAGS_gtest_death_test_style = "threadsafe"; - // There is no overflow enforcement in the runtime today. - // Instead we check that the retain count wrapped around. 
+ size_t deinited = 0; + auto object = allocTestObject(&deinited, 1); + object->CheckLifecycle = true; + + // Object is LIVE + + EXPECT_ALLOCATED(object); + // RC tested elsewhere + + // URC load OK + // URC increment OK + // URC decrement OK + swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + + // WRC load can't happen + // WRC increment adds side table which is tested elsewhere + // WRC decrement can't happen + + // RC == 1 + // URC == 3 + // WRC == 1 + + swift_release(object); // DEINITING is in here + + // Object is DEINITED + // RC == 0 + // URC == 2 + // WRC == 1 + + EXPECT_EQ(1u, deinited); + EXPECT_ALLOCATED(object); + + // RC can't happen + + // WRC can't happen + + // URC load crashes + // URC increment can't happen + // URC decrement OK + ASSERT_DEATH(swift_unownedCheck(object), + "attempted to read an unowned reference"); + swift_unownedRelease(object); + EXPECT_ALLOCATED(object); + + // RC == 0 + // URC == 1 + // WRC == 1 + + swift_unownedRelease(object); + + // Object is DEAD + // RC == 0 + // URC == 0 + // WRC == 0 + + EXPECT_UNALLOCATED(object); +} + + +// LIVE -> DEINITING -> DEAD, with side table +TEST(LongRefcountingTest, lifecycle_live_deiniting_with_side_DeathTest) { + ::testing::FLAGS_gtest_death_test_style = "threadsafe"; + + size_t deinited = 0; + auto object = allocTestObject(&deinited, 1); + object->CheckLifecycle = true; + + // Object is LIVE + + EXPECT_ALLOCATED(object); + // RC tested elsewhere + + // URC load OK + // URC increment OK + // URC decrement OK + swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + // Remaining releases are performed after the side table is allocated. + + // WRC load can't happen + // WRC increment adds side table + // WRC decrement can't happen + + WeakReference w; + swift_weakInit(&w, object); + + // Object is LIVE with side table + + void *side = w.getSideTable(); + EXPECT_ALLOCATED(side); + + WeakReference w_deinit; + swift_weakInit(&w_deinit, object); + object->WeakRef = &w_deinit; + // destroyed during deinit + + // RC increment ok + // RC decrement ok swift_retain(object); - EXPECT_EQ(swift_retainCount(object), 0u); - EXPECT_EQ(0u, deallocated); + swift_retain(object); + swift_retain(object); + swift_release(object); + swift_release(object); + swift_release(object); + + // URC load OK + // URC increment OK + // URC decrement OK + swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + // ...and balancing from previously... 
+ swift_unownedRelease(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + + // WRC load OK + // WRC increment OK + // WRC decrement OK + + WeakReference w2; + swift_weakInit(&w2, object); + HeapObject *weakValue = swift_weakTakeStrong(&w2); + EXPECT_EQ(weakValue, object); + swift_release(weakValue); + + weakValue = swift_weakTakeStrong(&w); + EXPECT_EQ(weakValue, object); + swift_release(weakValue); + + // RC == 1 + // URC == 1 + // WRC == 1 + + swift_release(object); // DEINITING is in here + + // Object is DEAD + // RC == 0 + // URC == 0 + // WRC == 0 + + EXPECT_UNALLOCATED(side); + EXPECT_UNALLOCATED(object); } -TEST(RefcountingTest, nonatomic_retain_overflow) { - size_t deallocated = 0; - auto object = allocTestObject(&deallocated, 1); - // RC is 1. Retain to maxRC, then retain again and verify overflow. - retainALot(object, deallocated, maxRC - 1); - EXPECT_EQ(swift_retainCount(object), maxRC); - EXPECT_EQ(0u, deallocated); +// LIVE -> DEINITING -> DEINITED -> DEAD, with side table +TEST(LongRefcountingTest, lifecycle_live_deiniting_deinited_with_side_DeathTest) { + ::testing::FLAGS_gtest_death_test_style = "threadsafe"; - // There is no overflow enforcement in the runtime today. - // Instead we check that the retain count wrapped around. - swift_nonatomic_retain(object); - EXPECT_EQ(swift_retainCount(object), 0u); - EXPECT_EQ(0u, deallocated); + size_t deinited = 0; + auto object = allocTestObject(&deinited, 1); + object->CheckLifecycle = true; + + // Object is LIVE + + EXPECT_ALLOCATED(object); + // RC tested elsewhere + + // URC load OK + // URC increment OK + // URC decrement OK + swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + // Remaining releases are performed during DEINITED. 
+ + // WRC load can't happen + // WRC increment adds side table + // WRC decrement can't happen + + WeakReference w; + swift_weakInit(&w, object); + + // Object is LIVE with side table + + void *side = w.getSideTable(); + EXPECT_ALLOCATED(side); + + WeakReference w_deinit; + swift_weakInit(&w_deinit, object); + object->WeakRef = &w_deinit; + // destroyed during deinit + + // RC increment ok + // RC decrement ok + swift_retain(object); + swift_retain(object); + swift_retain(object); + swift_release(object); + swift_release(object); + swift_release(object); + + // URC load OK + // URC increment OK + // URC decrement OK + swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + + // WRC load OK + // WRC increment OK + // WRC decrement OK + + WeakReference w2; + swift_weakInit(&w2, object); + HeapObject *weakValue = swift_weakTakeStrong(&w2); + EXPECT_EQ(weakValue, object); + swift_release(weakValue); + + weakValue = swift_weakTakeStrong(&w); + EXPECT_EQ(weakValue, object); + swift_release(weakValue); + + // RC == 1 + // URC == 3 + // WRC == 1 + + swift_release(object); // DEINITING is in here + + // Object is DEINITED + // RC == 0 + // URC == 2 + // WRC == 1 + + EXPECT_EQ(1u, deinited); + EXPECT_ALLOCATED(object); + EXPECT_ALLOCATED(side); + + // RC can't happen + + // WRC can't happen + + // URC load crashes + // URC increment can't happen + // URC decrement OK + ASSERT_DEATH(swift_unownedCheck(object), + "attempted to read an unowned reference"); + swift_unownedRelease(object); + EXPECT_ALLOCATED(object); + EXPECT_ALLOCATED(side); + + // RC == 0 + // URC == 1 + // WRC == 1 + + swift_unownedRelease(object); + + // Object is DEAD + // RC == 0 + // URC == 0 + // WRC == 0 + + EXPECT_UNALLOCATED(object); + EXPECT_UNALLOCATED(side); +} + + +// LIVE -> DEINITING -> FREED -> DEAD, with side table +TEST(LongRefcountingTest, lifecycle_live_deiniting_freed_with_side_DeathTest) { + ::testing::FLAGS_gtest_death_test_style = "threadsafe"; + + size_t deinited = 0; + auto object = allocTestObject(&deinited, 1); + object->CheckLifecycle = true; + + // Object is LIVE + + EXPECT_ALLOCATED(object); + // RC tested elsewhere + + // URC load OK + // URC increment OK + // URC decrement OK + swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + + // WRC load can't happen + // WRC increment adds side table + // WRC decrement can't happen + + WeakReference w; + swift_weakInit(&w, object); + + // Object is LIVE with side table + + void *side = w.getSideTable(); + EXPECT_ALLOCATED(side); + + WeakReference w_deinit; + swift_weakInit(&w_deinit, object); + object->WeakRef = &w_deinit; + // destroyed during deinit + + // RC increment ok + // RC decrement ok + swift_retain(object); + swift_retain(object); + swift_retain(object); + swift_release(object); + swift_release(object); + swift_release(object); + + // URC load OK + // URC increment OK + // URC decrement OK + 
swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + + // WRC load OK + // WRC increment OK + // WRC decrement OK + + WeakReference w2; + swift_weakInit(&w2, object); + HeapObject *weakValue = swift_weakLoadStrong(&w2); + EXPECT_EQ(weakValue, object); + swift_release(weakValue); + + weakValue = swift_weakLoadStrong(&w); + EXPECT_EQ(weakValue, object); + swift_release(weakValue); + + // RC == 1 + // URC == 1 + // WRC == 3 + + swift_release(object); // DEINITING is in here + + // Object is FREED + // RC == 0 + // URC == 0 + // WRC == 2 + + EXPECT_EQ(1u, deinited); + EXPECT_UNALLOCATED(object); + EXPECT_ALLOCATED(side); + + // RC can't happen + + // URC can't happen + + // WRC load is nil + // WRC increment can't happen + // WRC decrement OK + + weakValue = swift_weakTakeStrong(&w2); + EXPECT_EQ(0, weakValue); + + // RC == 0 + // URC == 0 + // WRC == 1 + + weakValue = swift_weakTakeStrong(&w); + + // Object is DEAD + // RC == 0 + // URC == 0 + // WRC == 0 + + EXPECT_UNALLOCATED(side); + EXPECT_EQ(0, weakValue); +} + + +// LIVE -> DEINITING -> DEINITED -> FREED -> DEAD, with side table +TEST(LongRefcountingTest, lifecycle_live_deiniting_deinited_freed_with_side_DeathTest) { + ::testing::FLAGS_gtest_death_test_style = "threadsafe"; + + size_t deinited = 0; + auto object = allocTestObject(&deinited, 1); + object->CheckLifecycle = true; + + // Object is LIVE + + EXPECT_ALLOCATED(object); + // RC tested elsewhere + + // URC load OK + // URC increment OK + // URC decrement OK + swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + // Remaining releases are performed during DEINITED. 
+ + // WRC load can't happen + // WRC increment adds side table + // WRC decrement can't happen + + WeakReference w; + swift_weakInit(&w, object); + + // Object is LIVE with side table + + void *side = w.getSideTable(); + EXPECT_ALLOCATED(side); + + WeakReference w_deinit; + swift_weakInit(&w_deinit, object); + object->WeakRef = &w_deinit; + // destroyed during deinit + + // RC increment ok + // RC decrement ok + swift_retain(object); + swift_retain(object); + swift_retain(object); + swift_release(object); + swift_release(object); + swift_release(object); + + // URC load OK + // URC increment OK + // URC decrement OK + swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRetain(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + swift_unownedRelease(object); swift_unownedCheck(object); + + // WRC load OK + // WRC increment OK + // WRC decrement OK + + WeakReference w2; + swift_weakInit(&w2, object); + HeapObject *weakValue = swift_weakLoadStrong(&w2); + EXPECT_EQ(weakValue, object); + swift_release(weakValue); + + weakValue = swift_weakLoadStrong(&w); + EXPECT_EQ(weakValue, object); + swift_release(weakValue); + + // RC == 1 + // URC == 3 + // WRC == 3 + + swift_release(object); // DEINITING is in here + + // Object is DEINITED + // RC == 0 + // URC == 2 + // WRC == 3 + + EXPECT_EQ(1u, deinited); + EXPECT_ALLOCATED(object); + EXPECT_ALLOCATED(side); + + // RC can't happen + + // WRC load is nil + // WRC increment can't happen + // WRC decrement OK + + weakValue = swift_weakTakeStrong(&w2); + EXPECT_EQ(0, weakValue); + + // URC load crashes + // URC increment can't happen + // URC decrement OK + ASSERT_DEATH(swift_unownedCheck(object), + "attempted to read an unowned reference"); + swift_unownedRelease(object); + EXPECT_ALLOCATED(object); + EXPECT_ALLOCATED(side); + + // RC == 0 + // URC == 1 + // WRC == 2 + + swift_unownedRelease(object); + + // Object is FREED + // RC == 0 + // URC == 0 + // WRC == 1 + + EXPECT_EQ(1u, deinited); + EXPECT_UNALLOCATED(object); + EXPECT_ALLOCATED(side); + + // RC can't happen + + // URC can't happen + + // WRC load is nil + // WRC increment can't happen + // WRC decrement OK + + // RC == 0 + // URC == 0 + // WRC == 1 + + weakValue = swift_weakTakeStrong(&w); + + // Object is DEAD + // RC == 0 + // URC == 0 + // WRC == 0 + + EXPECT_UNALLOCATED(side); + EXPECT_EQ(0, weakValue); } diff --git a/unittests/runtime/Refcounting.cpp b/unittests/runtime/Refcounting.cpp index 8629f701b8505..d01b2796b29b4 100644 --- a/unittests/runtime/Refcounting.cpp +++ b/unittests/runtime/Refcounting.cpp @@ -153,6 +153,46 @@ TEST(RefcountingTest, unowned_retain_release_n) { EXPECT_EQ(1u, value); } +TEST(RefcountingTest, isUniquelyReferenced) { + size_t value = 0; + auto object = allocTestObject(&value, 1); + EXPECT_EQ(0u, value); + EXPECT_TRUE(swift_isUniquelyReferenced_nonNull_native(object)); + + swift_retain(object); + EXPECT_FALSE(swift_isUniquelyReferenced_nonNull_native(object)); + + swift_release(object); + EXPECT_TRUE(swift_isUniquelyReferenced_nonNull_native(object)); + + swift_release(object); + EXPECT_EQ(1u, value); +} + +TEST(RefcountingTest, isUniquelyReferencedOrPinned) { + size_t value = 0; + auto object = allocTestObject(&value, 1); + EXPECT_EQ(0u, value); + // RC 1, unpinned + EXPECT_TRUE(swift_isUniquelyReferencedOrPinned_nonNull_native(object)); + + 
swift_retain(object); + // RC big, unpinned + EXPECT_FALSE(swift_isUniquelyReferencedOrPinned_nonNull_native(object)); + + auto pinResult = swift_tryPin(object); + // RC big, pinned + EXPECT_TRUE(swift_isUniquelyReferencedOrPinned_nonNull_native(object)); + + swift_release(object); + // RC 1, pinned + EXPECT_TRUE(swift_isUniquelyReferencedOrPinned_nonNull_native(object)); + + swift_unpin(object); + swift_release(object); + EXPECT_EQ(1u, value); +} + ///////////////////////////////////////// // Non-atomic reference counting tests // ///////////////////////////////////////// @@ -245,4 +285,3 @@ TEST(RefcountingTest, nonatomic_unknown_retain_release_n) { EXPECT_EQ(0u, value); EXPECT_EQ(1u, swift_retainCount(object)); } - diff --git a/unittests/runtime/weak.mm b/unittests/runtime/weak.mm index 70f37c8c225df..4d5547909c331 100644 --- a/unittests/runtime/weak.mm +++ b/unittests/runtime/weak.mm @@ -14,10 +14,17 @@ #include #include "swift/Runtime/HeapObject.h" #include "swift/Runtime/Metadata.h" +#include "swift/Runtime/Metadata.h" #include "gtest/gtest.h" using namespace swift; +// A fake definition of Swift runtime's WeakReference. +// This has the proper size and alignment which is all we need. +namespace swift { +class WeakReference { void *value __attribute__((unused)); }; +} + // Declare some Objective-C stuff. extern "C" void objc_release(id); diff --git a/utils/gen-static-stdlib-link-args b/utils/gen-static-stdlib-link-args index ef1423b11c0a3..28e5123808c28 100755 --- a/utils/gen-static-stdlib-link-args +++ b/utils/gen-static-stdlib-link-args @@ -61,6 +61,7 @@ function write_linkfile { cat >$OUTPUTFILE <
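Because the weak.mm stand-in only needs to match the real WeakReference's size and alignment, the static_asserts that WeakReference.h applies to the real class could be mirrored beside the stub. A sketch of that guard, offered as an editorial suggestion rather than part of this patch:

  // Hypothetical layout guard for the test-only stand-in (not in this patch).
  namespace swift {
  class WeakReference { void *value __attribute__((unused)); };
  }

  static_assert(sizeof(swift::WeakReference) == sizeof(void *),
                "stand-in must match the runtime WeakReference's size");
  static_assert(alignof(swift::WeakReference) == alignof(void *),
                "stand-in must match the runtime WeakReference's alignment");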