diff --git a/include/swift/Threading/Impl/Linux.h b/include/swift/Threading/Impl/Linux.h
index f631dc9fb4061..5baeaba8c0ea6 100644
--- a/include/swift/Threading/Impl/Linux.h
+++ b/include/swift/Threading/Impl/Linux.h
@@ -135,7 +135,10 @@ inline void lazy_mutex_unsafe_unlock(lazy_mutex_handle &handle) {
 
 struct once_t {
   std::atomic<std::intptr_t> flag;
+#if defined(__LP64__) || defined(_LP64)
+  // On 32-bit Linux we can't have the lock, so we'll be less efficient
   linux::ulock_t lock;
+#endif
 };
 
 void once_slow(once_t &predicate, void (*fn)(void *), void *context);
diff --git a/lib/Threading/Linux.cpp b/lib/Threading/Linux.cpp
index 0a9876ece908f..811f470ba0832 100644
--- a/lib/Threading/Linux.cpp
+++ b/lib/Threading/Linux.cpp
@@ -36,6 +36,10 @@ class MainThreadRememberer {
 
 MainThreadRememberer rememberer;
 
+#if !defined(__LP64__) && !defined(_LP64)
+pthread_mutex_t once_mutex = PTHREAD_MUTEX_INITIALIZER;
+#endif
+
 #pragma clang diagnostic pop
 
 } // namespace
@@ -49,12 +53,21 @@ bool swift::threading_impl::thread_is_main() {
 
 void swift::threading_impl::once_slow(once_t &predicate, void (*fn)(void *),
                                       void *context) {
+  // On 32-bit Linux we can't have per-once locks
+#if defined(__LP64__) || defined(_LP64)
   linux::ulock_lock(&predicate.lock);
+#else
+  pthread_mutex_lock(&once_mutex);
+#endif
   if (predicate.flag.load(std::memory_order_acquire) == 0) {
     fn(context);
     predicate.flag.store(-1, std::memory_order_release);
  }
+#if defined(__LP64__) || defined(_LP64)
   linux::ulock_unlock(&predicate.lock);
+#else
+  pthread_mutex_unlock(&once_mutex);
+#endif
 }
 
 llvm::Optional
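
For context, here is a minimal standalone sketch of the pattern this patch applies on 32-bit Linux: an atomic-flag fast path, with all slow paths serialized through a single global `pthread_mutex_t` instead of a per-once `ulock`. The `once` and `g_once_mutex` names are illustrative only, and the 64-bit per-once `ulock` branch is omitted; this is not the Swift runtime's actual implementation.

```cpp
#include <atomic>
#include <cstdint>
#include <pthread.h>

struct once_t {
  std::atomic<std::intptr_t> flag{0}; // 0 = not yet run, -1 = done
};

// Shared by every once_t instance; coarse-grained, but costs no
// per-once storage (the point of the 32-bit fallback).
static pthread_mutex_t g_once_mutex = PTHREAD_MUTEX_INITIALIZER;

static void once_slow(once_t &predicate, void (*fn)(void *), void *context) {
  pthread_mutex_lock(&g_once_mutex);
  // Re-check under the lock: another thread may have won the race and
  // already run fn while we were waiting on the mutex.
  if (predicate.flag.load(std::memory_order_acquire) == 0) {
    fn(context);
    predicate.flag.store(-1, std::memory_order_release);
  }
  pthread_mutex_unlock(&g_once_mutex);
}

// Fast path: one acquire load; only racing initializers take the slow path.
inline void once(once_t &predicate, void (*fn)(void *), void *context) {
  if (predicate.flag.load(std::memory_order_acquire) != 0)
    return;
  once_slow(predicate, fn, context);
}
```

The trade-off is that unrelated `once` predicates contend on one mutex during initialization, but only on the first (or racing) calls; once the flag is set, the fast path never touches the mutex again.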