diff --git a/stl/inc/semaphore b/stl/inc/semaphore
index 6825f53519b..e012ece735b 100644
--- a/stl/inc/semaphore
+++ b/stl/inc/semaphore
@@ -29,12 +29,6 @@ _STL_DISABLE_CLANG_WARNINGS
 
 _STD_BEGIN
 
-template <class _Rep, class _Period>
-_NODISCARD unsigned long long _Semaphore_deadline(const chrono::duration<_Rep, _Period>& _Rel_time) {
-    return __std_atomic_wait_get_deadline(
-        chrono::duration_cast<chrono::duration<unsigned long long, milli>>(_Rel_time).count());
-}
-
 template <class _Clock, class _Duration>
 _NODISCARD unsigned long _Semaphore_remaining_timeout(const chrono::time_point<_Clock, _Duration>& _Abs_time) {
     const auto _Now = _Clock::now();
@@ -145,11 +139,11 @@ public:
 
     template <class _Rep, class _Period>
     _NODISCARD_TRY_CHANGE_STATE bool try_acquire_for(const chrono::duration<_Rep, _Period>& _Rel_time) {
-        auto _Deadline = _Semaphore_deadline(_Rel_time);
+        auto _Deadline = _STD chrono::steady_clock::now() + _Rel_time;
         ptrdiff_t _Current = _Counter.load(memory_order_relaxed);
         for (;;) {
             while (_Current == 0) {
-                const auto _Remaining_timeout = __std_atomic_wait_get_remaining_timeout(_Deadline);
+                const auto _Remaining_timeout = _Semaphore_remaining_timeout(_Deadline);
                 if (_Remaining_timeout == 0) {
                     return false;
                 }
@@ -257,7 +251,7 @@ public:
 
     template <class _Rep, class _Period>
     _NODISCARD_TRY_CHANGE_STATE bool try_acquire_for(const chrono::duration<_Rep, _Period>& _Rel_time) {
-        auto _Deadline = _Semaphore_deadline(_Rel_time);
+        auto _Deadline = _STD chrono::steady_clock::now() + _Rel_time;
         for (;;) {
             // "happens after release" ordering is provided by this exchange, so loads and waits can be relaxed
             // TRANSITION, GH-1133: should be memory_order_acquire
@@ -267,7 +261,7 @@ public:
             }
             _STL_VERIFY(_Prev == 0, "Invariant: semaphore counter is non-negative and doesn't exceed max(), "
                                     "possibly caused by memory corruption");
-            const auto _Remaining_timeout = __std_atomic_wait_get_remaining_timeout(_Deadline);
+            const auto _Remaining_timeout = _Semaphore_remaining_timeout(_Deadline);
             if (_Remaining_timeout == 0) {
                 return false;
             }
diff --git a/stl/inc/xatomic_wait.h b/stl/inc/xatomic_wait.h
index 5058335d974..0f67e66a81f 100644
--- a/stl/inc/xatomic_wait.h
+++ b/stl/inc/xatomic_wait.h
@@ -40,12 +40,6 @@ int __stdcall __std_atomic_wait_indirect(const void* _Storage, void* _Comparand,
 void __stdcall __std_atomic_notify_one_indirect(const void* _Storage) noexcept;
 void __stdcall __std_atomic_notify_all_indirect(const void* _Storage) noexcept;
 
-// These functions convert a duration into a time point in order to tolerate spurious wakes in atomic wait, and then
-// convert back from the time point to individual wait attempts (which are limited by DWORD milliseconds to a length of
-// ~49 days)
-unsigned long long __stdcall __std_atomic_wait_get_deadline(unsigned long long _Timeout) noexcept;
-unsigned long __stdcall __std_atomic_wait_get_remaining_timeout(unsigned long long _Deadline) noexcept;
-
 } // extern "C"
 
 #pragma pop_macro("new")
diff --git a/stl/src/atomic_wait.cpp b/stl/src/atomic_wait.cpp
index 4bafb1a86bb..b20c6f5cc38 100644
--- a/stl/src/atomic_wait.cpp
+++ b/stl/src/atomic_wait.cpp
@@ -177,6 +177,7 @@ int __stdcall __std_atomic_wait_indirect(const void* _Storage, void* _Comparand,
     }
 }
 
+// TRANSITION, ABI: preserved for binary compatibility
 unsigned long long __stdcall __std_atomic_wait_get_deadline(const unsigned long long _Timeout) noexcept {
     if (_Timeout == _Atomic_wait_no_deadline) {
         return _Atomic_wait_no_deadline;
@@ -185,6 +186,7 @@ unsigned long long __stdcall __std_atomic_wait_get_deadline(const unsigned long
     }
 }
 
+// TRANSITION, ABI: preserved for binary compatibility
 unsigned long __stdcall __std_atomic_wait_get_remaining_timeout(unsigned long long _Deadline) noexcept {
     static_assert(__std_atomic_wait_no_timeout == INFINITE,
         "__std_atomic_wait_no_timeout is passed directly to underlying API, so should match it");
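
For context (not part of the patch): the pattern <semaphore> switches to is to convert the caller's relative timeout into a steady_clock deadline once, then recompute a capped millisecond timeout before each individual wait attempt, so a spurious wake shortens the remaining wait instead of restarting it. The standalone sketch below illustrates that pattern only; compute_deadline and remaining_timeout_ms are invented names, not the STL's _Semaphore_remaining_timeout implementation.

// Illustration only: invented helper names, not STL internals.
#include <algorithm>
#include <chrono>
#include <cstdio>

using steady_deadline = std::chrono::steady_clock::time_point;

// Turn a relative timeout into an absolute deadline once, up front.
template <class Rep, class Period>
steady_deadline compute_deadline(const std::chrono::duration<Rep, Period>& rel_time) {
    return std::chrono::steady_clock::now() + rel_time;
}

// Convert the deadline back into the timeout for one wait attempt, in milliseconds,
// since an OS-level wait typically takes a DWORD; each attempt is therefore capped.
unsigned long remaining_timeout_ms(const steady_deadline& deadline) {
    const auto now = std::chrono::steady_clock::now();
    if (now >= deadline) {
        return 0; // already timed out
    }
    const auto rel = std::chrono::ceil<std::chrono::milliseconds>(deadline - now);
    constexpr std::chrono::milliseconds max_attempt{0xFFFFFFFEu}; // arbitrary DWORD-sized cap
    return static_cast<unsigned long>((std::min)(rel, max_attempt).count());
}

int main() {
    const auto deadline = compute_deadline(std::chrono::milliseconds{250});
    // A waiting loop would call remaining_timeout_ms(deadline) before each wait attempt
    // and give up once it returns 0.
    std::printf("first attempt may wait up to %lu ms\n", remaining_timeout_ms(deadline));
}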