diff --git a/stl/inc/chrono b/stl/inc/chrono
index f5d0b5354ae..4991b19d61a 100644
--- a/stl/inc/chrono
+++ b/stl/inc/chrono
@@ -12,7 +12,6 @@
 #include <limits>
 #include <ratio>
 #include <utility>
-#include <xatomic.h>
 #include <xtimec.h>
 
 #pragma pack(push, _CRT_PACKING)
@@ -604,45 +603,9 @@ namespace chrono {
         using time_point = chrono::time_point<steady_clock>;
         static constexpr bool is_steady = true;
 
-#pragma warning(push)
-#pragma warning(disable : 28112) // A variable which is accessed via an Interlocked function must
-                                 // always be accessed via an Interlocked function.
         _NODISCARD static time_point now() noexcept { // get current time
-#if (defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64)) && !defined(_M_CEE_PURE)
-            // Implement atomics avoiding <atomic> header dependency
-            static volatile long long _Cached_freq        = LLONG_MAX;
-            static volatile long long _Cached_ctr_base    = LLONG_MAX;
-            static volatile long long _Cached_result_base = LLONG_MAX;
-
-            const long long _Freq_from_cache = _Atomic_load_ll_relaxed(&_Cached_freq);
-            const long long _Ctr_base        = _Atomic_load_ll_relaxed(&_Cached_ctr_base);
-            const long long _Result_base    = _Atomic_load_ll_relaxed(&_Cached_result_base);
-            if (_Freq_from_cache != LLONG_MAX && _Ctr_base != LLONG_MAX && _Result_base != LLONG_MAX) {
-                // Fast path
-                const long long _Ctr = _Query_perf_counter();
-                return time_point(duration(_Result_base + (_Ctr - _Ctr_base) * period::den / _Freq_from_cache));
-            }
-            // Calculate with two divisions to prevent overflow
-            const long long _Freq   = _Query_perf_frequency();
-            const long long _Ctr    = _Query_perf_counter();
-            const long long _Result = _Scale_large_counter(_Ctr, _Freq);
-            if (_InterlockedCompareExchange64(&_Cached_freq, _Freq, LLONG_MAX) == LLONG_MAX) {
-                // This is the first result, save current result as base for fast path
-                _InterlockedCompareExchange64(&_Cached_ctr_base, _Ctr, LLONG_MAX);
-                _InterlockedCompareExchange64(&_Cached_result_base, _Result, LLONG_MAX);
-            }
-            // if _Result is not saved as first, it is still compatible with fast result
-            return time_point(duration(_Result));
-#else // ^^^ known hardware && !defined(_M_CEE_PURE) / unknown hardware || defined(_M_CEE_PURE) vvv
             const long long _Freq = _Query_perf_frequency(); // doesn't change after system boot
             const long long _Ctr  = _Query_perf_counter();
-            return time_point(duration(_Scale_large_counter(_Ctr, _Freq)));
-#endif // (defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64)) && !defined(_M_CEE_PURE)
-        }
-#pragma warning(pop)
-
-    private:
-        _NODISCARD static long long _Scale_large_counter(const long long _Ctr, const long long _Freq) noexcept {
             static_assert(period::num == 1, "This assumes period::num == 1.");
             // Instead of just having "(_Ctr * period::den) / _Freq",
             // the algorithm below prevents overflow when _Ctr is sufficiently large.
@@ -651,7 +614,7 @@ namespace chrono {
             // but the initial value of _Ctr could be large.
             const long long _Whole = (_Ctr / _Freq) * period::den;
             const long long _Part  = (_Ctr % _Freq) * period::den / _Freq;
-            return _Whole + _Part;
+            return time_point(duration(_Whole + _Part));
         }
     };
diff --git a/stl/inc/xatomic.h b/stl/inc/xatomic.h
index 648ff2b5594..8498f18c1a7 100644
--- a/stl/inc/xatomic.h
+++ b/stl/inc/xatomic.h
@@ -101,18 +101,6 @@ _NODISCARD const volatile _Integral* _Atomic_address_as(const _Ty& _Source) noex
     return &reinterpret_cast<const volatile _Integral&>(_Source);
 }
 
-// FUNCTION TEMPLATE _Atomic_load_ll_relaxed
-#if (defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64)) && !defined(_M_CEE_PURE)
-_NODISCARD inline long long _Atomic_load_ll_relaxed(volatile long long* _Mem) noexcept {
-    // Copy from _Atomic_storage<_Ty, 8>::load
-#ifdef _M_ARM
-    return __ldrexd(_Mem);
-#else
-    return __iso_volatile_load64(_Mem);
-#endif
-}
-#endif // (defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64)) && !defined(_M_CEE_PURE)
-
 _STD_END
 
 #pragma pop_macro("new")