diff --git a/.clang-format b/.clang-format
index d0fb93541e5..41bfa083dba 100644
--- a/.clang-format
+++ b/.clang-format
@@ -186,9 +186,22 @@ SpaceAfterCStyleCast: true
 # SpacesInParentheses: false
 # SpacesInSquareBrackets: false
 # Standard:        Cpp11
+
+# NOTE: _STD_BEGIN, _STD_END, etc. aren't macros for complete statements, but telling clang-format that they are
+# produces the behavior that we want (with no block indentation).
 # StatementMacros:
 #   - Q_UNUSED
 #   - QT_REQUIRE_VERSION
+StatementMacros:
+  - _STD_BEGIN
+  - _STD_END
+  - _STDEXT_BEGIN
+  - _STDEXT_END
+  - _EXTERN_C
+  - _END_EXTERN_C
+  - _EXTERN_C_UNLESS_PURE
+  - _END_EXTERN_C_UNLESS_PURE
+
 # TabWidth:        8
 # UseTab:          Never
...
diff --git a/stl/CMakeLists.txt b/stl/CMakeLists.txt
index ef34a76356a..a42c7200a12 100644
--- a/stl/CMakeLists.txt
+++ b/stl/CMakeLists.txt
@@ -237,6 +237,7 @@ endforeach()
 # Objs that exist in both libcpmt[d][01].lib and msvcprt[d].lib.
 set(IMPLIB_SOURCES
     ${CMAKE_CURRENT_LIST_DIR}/src/filesystem.cpp
+    ${CMAKE_CURRENT_LIST_DIR}/src/filesystem_space.cpp
    ${CMAKE_CURRENT_LIST_DIR}/src/locale0_implib.cpp
     ${CMAKE_CURRENT_LIST_DIR}/src/nothrow.cpp
     ${CMAKE_CURRENT_LIST_DIR}/src/parallel_algorithms.cpp
diff --git a/stl/inc/execution b/stl/inc/execution
index 9d169742c60..15c210a8871 100644
--- a/stl/inc/execution
+++ b/stl/inc/execution
@@ -103,22 +103,19 @@ namespace execution {
 } // namespace execution

 template <>
-struct is_execution_policy<execution::sequenced_policy> : true_type { // sequenced_policy is an execution policy
-};
+struct is_execution_policy<execution::sequenced_policy> : true_type {}; // sequenced_policy is an execution policy

 template <>
-struct is_execution_policy<execution::parallel_policy> : true_type { // parallel_policy is an execution policy
-};
+struct is_execution_policy<execution::parallel_policy> : true_type {}; // parallel_policy is an execution policy

 template <>
-struct is_execution_policy<execution::parallel_unsequenced_policy>
-    : true_type { // parallel_unsequenced_policy is an execution policy
-};
+struct is_execution_policy<execution::parallel_unsequenced_policy> : true_type {
+}; // parallel_unsequenced_policy is an execution policy

 // STRUCT _Parallelism_resources_exhausted
 struct _Parallelism_resources_exhausted : exception {
-    _NODISCARD virtual const char* __CLR_OR_THIS_CALL what() const
-        noexcept override { // return pointer to message string
+    _NODISCARD virtual const char* __CLR_OR_THIS_CALL what() const noexcept override {
+        // return pointer to message string
         return "Insufficient resources were available to use additional parallelism.";
     }
@@ -208,8 +205,8 @@ void _Run_available_chunked_work(_Work& _Operation) {

 // FUNCTION TEMPLATE _Run_chunked_parallel_work
 template <class _Work>
-void _Run_chunked_parallel_work(
-    const size_t _Hw_threads, _Work& _Operation) { // process chunks of _Operation on the thread pool
+void _Run_chunked_parallel_work(const size_t _Hw_threads, _Work& _Operation) {
+    // process chunks of _Operation on the thread pool
     const _Work_ptr _Work_op{_Operation}; // setup complete, hereafter nothrow or terminate
     _Work_op._Submit_for_chunks(_Hw_threads, _Operation._Team._Chunks);
@@ -217,13 +214,11 @@
 }

 // CHUNK CALCULATION FUNCTIONS
-// The parallel algorithms library below assumes that distance(first, last) fits into a
-// size_t; forward iterators must refer to objects in memory and therefore must meet
-// this requirement.
+// The parallel algorithms library below assumes that distance(first, last) fits into a size_t;
+// forward iterators must refer to objects in memory and therefore must meet this requirement.
 //
-// Unlike the serial algorithms library, which can stay in the difference_type domain,
-// here we need to talk with vector (which speaks size_t), and with Windows, which wants
-// to speak unsigned int.
+// Unlike the serial algorithms library, which can stay in the difference_type domain, here we need
+// to talk with vector (which speaks size_t), and with Windows, which wants to speak unsigned int.
 //
 // This assumption should be localized to the chunk calculation functions; the rest of
 // the library assumes that chunk numbers can be static_cast into the difference_type domain.
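The comment block above is the load-bearing assumption for everything that follows. A minimal sketch of the conversion it licenses, assuming only the standard library (the helper name is illustrative, not the header's):

    #include <cstddef>
    #include <iterator>

    // A forward iterator's range refers to objects in memory, so its length is
    // always representable as size_t; chunk math can then be done in the size_t
    // domain and static_cast back to difference_type, as the comment describes.
    template <class FwdIt>
    std::size_t range_size_for_chunking(FwdIt first, FwdIt last) {
        const auto n = std::distance(first, last); // difference_type domain
        return static_cast<std::size_t>(n);        // size_t domain, lossless here
    }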
@@ -415,8 +410,8 @@ struct _Parallel_choose_min_chunk {
         return _Selected_chunk.load(memory_order_relaxed) != _Still_active;
     }

-    void _Imbue(
-        const size_t _Chunk, const _Ty _Local_result) { // atomically sets the result to the lowest chunk's value
+    void _Imbue(const size_t _Chunk, const _Ty _Local_result) {
+        // atomically sets the result to the lowest chunk's value
         size_t _Expected = _Still_active;
         while (!_Selected_chunk.compare_exchange_weak(_Expected, _Chunk)) {
             // note: _Still_active is the maximum possible value, so it gets ignored implicitly
@@ -450,8 +445,8 @@ struct _Parallel_choose_max_chunk {
         return _Selected_chunk.load(memory_order_relaxed) != _Still_active;
     }

-    void _Imbue(
-        const size_t _Chunk, const _Ty _Local_result) { // atomically sets the result to the highest chunk's value
+    void _Imbue(const size_t _Chunk, const _Ty _Local_result) {
+        // atomically sets the result to the highest chunk's value
         size_t _Expected = _Still_active;
         while (!_Selected_chunk.compare_exchange_weak(_Expected, _Chunk)) {
             // wrap _Still_active down to 0 so that only 1 branch is necessary:
@@ -482,8 +477,8 @@ struct alignas(_Ty) alignas(size_t) alignas(_Atomic_counter_t) _Circular_buffer
         }
     }

-    static _Circular_buffer* _Allocate_circular_buffer(
-        const size_t _New_log_size) { // allocate a circular buffer with space for 2^_New_log_size elements
+    static _Circular_buffer* _Allocate_circular_buffer(const size_t _New_log_size) {
+        // allocate a circular buffer with space for 2^_New_log_size elements
         if (_New_log_size >= 32) {
             _Throw_parallelism_resources_exhausted();
         }
@@ -540,9 +535,9 @@ struct alignas(_Ty) alignas(size_t) alignas(_Atomic_counter_t) _Circular_buffer
 #pragma warning(disable : 4324) // structure was padded due to alignment specifier
 template <class _Ty>
 class alignas(hardware_destructive_interference_size) _Work_stealing_deque {
-    // thread-local work-stealing deque, which allows efficient access from a single owner thread at the "bottom" of the
-    // queue, and any thread access to the "top" of the queue. Originally described in the paper "Dynamic Circular
-    // Work-Stealing Deque" by David Chase and Yossi Lev
+    // thread-local work-stealing deque, which allows efficient access from a single owner thread at the "bottom"
+    // of the queue, and any thread access to the "top" of the queue. Originally described in the paper
+    // "Dynamic Circular Work-Stealing Deque" by David Chase and Yossi Lev
 public:
     _Work_stealing_deque() = default;
     _Work_stealing_deque(const _Work_stealing_deque&) = delete;
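For readers new to the Chase-Lev structure, here is a toy fixed-capacity version of the steal protocol the comments above and below describe. This is a sketch under simplifying assumptions (bounded ring, no growth, no owner-side pop), not the header's implementation:

    #include <atomic>
    #include <cstddef>
    #include <optional>

    // Toy Chase-Lev deque: the owner pushes at the bottom; any thread steals at
    // the top by winning a compare_exchange on top_. T should be trivially
    // copyable, mirroring the trap-representation caveat in the real code.
    template <class T, std::size_t N>
    class toy_chase_lev {
        std::atomic<std::size_t> top_{0};
        std::atomic<std::size_t> bottom_{0};
        T ring_[N];

    public:
        bool try_push(T v) { // owner thread only
            const auto b = bottom_.load(std::memory_order_relaxed);
            const auto t = top_.load(std::memory_order_acquire);
            if (b - t >= N) {
                return false; // full
            }
            ring_[b % N] = v;
            bottom_.store(b + 1, std::memory_order_release);
            return true;
        }

        std::optional<T> try_steal() { // any thread
            auto t       = top_.load(std::memory_order_acquire);
            const auto b = bottom_.load(std::memory_order_acquire);
            if (t >= b) {
                return std::nullopt; // empty
            }
            T v = ring_[t % N]; // speculative read; racy in the abstract machine
            if (top_.compare_exchange_strong(t, t + 1)) {
                return v; // we won the race for this element
            }
            return std::nullopt; // lost to another thief or the owner
        }
    };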
@@ -557,8 +552,8 @@ public:
         // may be accessed by owning thread only
         const auto _Local_b = _Bottom.load();
         if (_Local_b == SIZE_MAX) {
-            // we assume that any input range won't be divided into more than SIZE_MAX subproblems; treat overflow of
-            // that kind as OOM
+            // we assume that any input range won't be divided into more than SIZE_MAX subproblems;
+            // treat overflow of that kind as OOM
             _Throw_parallelism_resources_exhausted();
         }
@@ -600,10 +595,9 @@ public:
             _Val = _Stealing_segment->_Subscript(_Local_t); // speculative read/write data race
             _Stealing_segment->_Release();
-            // The above is technically prohibited by the C++ memory model, but
-            // happens to be well defined on all hardware this implementation
-            // targets. Hardware with trap representations or similar must not
-            // use this implementation.
+            // The above is technically prohibited by the C++ memory model, but happens
+            // to be well defined on all hardware this implementation targets.
+            // Hardware with trap representations or similar must not use this implementation.
             _Desired_t = _Local_t + 1U;
         } while (!_Top.compare_exchange_strong(_Local_t, _Desired_t)); // if a data race occurred, try again
@@ -634,9 +628,8 @@ public:
             return true;
         }

-        // We're trying to read the last element that another thread may be
-        // trying to steal; see who gets to keep the element through _Top
-        // (effectively, steal from ourselves)
+        // We're trying to read the last element that another thread may be trying to steal;
+        // see who gets to keep the element through _Top (effectively, steal from ourselves)
         const auto _Desired_top = _Local_t + 1U;
         if (_Top.compare_exchange_strong(_Local_t, _Desired_top)) {
             _Bottom.store(_Desired_top);
@@ -791,8 +784,8 @@ struct _Static_partition_team { // common data for all static partitioned ops
         auto _This_chunk_size     = _Chunk_size;
         auto _This_chunk_start_at = static_cast<_Diff>(_This_chunk_diff * _This_chunk_size);
         if (_This_chunk_diff < _Unchunked_items) {
-            // chunks at index lower than _Unchunked_items get an extra item, and need to shift forward by all their
-            // predecessors' extra items
+            // chunks at index lower than _Unchunked_items get an extra item,
+            // and need to shift forward by all their predecessors' extra items
             _This_chunk_start_at += _This_chunk_diff;
             ++_This_chunk_size;
         } else { // chunks without an extra item need to account for all the extra items
@@ -808,8 +801,8 @@
     }

     _Static_partition_key<_Diff> _Get_next_key() {
-        // retrieves the next static partition key to process, if it exists; otherwise, retrieves an invalid partition
-        // key
+        // retrieves the next static partition key to process, if it exists;
+        // otherwise, retrieves an invalid partition key
         const auto _This_chunk = _Consumed_chunks++;
         if (_This_chunk < _Chunks) {
             return _Get_chunk_key(_This_chunk);
         }
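The extra-item bookkeeping above is easier to see with concrete numbers. A standalone model of the chunk-key arithmetic (names and types are illustrative, not the header's): distributing 23 items over 5 chunks yields sizes 5 5 5 4 4 at offsets 0 5 10 15 19.

    #include <cstddef>

    struct chunk_key {
        std::size_t start;
        std::size_t size;
    };

    chunk_key get_chunk_key(std::size_t items, std::size_t chunks, std::size_t i) {
        const std::size_t base  = items / chunks;
        const std::size_t extra = items % chunks; // plays the role of _Unchunked_items
        if (i < extra) {
            // early chunks take one extra item and shift forward by their
            // predecessors' extra items
            return {i * base + i, base + 1};
        }
        // later chunks account for all the extra items that came before
        return {i * base + extra, base};
    }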
@@ -837,30 +830,30 @@ struct _Static_partition_range<_RanIt, _Diff, true> {
     _URanIt _Start_at;
     using _Chunk_type = _Iterator_range<_URanIt>;

-    _RanIt _Populate(const _Static_partition_team<_Diff>& _Team,
-        _RanIt _First) { // statically partition a random-access iterator range and return next(_First, _Team._Count)
-        // pre: _Populate hasn't yet been called on this instance
+    _RanIt _Populate(const _Static_partition_team<_Diff>& _Team, _RanIt _First) {
+        // statically partition a random-access iterator range and return next(_First, _Team._Count)
+        // pre: _Populate hasn't yet been called on this instance
         auto _Result = _First + static_cast<_Target_diff>(_Team._Count); // does verification
         _Start_at = _Get_unwrapped(_First);
         return _Result;
     }

-    bool _Populate(const _Static_partition_team<_Diff>& _Team, _RanIt _First,
-        _RanIt _Last) { // statically partition a random-access iterator range and check if the range ends at _Last
-        // pre: _Populate hasn't yet been called on this instance
+    bool _Populate(const _Static_partition_team<_Diff>& _Team, _RanIt _First, _RanIt _Last) {
+        // statically partition a random-access iterator range and check if the range ends at _Last
+        // pre: _Populate hasn't yet been called on this instance
         _Adl_verify_range(_First, _Last);
         _Start_at = _Get_unwrapped(_First);
         return _Team._Count == _Last - _First;
     }

-    _URanIt _Get_first(size_t /* _Chunk_number */,
-        const _Diff _Offset) { // get the first iterator for _Chunk _Chunk_number (which is at offset _Offset)
+    _URanIt _Get_first(size_t /* _Chunk_number */, const _Diff _Offset) {
+        // get the first iterator for _Chunk _Chunk_number (which is at offset _Offset)
         return _Start_at + static_cast<_Target_diff>(_Offset);
     }

-    _Chunk_type _Get_chunk(const _Static_partition_key<_Diff> _Key)
-        const { // get a static partition chunk from a random-access range
-        // pre: _Key was generated by the _Static_partition_team instance passed to a previous call to _Populate
+    _Chunk_type _Get_chunk(const _Static_partition_key<_Diff> _Key) const {
+        // get a static partition chunk from a random-access range
+        // pre: _Key was generated by the _Static_partition_team instance passed to a previous call to _Populate
         const auto _First = _Start_at + static_cast<_Target_diff>(_Key._Start_at);
         return {_First, _First + static_cast<_Target_diff>(_Key._Size)};
     }
@@ -873,9 +866,9 @@ struct _Static_partition_range<_FwdIt, _Diff, false> {
     _Parallel_vector<_UFwdIt> _Division_points;
     using _Chunk_type = _Iterator_range<_UFwdIt>;

-    _FwdIt _Populate(const _Static_partition_team<_Diff>& _Team,
-        _FwdIt _First) { // statically partition a forward iterator range and return next(_First, _Team._Count)
-        // pre: _Populate hasn't yet been called on this instance
+    _FwdIt _Populate(const _Static_partition_team<_Diff>& _Team, _FwdIt _First) {
+        // statically partition a forward iterator range and return next(_First, _Team._Count)
+        // pre: _Populate hasn't yet been called on this instance
         const auto _Chunks = _Team._Chunks;
         _Division_points.resize(_Chunks + 1);
         // The following potentially narrowing cast is OK because caller has ensured
@@ -898,9 +891,9 @@ struct _Static_partition_range<_FwdIt, _Diff, false> {
         return _First;
     }

-    bool _Populate(const _Static_partition_team<_Diff>& _Team, _FwdIt _First,
-        _FwdIt _Last) { // statically partition a forward iterator range and check if the range ends at _Last
-        // pre: _Populate hasn't yet been called on this instance
+    bool _Populate(const _Static_partition_team<_Diff>& _Team, _FwdIt _First, _FwdIt _Last) {
+        // statically partition a forward iterator range and check if the range ends at _Last
+        // pre: _Populate hasn't yet been called on this instance
         const auto _Chunks = _Team._Chunks;
         _Division_points.resize(_Chunks + 1);
         const auto _Chunk_size = _Team._Chunk_size;
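The forward-iterator specialization trades the random-access arithmetic for one up-front walk of the range. A sketch of the division-points idea (illustrative, not the header's code):

    #include <cstddef>
    #include <iterator>
    #include <vector>

    // Walk the range once and remember where each chunk begins; fetching a
    // chunk later is then two lookups instead of an O(n) advance.
    template <class FwdIt>
    std::vector<FwdIt> division_points(
        FwdIt first, std::size_t chunks, std::size_t chunk_size, std::size_t extra) {
        std::vector<FwdIt> points;
        points.reserve(chunks + 1);
        points.push_back(first);
        for (std::size_t i = 0; i < chunks; ++i) {
            // early chunks carry one extra item, matching the key arithmetic above
            std::advance(first, static_cast<std::ptrdiff_t>(chunk_size + (i < extra ? 1 : 0)));
            points.push_back(first); // points[i + 1] is one past chunk i
        }
        return points;
    }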
@@ -1007,8 +1000,8 @@ struct _Static_partition_range_backward<_BidIt, _Diff, false> {

 // FUNCTION TEMPLATE _Distance_any
 template <class _InIt1, class _InIt2>
-_Common_diff_t<_InIt1, _InIt2> _Distance_any(_InIt1 _First1, _InIt1 _Last1, _InIt2 _First2,
-    _InIt2 _Last2) { // get the distance from 2 ranges which should have identical lengths
+_Common_diff_t<_InIt1, _InIt2> _Distance_any(_InIt1 _First1, _InIt1 _Last1, _InIt2 _First2, _InIt2 _Last2) {
+    // get the distance from 2 ranges which should have identical lengths
     if constexpr (_Is_random_iter_v<_InIt1>) {
         (void) _First2; // TRANSITION, VSO-486357
         (void) _Last2; // TRANSITION, VSO-486357
@@ -2057,13 +2050,13 @@ _NODISCARD bool equal(_ExPo&&, const _FwdIt1 _First1, const _FwdIt1 _Last1, cons
         if (_Count >= 2) {
             _TRY_BEGIN
             _Static_partitioned_equal2 _Operation{_Hw_threads, _Count, _Pass_fn(_Pred), _UFirst1, _UFirst2};
-            if (!_Operation._Basis1._Populate(
-                    _Operation._Team, _UFirst1, _ULast1)) { // left sequence didn't have length _Count
+            if (!_Operation._Basis1._Populate(_Operation._Team, _UFirst1, _ULast1)) {
+                // left sequence didn't have length _Count
                 return false;
             }

-            if (!_Operation._Basis2._Populate(
-                    _Operation._Team, _UFirst2, _ULast2)) { // right sequence didn't have length _Count
+            if (!_Operation._Basis2._Populate(_Operation._Team, _UFirst2, _ULast2)) {
+                // right sequence didn't have length _Count
                 return false;
             }
@@ -2963,8 +2956,8 @@ struct _Static_partitioned_stable_sort3 {
                 _Basis._Get_first(_Base, _Team._Get_chunk_offset(_Base)), _Pred);
         }

-        if (!_Visitor._Go_to_parent()) { // temporary bits have been copied back to the input, no parent, so
-                                         // we're done
+        if (!_Visitor._Go_to_parent()) {
+            // temporary bits have been copied back to the input, no parent, so we're done
             _Temp_buf._Destroy_all();
             return _Cancellation_status::_Canceled;
         }
@@ -3214,8 +3207,8 @@ struct _Static_partitioned_is_partitioned {
             return _Cancellation_status::_Canceled;
         }

-        // after determining the is_partitioned status for this chunk, we need to update the chunk numbers for leftmost
-        // F and rightmost T
+        // after determining the is_partitioned status for this chunk,
+        // we need to update the chunk numbers for leftmost F and rightmost T
         auto _Old_true = _Rightmost_true.load();
         if (_This_chunk_status & _Contains_true) {
             while (_Target_chunk_number > _Old_true) {
@@ -3799,8 +3792,8 @@ struct _Static_partitioned_set_subtraction {
         auto [_Range1_chunk_first, _Range1_chunk_last] = _Basis._Get_chunk(_Key);
         const bool _Last_chunk = _Chunk_number == _Team._Chunks - 1;

-        // Get appropriate range for _Range1. We don't want any spans of equal elements to reach across chunk
-        // boundaries.
+        // Get appropriate range for _Range1.
+        // We don't want any spans of equal elements to reach across chunk boundaries.
         if (!_Last_chunk) {
             // Slide _Range1_chunk_last to the left so that there are no copies of *_Range1_chunk_last in _Range1_chunk.
             // Note that we know that this chunk is not the last, so we can look at the element at _Range1_chunk_last.
@@ -3825,8 +3818,8 @@
         // Publish results to rest of chunks.
         if (_Chunk_number == 0) {
-            // Chunk 0 is special as it has no predecessor; its local and total sums are the same and we can immediately
-            // put its results in _Dest.
+            // Chunk 0 is special as it has no predecessor;
+            // its local and total sums are the same and we can immediately put its results in _Dest.
             const auto _Num_results = _Set_oper_per_chunk._Update_dest(
                 _Range1_chunk_first, _Range1_chunk_last, _Range2_chunk_first, _Range2_chunk_last, _Dest, _Pred);
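The boundary adjustment described above can be modeled with one standard call. A sketch (names are mine; it assumes the range is sorted consistently with the predicate, and that this is not the final chunk, since it dereferences last):

    #include <algorithm>

    // Retreat the chunk's end to the first element equal to *last, so a run of
    // equal elements never straddles two chunks.
    template <class RanIt, class Pr>
    RanIt slide_boundary_left(RanIt first, RanIt last, Pr pred) {
        return std::lower_bound(first, last, *last, pred);
    }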
@@ -3972,8 +3965,8 @@ struct _Set_difference_per_chunk {
     template <class _RanIt1, class _RanIt2, class _RanIt3, class _Pr>
     _Common_diff_t<_RanIt1, _RanIt2, _RanIt3> _Update_dest(
         _RanIt1 _First1, const _RanIt1 _Last1, _RanIt2 _First2, _RanIt2 _Last2, _RanIt3 _Dest, _Pr _Pred) {
-        // Copy elements from [_First1, _Last1), except those in [_First2, _Last2) according to _Pred, to _Dest. Returns
-        // the number of elements stored.
+        // Copy elements from [_First1, _Last1), except those in [_First2, _Last2) according to _Pred, to _Dest.
+        // Returns the number of elements stored.
         return _STD set_difference(_First1, _Last1, _First2, _Last2, _Dest, _Pred) - _Dest;
     }
@@ -4141,9 +4134,8 @@ _NODISCARD _Ty reduce(
         _Static_partitioned_reduce2 _Operation{
             _Count, _Chunks, _UFirst, _Passed_fn};
         {
-            // we don't use _Run_chunked_parallel_work here because the initial value
-            // on background threads is synthesized from the input, but on this thread
-            // the initial value is _Val
+            // we don't use _Run_chunked_parallel_work here because the initial value on background threads
+            // is synthesized from the input, but on this thread the initial value is _Val
             const _Work_ptr _Work{_Operation}; // setup complete, hereafter nothrow or terminate
             _Work._Submit_for_chunks(_Hw_threads, _Chunks);
@@ -4365,8 +4357,7 @@ _NODISCARD _Ty transform_reduce(_ExPo&&, const _FwdIt _First, const _FwdIt _Last
 }

 // PARALLEL FUNCTION TEMPLATE exclusive_scan
-struct _No_init_tag { // tag to indicate that no initial value is to be used
-};
+struct _No_init_tag {}; // tag to indicate that no initial value is to be used

 template <class _FwdIt1, class _FwdIt2, class _BinOp, class _Ty>
 _FwdIt2 _Exclusive_scan_per_chunk(_FwdIt1 _First, const _FwdIt1 _Last, _FwdIt2 _Dest, _BinOp _Reduce_op, _Ty& _Val) {
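To ground the exclusive_scan machinery that _No_init_tag feeds: each chunk performs a plain exclusive scan once the sum of everything before it is known, and the tag marks chunks that must start before that sum is available. A minimal per-chunk scan with illustrative names (a sketch, not the header's overload set):

    // Write the running total *before* folding in each element (exclusive),
    // and return the outgoing total, which becomes the next chunk's input.
    template <class It, class Out, class Op, class T>
    T exclusive_scan_chunk(It first, It last, Out dest, Op op, T preceding_total) {
        for (; first != last; ++first, ++dest) {
            *dest           = preceding_total;
            preceding_total = op(preceding_total, *first);
        }
        return preceding_total;
    }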
diff --git a/stl/inc/xlocinfo.h b/stl/inc/xlocinfo.h
index f6d462c4202..b9756b03dd2 100644
--- a/stl/inc/xlocinfo.h
+++ b/stl/inc/xlocinfo.h
@@ -136,10 +136,9 @@ _Success_(return > 0) _ACRTIMP size_t __cdecl _Wcsftime(
 _END_EXTERN_C

 #ifdef _M_CEE_PURE
-    [System::Runtime::InteropServices::DllImport(_CRT_MSVCP_CURRENT, EntryPoint = "_GetLocaleForCP",
-        CallingConvention = System::Runtime::InteropServices::CallingConvention::Cdecl)] extern "C" _locale_t
+[System::Runtime::InteropServices::DllImport(_CRT_MSVCP_CURRENT, EntryPoint = "_GetLocaleForCP",
+    CallingConvention = System::Runtime::InteropServices::CallingConvention::Cdecl)] extern "C" _locale_t
 _GetLocaleForCP(unsigned int);
-
 #else // _M_CEE_PURE
 _MRTIMP2 _locale_t __cdecl _GetLocaleForCP(unsigned int);
 #endif // _M_CEE_PURE
diff --git a/stl/msbuild/stl_base/stl.files.settings.targets b/stl/msbuild/stl_base/stl.files.settings.targets
index 99a3ac34d58..3349f56ca10 100644
--- a/stl/msbuild/stl_base/stl.files.settings.targets
+++ b/stl/msbuild/stl_base/stl.files.settings.targets
@@ -168,6 +168,7 @@ SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
       (controlled by IncludeInLink and IncludeInImportLib).
     -->
...
diff --git a/stl/src/filesystem.cpp b/stl/src/filesystem.cpp
...
-    static_assert(sizeof(uintmax_t) == sizeof(ULARGE_INTEGER) && alignof(uintmax_t) == alignof(ULARGE_INTEGER),
-        "Size and alignment must match for reinterpret_cast");
-
-    [[nodiscard]] __std_win_error _Fs_space_attempt(wchar_t* const _Temp_buffer, const DWORD _Temp_buffer_characters,
-        const wchar_t* const _Target, uintmax_t* const _Available, uintmax_t* const _Total_bytes,
-        uintmax_t* const _Free_bytes) noexcept {
-        if (GetVolumePathNameW(_Target, _Temp_buffer, _Temp_buffer_characters)) {
-            if (GetDiskFreeSpaceExW(_Temp_buffer, reinterpret_cast<ULARGE_INTEGER*>(_Available),
-                    reinterpret_cast<ULARGE_INTEGER*>(_Total_bytes), reinterpret_cast<ULARGE_INTEGER*>(_Free_bytes))) {
-                return __std_win_error::_Success;
-            }
-        }
-
-        return __std_win_error{GetLastError()};
-    }
 } // unnamed namespace

 _EXTERN_C
-    [[nodiscard]] __std_ulong_and_error __stdcall __std_fs_get_full_path_name(
-        const wchar_t* _Source, unsigned long _Target_size, wchar_t* _Target) noexcept { // calls GetFullPathNameW
+[[nodiscard]] __std_ulong_and_error __stdcall __std_fs_get_full_path_name(
+    const wchar_t* _Source, unsigned long _Target_size, wchar_t* _Target) noexcept { // calls GetFullPathNameW
     const auto _Result = GetFullPathNameW(_Source, _Target_size, _Target, nullptr);
     return {_Result, _Result == 0 ? __std_win_error{GetLastError()} : __std_win_error::_Success};
 }
@@ -760,43 +744,6 @@ __std_win_error __stdcall __std_fs_get_file_id(__std_fs_file_id* const _Id, cons
     return __std_win_error::_Success;
 }

-[[nodiscard]] __std_win_error __stdcall __std_fs_space(const wchar_t* const _Target, uintmax_t* const _Available,
-    uintmax_t* const _Total_bytes, uintmax_t* const _Free_bytes) noexcept {
-    // get capacity information for the volume on which the file _Target resides
-    __std_win_error _Last_error;
-    if (GetFileAttributesW(_Target) == INVALID_FILE_ATTRIBUTES) {
-        _Last_error = __std_win_error{GetLastError()};
-    } else {
-        {
-            constexpr DWORD _Static_size = MAX_PATH;
-            wchar_t _Temp_buf[_Static_size];
-            _Last_error = _Fs_space_attempt(_Temp_buf, _Static_size, _Target, _Available, _Total_bytes, _Free_bytes);
-            if (_Last_error == __std_win_error::_Success) {
-                return __std_win_error::_Success;
-            }
-        }
-
-        if (_Last_error == __std_win_error::_Filename_exceeds_range) {
-            constexpr DWORD _Dynamic_size = USHRT_MAX + 1; // assuming maximum NT path fits in a UNICODE_STRING
-            const auto _Temp_buf = _malloc_crt_t(wchar_t, _Dynamic_size);
-            if (_Temp_buf) {
-                _Last_error =
-                    _Fs_space_attempt(_Temp_buf.get(), _Dynamic_size, _Target, _Available, _Total_bytes, _Free_bytes);
-                if (_Last_error == __std_win_error::_Success) {
-                    return __std_win_error::_Success;
-                }
-            } else {
-                _Last_error = __std_win_error::_Not_enough_memory;
-            }
-        }
-    }
-
-    *_Available = ~0ull;
-    *_Total_bytes = ~0ull;
-    *_Free_bytes = ~0ull;
-    return _Last_error;
-}
-
 [[nodiscard]] __std_ulong_and_error __stdcall __std_fs_get_temp_path(wchar_t* const _Target) noexcept {
     // calls GetTempPathW
     // If getting the path failed, returns 0 size; otherwise, returns the size of the
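For context on the two Win32 calls being relocated, here is a freestanding sketch of the same volume-capacity probe, minus the long-path retry and the STL-internal types (names are mine, not the library's):

    #include <Windows.h>
    #include <cstdint>

    // Map a file path to its volume root, then query that volume's capacity.
    bool query_disk_space(
        const wchar_t* target, std::uint64_t& available, std::uint64_t& total, std::uint64_t& free_bytes) {
        wchar_t volume[MAX_PATH];
        if (!GetVolumePathNameW(target, volume, MAX_PATH)) {
            return false; // a path-too-long failure here is what triggers the heap retry above
        }
        ULARGE_INTEGER avail, cap, free_total;
        if (!GetDiskFreeSpaceExW(volume, &avail, &cap, &free_total)) {
            return false;
        }
        available  = avail.QuadPart;      // bytes available to the caller (quota-aware)
        total      = cap.QuadPart;        // total bytes on the volume
        free_bytes = free_total.QuadPart; // total free bytes
        return true;
    }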
diff --git a/stl/src/filesystem_space.cpp b/stl/src/filesystem_space.cpp
new file mode 100644
index 00000000000..48253129e38
--- /dev/null
+++ b/stl/src/filesystem_space.cpp
@@ -0,0 +1,73 @@
+// Copyright (c) Microsoft Corporation.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// This must be as small as possible, because its contents are
+// injected into the msvcprt.lib and msvcprtd.lib import libraries.
+// Do not include or define anything else here.
+// In particular, basic_string must not be included here.
+
+// TRANSITION, the code in this file should be moved back to filesystem.cpp
+// when a Windows 10 SDK beyond version 1903 is available (see GH-322).
+
+#include <climits>
+#include <internal_shared.h>
+#include <xfilesystem_abi.h>
+
+#include <Windows.h>
+
+namespace {
+    static_assert(sizeof(uintmax_t) == sizeof(ULARGE_INTEGER) && alignof(uintmax_t) == alignof(ULARGE_INTEGER),
+        "Size and alignment must match for reinterpret_cast");
+
+    [[nodiscard]] __std_win_error _Fs_space_attempt(wchar_t* const _Temp_buffer, const DWORD _Temp_buffer_characters,
+        const wchar_t* const _Target, uintmax_t* const _Available, uintmax_t* const _Total_bytes,
+        uintmax_t* const _Free_bytes) noexcept {
+        if (GetVolumePathNameW(_Target, _Temp_buffer, _Temp_buffer_characters)) {
+            if (GetDiskFreeSpaceExW(_Temp_buffer, reinterpret_cast<ULARGE_INTEGER*>(_Available),
+                    reinterpret_cast<ULARGE_INTEGER*>(_Total_bytes), reinterpret_cast<ULARGE_INTEGER*>(_Free_bytes))) {
+                return __std_win_error::_Success;
+            }
+        }
+
+        return __std_win_error{GetLastError()};
+    }
+} // unnamed namespace
+
+_EXTERN_C
+[[nodiscard]] __std_win_error __stdcall __std_fs_space(const wchar_t* const _Target, uintmax_t* const _Available,
+    uintmax_t* const _Total_bytes, uintmax_t* const _Free_bytes) noexcept {
+    // get capacity information for the volume on which the file _Target resides
+    __std_win_error _Last_error;
+    if (GetFileAttributesW(_Target) == INVALID_FILE_ATTRIBUTES) {
+        _Last_error = __std_win_error{GetLastError()};
+    } else {
+        {
+            constexpr DWORD _Static_size = MAX_PATH;
+            wchar_t _Temp_buf[_Static_size];
+            _Last_error = _Fs_space_attempt(_Temp_buf, _Static_size, _Target, _Available, _Total_bytes, _Free_bytes);
+            if (_Last_error == __std_win_error::_Success) {
+                return __std_win_error::_Success;
+            }
+        }
+
+        if (_Last_error == __std_win_error::_Filename_exceeds_range) {
+            constexpr DWORD _Dynamic_size = USHRT_MAX + 1; // assuming maximum NT path fits in a UNICODE_STRING
+            const auto _Temp_buf = _malloc_crt_t(wchar_t, _Dynamic_size);
+            if (_Temp_buf) {
+                _Last_error =
+                    _Fs_space_attempt(_Temp_buf.get(), _Dynamic_size, _Target, _Available, _Total_bytes, _Free_bytes);
+                if (_Last_error == __std_win_error::_Success) {
+                    return __std_win_error::_Success;
+                }
+            } else {
+                _Last_error = __std_win_error::_Not_enough_memory;
+            }
+        }
+    }
+
+    *_Available = ~0ull;
+    *_Total_bytes = ~0ull;
+    *_Free_bytes = ~0ull;
+    return _Last_error;
+}
+_END_EXTERN_C
diff --git a/stl/src/future.cpp b/stl/src/future.cpp
index 7400efc01ce..77264b2de59 100644
--- a/stl/src/future.cpp
+++ b/stl/src/future.cpp
@@ -4,15 +4,14 @@
 #include <exception>
 #include <future>
 #include <system_error>
+
 _STD_BEGIN
-[[noreturn]] _CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL
-_Throw_future_error(const error_code& _Code) { // throw an exception
+[[noreturn]] _CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL _Throw_future_error(const error_code& _Code) {
     _THROW(future_error(_Code));
 }

-[[noreturn]] _CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL _Rethrow_future_exception(
-    exception_ptr _Ptr) { // rethrow an exception
+[[noreturn]] _CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL _Rethrow_future_exception(exception_ptr _Ptr) {
     _STD rethrow_exception(_Ptr);
 }
diff --git a/stl/src/locale0.cpp b/stl/src/locale0.cpp
index cb1bafd5415..8fa4fee5f77 100644
--- a/stl/src/locale0.cpp
+++ b/stl/src/locale0.cpp
@@ -18,9 +18,7 @@
 #pragma init_seg(lib)

 _STD_BEGIN
-
-[[noreturn]] _CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL
-_Xbad_alloc();
+[[noreturn]] _CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL _Xbad_alloc();

 struct _Fac_node { // node for lazy facet recording
     _Fac_node(_Fac_node* _Nextarg, _Facet_base* _Facptrarg)
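The reindented declarations in these sources are the payoff of the StatementMacros entries at the top of this diff. Simplified expansions of the macros involved (the real definitions in the yvals headers carry managed-code conditionals omitted here):

    // Simplified: none of these forms a complete statement on its own, which is
    // why clang-format needs StatementMacros to avoid indenting what follows.
    #define _STD_BEGIN    namespace std {
    #define _STD_END      }
    #define _STDEXT_BEGIN namespace stdext {
    #define _STDEXT_END   }
    #define _EXTERN_C     extern "C" {
    #define _END_EXTERN_C }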
diff --git a/stl/src/xthrow.cpp b/stl/src/xthrow.cpp
index 52eb1722304..a9afc6054d6 100644
--- a/stl/src/xthrow.cpp
+++ b/stl/src/xthrow.cpp
@@ -9,9 +9,7 @@
 #include <stdexcept>

 _STD_BEGIN
-
-[[noreturn]] _CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL
-_Xbad_alloc() {
+[[noreturn]] _CRTIMP2_PURE void __CLRCALL_PURE_OR_CDECL _Xbad_alloc() {
     _THROW(bad_alloc{});
 }
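_Xbad_alloc and the future helpers share one pattern: the throw lives out of line, exported from the DLL, so header code gets a small [[noreturn]] call instead of inline throw machinery. A minimal analogue, assuming only the standard library:

    #include <new>

    // Out-of-line throw helper: callers emit a cheap call, and the compiler can
    // prune everything after it as unreachable thanks to [[noreturn]].
    [[noreturn]] void throw_bad_alloc() {
        throw std::bad_alloc{};
    }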