5 changes: 0 additions & 5 deletions CMakeLists.txt
@@ -67,11 +67,6 @@ elseif(VCLIBS_TARGET_ARCHITECTURE STREQUAL "x64")
     set(VCLIBS_I386_OR_AMD64 "amd64")
     set(VCLIBS_X86_OR_X64 "x64")
     add_compile_definitions(_AMD64_ _VCRT_WIN32_WINNT=0x0501 _STL_WIN32_WINNT=0x0501)
-elseif(VCLIBS_TARGET_ARCHITECTURE MATCHES "^(arm|armv7)$")
-    set(VCLIBS_TARGET_ARCHITECTURE "arm")
-    set(VCLIBS_I386_OR_AMD64 "arm")
-    set(VCLIBS_X86_OR_X64 "arm")
-    add_compile_definitions(_ARM_ _VCRT_WIN32_WINNT=0x0602 _STL_WIN32_WINNT=0x0602)
 elseif(VCLIBS_TARGET_ARCHITECTURE STREQUAL "arm64")
     set(VCLIBS_TARGET_ARCHITECTURE "arm64")
     set(VCLIBS_I386_OR_AMD64 "arm64")
25 changes: 0 additions & 25 deletions CMakePresets.json
@@ -46,26 +46,6 @@
                 ]
             }
         },
-        {
-            "name": "ARM",
-            "inherits": "base",
-            "description": "ARM Ninja Config",
-            "architecture": {
-                "strategy": "external",
-                "value": "ARM"
-            },
-            "condition": {
-                "type": "inList",
-                "string": "$env{Platform}",
-                "list": [
-                    "arm",
-                    ""
-                ]
-            },
-            "cacheVariables": {
-                "TESTS_BUILD_ONLY": true
-            }
-        },
         {
             "name": "ARM64",
             "inherits": "base",
@@ -119,11 +99,6 @@
             "configurePreset": "x64",
             "description": "Build x64 STL"
         },
-        {
-            "name": "ARM",
-            "configurePreset": "ARM",
-            "description": "Build ARM STL"
-        },
         {
             "name": "ARM64",
             "configurePreset": "ARM64",
14 changes: 1 addition & 13 deletions README.md
@@ -39,7 +39,7 @@ soon as possible.)
   and fully ported libcxx to run under [lit][] using the various configurations/compilers we test internally.
 
 * Continuous Integration: **In progress.** We've set up Azure Pipelines to validate changes to the repository.
-  Currently, it builds the STL (native desktop for x86, x64, ARM, and ARM64). Also, it strictly verifies that all of our
+  Currently, it builds the STL (native desktop for x86, x64, and ARM64). Also, it strictly verifies that all of our
   files have been formatted with [clang-format][] and follow our other whitespace conventions.
 
 * Contribution Guidelines: **Coming soon.** Working on the STL's code involves following many rules. We have codebase
@@ -145,8 +145,6 @@ Just try to follow these rules, so we can spend more time fixing bugs and implem
 * Select "Windows 11 SDK (10.0.26100.3916)" in the VS Installer.
 * Select "MSVC v143 - VS 2022 C++ ARM64/ARM64EC build tools (Latest)" in the VS Installer
   if you would like to build the ARM64/ARM64EC target.
-* Select "MSVC v143 - VS 2022 C++ ARM build tools (Latest)" in the VS Installer
-  if you would like to build the ARM target.
 * We recommend selecting "C++ CMake tools for Windows" in the VS Installer.
   This will ensure that you're using supported versions of CMake and Ninja.
 * Otherwise, install [CMake][] 3.31.0 or later, and [Ninja][] 1.12.1 or later.
@@ -164,8 +162,6 @@ Just try to follow these rules, so we can spend more time fixing bugs and implem
 * Select "Windows 11 SDK (10.0.26100.3916)" in the VS Installer.
 * Select "MSVC v143 - VS 2022 C++ ARM64/ARM64EC build tools (Latest)" in the VS Installer
   if you would like to build the ARM64/ARM64EC target.
-* Select "MSVC v143 - VS 2022 C++ ARM build tools (Latest)" in the VS Installer
-  if you would like to build the ARM target.
 * We recommend selecting "C++ CMake tools for Windows" in the VS Installer.
   This will ensure that you're using supported versions of CMake and Ninja.
 * Otherwise, install [CMake][] 3.31.0 or later, and [Ninja][] 1.12.1 or later.
@@ -188,14 +184,6 @@ To build the x64 target (recommended):
 3. `cmake --preset x64`
 4. `cmake --build --preset x64`
 
-To build the ARM target:
-
-1. `"C:\Program Files\Microsoft Visual Studio\2022\Preview\VC\Auxiliary\Build\vcvarsall.bat" x64_arm`
-   * If you installed VS to a non-default location, change this path accordingly.
-2. Change directories to the previously cloned `STL` directory.
-3. `cmake --preset ARM`
-4. `cmake --build --preset ARM`
-
 To build the ARM64 target:
 
 1. `"C:\Program Files\Microsoft Visual Studio\2022\Preview\VC\Auxiliary\Build\vcvarsall.bat" x64_arm64`
2 changes: 1 addition & 1 deletion azure-devops/asan-pipeline.yml
@@ -45,4 +45,4 @@ stages:
       asanBuild: true
       testTargets: STL-ASan-CI
 
-# no coverage for ARM and ARM64
+# no coverage for ARM64
1 change: 0 additions & 1 deletion azure-devops/provision-image.ps1
@@ -25,7 +25,6 @@ $VisualStudioWorkloads = @(
     'Microsoft.VisualStudio.Component.VC.CMake.Project',
     'Microsoft.VisualStudio.Component.VC.CoreIde',
     'Microsoft.VisualStudio.Component.VC.Llvm.Clang',
-    'Microsoft.VisualStudio.Component.VC.Tools.ARM',
     'Microsoft.VisualStudio.Component.VC.Tools.ARM64',
     'Microsoft.VisualStudio.Component.VC.Tools.ARM64EC',
     'Microsoft.VisualStudio.Component.VC.Tools.x86.x64',
33 changes: 1 addition & 32 deletions azure-pipelines.yml
@@ -1,7 +1,7 @@
 # Copyright (c) Microsoft Corporation.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 
-# Build STL targeting x86, x64, arm, arm64, arm64ec
+# Build STL targeting x86, x64, arm64, arm64ec
 
 variables:
   - template: azure-devops/config.yml
@@ -52,22 +52,6 @@ stages:
       numShards: 1
       skipTesting: true
 
-# - stage: Early_Build_ARM
-#   dependsOn: []
-#   displayName: 'Early Build ARM'
-#   pool:
-#     name: ${{ variables.poolName }}
-#     demands: ${{ variables.poolDemands }}
-#   jobs:
-#     - template: azure-devops/build-and-test.yml
-#       parameters:
-#         hostArch: x64
-#         targetArch: arm
-#         targetPlatform: arm
-#         analyzeBuild: true
-#         numShards: 1
-#         skipTesting: true
-
 - stage: Early_Build_ARM64
   dependsOn: []
   displayName: 'Early Build ARM64'
@@ -107,7 +91,6 @@ stages:
     - Code_Format
     - Early_Build_x64
    - Early_Build_x86
-    # - Early_Build_ARM
     - Early_Build_ARM64
     - Early_Build_ARM64EC
   displayName: 'Build and Test x64'
@@ -134,20 +117,6 @@ stages:
       targetArch: x86
       targetPlatform: x86
 
-# - stage: Build_And_Test_ARM
-#   dependsOn: Build_And_Test_x64
-#   displayName: 'Build and Test ARM'
-#   pool:
-#     name: ${{ variables.poolName }}
-#     demands: ${{ variables.poolDemands }}
-#   jobs:
-#     - template: azure-devops/build-and-test.yml
-#       parameters:
-#         hostArch: x64
-#         targetArch: arm
-#         targetPlatform: arm
-#         testsBuildOnly: true
-
 - stage: Build_And_Test_ARM64
   dependsOn: Build_And_Test_x64
   displayName: 'Build and Test ARM64'
2 changes: 0 additions & 2 deletions benchmarks/CMakeLists.txt
@@ -23,8 +23,6 @@ if(DEFINED STL_BINARY_DIR)
         set(VCLIBS_I386_OR_AMD64 "i386")
     elseif(VCLIBS_TARGET_ARCHITECTURE STREQUAL "x64")
         set(VCLIBS_I386_OR_AMD64 "amd64")
-    elseif(VCLIBS_TARGET_ARCHITECTURE MATCHES "^(arm|armv7)$")
-        set(VCLIBS_I386_OR_AMD64 "arm")
     elseif(VCLIBS_TARGET_ARCHITECTURE STREQUAL "arm64")
         set(VCLIBS_I386_OR_AMD64 "arm64")
     elseif(VCLIBS_TARGET_ARCHITECTURE STREQUAL "arm64ec")
10 changes: 5 additions & 5 deletions stl/inc/__msvc_bit_utils.hpp
@@ -126,9 +126,9 @@ _NODISCARD int _Checked_x86_x64_countl_zero(const _Ty _Val) noexcept {
 }
 #endif // (defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)) || (defined(_M_X64) && !defined(_M_ARM64EC))
 
-#if defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
+#if defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
 template <class _Ty>
-_NODISCARD int _Checked_arm_arm64_countl_zero(const _Ty _Val) noexcept {
+_NODISCARD int _Checked_arm64_countl_zero(const _Ty _Val) noexcept {
     constexpr int _Digits = _Unsigned_integer_digits<_Ty>;
     if (_Val == 0) {
         return _Digits;
@@ -140,7 +140,7 @@ _NODISCARD int _Checked_arm_arm64_countl_zero(const _Ty _Val) noexcept {
         return static_cast<int>(_CountLeadingZeros64(_Val));
     }
 }
-#endif // defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
+#endif // defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
 #endif // _HAS_COUNTL_ZERO_INTRINSICS
 
 // Implementation of countr_zero without using specialized CPU instructions.
@@ -157,14 +157,14 @@ _NODISCARD constexpr int _Countr_zero_fallback(const _Ty _Val) noexcept {
 template <class _Ty>
 _NODISCARD constexpr int _Popcount_fallback(_Ty _Val) noexcept {
     constexpr int _Digits = _Unsigned_integer_digits<_Ty>;
-#if (defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)) || defined(_M_ARM)
+#if defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
     if constexpr (_Digits == 64) {
         // 64-bit bit operations on architectures without 64-bit registers are less efficient,
         // hence we split the value so that it fits in 32-bit registers
         return _Popcount_fallback(static_cast<unsigned long>(_Val))
             + _Popcount_fallback(static_cast<unsigned long>(_Val >> 32));
     } else
-#endif // (defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)) || defined(_M_ARM)
+#endif // defined(_M_IX86) && !defined(_M_HYBRID_X86_ARM64)
     {
         // we static_cast these bit patterns in order to truncate them to the correct size
         _Val = static_cast<_Ty>(_Val - ((_Val >> 1) & static_cast<_Ty>(0x5555'5555'5555'5555ull)));
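The `_Popcount_fallback` kept here (now gated to x86 only, since ARM32 is gone) is the classic SWAR population count, with 64-bit inputs split into 32-bit halves on targets lacking 64-bit registers. A minimal sketch of the same two ideas, using illustrative names rather than the STL's internals:

```cpp
#include <cstdint>
#include <iostream>

// Classic SWAR ("SIMD within a register") popcount, the same bit-twiddling
// used by _Popcount_fallback; names and structure here are illustrative.
constexpr int popcount64(std::uint64_t v) noexcept {
    v = v - ((v >> 1) & 0x5555'5555'5555'5555ull); // 2-bit partial sums
    v = (v & 0x3333'3333'3333'3333ull) + ((v >> 2) & 0x3333'3333'3333'3333ull); // 4-bit sums
    v = (v + (v >> 4)) & 0x0F0F'0F0F'0F0F'0F0Full; // 8-bit sums
    return static_cast<int>((v * 0x0101'0101'0101'0101ull) >> 56); // add all bytes
}

// The 32-bit-register trick from the diff: split, count halves, add.
constexpr int popcount64_split(std::uint64_t v) noexcept {
    return popcount64(static_cast<std::uint32_t>(v))
         + popcount64(static_cast<std::uint32_t>(v >> 32));
}

static_assert(popcount64(0xFF00'FF00'FF00'FF00ull) == 32);
static_assert(popcount64_split(0xFFull) == 8);

int main() {
    std::cout << popcount64(0b1011u) << '\n'; // prints 3
}
```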
2 changes: 1 addition & 1 deletion stl/inc/__msvc_chrono.hpp
@@ -669,7 +669,7 @@ namespace chrono {
             constexpr long long _Multiplier = period::den / _TenMHz;
             return time_point(duration(_Ctr * _Multiplier));
         } else if (_Freq == _TwentyFourMHz) {
-            // 24 MHz is a common frequency on ARM/ARM64, including cases where it emulates x86/x64.
+            // 24 MHz is a common frequency on ARM64, including cases where it emulates x86/x64.
             const long long _Whole = (_Ctr / _TwentyFourMHz) * period::den;
             const long long _Part = (_Ctr % _TwentyFourMHz) * period::den / _TwentyFourMHz;
             return time_point(duration(_Whole + _Part));
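The retained whole/part arithmetic above is what keeps this conversion from overflowing: multiplying a raw 24 MHz tick counter by `period::den` (1'000'000'000 for nanoseconds) up front would exceed 64 bits after only a few minutes' worth of ticks. A sketch of the same conversion outside the header (the 24 MHz constant mirrors the diff; `ticks_to_ns` is a made-up name):

```cpp
#include <iostream>

// Convert 24 MHz ticks to nanoseconds without overflow: count whole seconds
// exactly, then scale only the sub-second remainder (always < 24'000'000).
long long ticks_to_ns(long long ticks) {
    constexpr long long freq = 24'000'000;   // ticks per second (24 MHz)
    constexpr long long den = 1'000'000'000; // nanoseconds per second
    const long long whole = (ticks / freq) * den;       // exact
    const long long part = (ticks % freq) * den / freq; // intermediate < 2.4e16
    return whole + part;
}

int main() {
    std::cout << ticks_to_ns(24'000'000) << '\n'; // 1000000000 (one second)
    std::cout << ticks_to_ns(36'000'000) << '\n'; // 1500000000 (1.5 seconds)
}
```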
12 changes: 4 additions & 8 deletions stl/inc/__msvc_int128.hpp
@@ -56,21 +56,17 @@ _NODISCARD constexpr int _Countl_zero_internal(const _Ty _Val) noexcept {
     if (!_Is_constant_evaluated()) {
         return _Checked_x86_x64_countl_zero(_Val);
     }
-#elif defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
+#elif defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
     if (!_Is_constant_evaluated()) {
-        return _Checked_arm_arm64_countl_zero(_Val);
+        return _Checked_arm64_countl_zero(_Val);
     }
-#endif // defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
+#endif // defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
 #endif // _HAS_COUNTL_ZERO_INTRINSICS
 
     return _Countl_zero_fallback(_Val);
 }
 
-struct
-#ifndef _M_ARM
-    alignas(16)
-#endif
-    _Base128 {
+struct alignas(16) _Base128 {
     uint64_t _Word[2];
 
     constexpr void _Left_shift(const unsigned char _Count) noexcept {
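With `_Base128` now unconditionally `alignas(16)`, the two-word layout is the whole story. For intuition, counting leading zeros of a 128-bit value stored as two 64-bit words reduces to one 64-bit count plus an offset; a sketch (the hi/lo split and names are assumptions for illustration, not `_Base128`'s actual interface):

```cpp
#include <bit>
#include <cstdint>
#include <iostream>

// countl_zero over a 128-bit value held as two 64-bit words: if the high word
// has any set bit, its leading zeros answer the question; otherwise the whole
// high word contributes 64 zeros and we count in the low word.
int countl_zero_128(std::uint64_t hi, std::uint64_t lo) noexcept {
    return hi != 0 ? std::countl_zero(hi) : 64 + std::countl_zero(lo);
}

int main() {
    std::cout << countl_zero_128(0, 1) << '\n'; // 127
    std::cout << countl_zero_128(1, 0) << '\n'; // 63
    std::cout << countl_zero_128(0, 0) << '\n'; // 128
}
```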
4 changes: 2 additions & 2 deletions stl/inc/__msvc_sanitizer_annotate_container.hpp
@@ -28,7 +28,7 @@ _STL_DISABLE_CLANG_WARNINGS
 
 #if !defined(_DISABLE_STL_ANNOTATION) && !defined(_ENABLE_STL_ANNOTATION_ON_UNSUPPORTED_PLATFORMS)
 
-#if defined(_M_ARM64EC) || defined(_M_ARM64) || defined(_M_ARM) || defined(_M_CEE_PURE)
+#if defined(_M_ARM64EC) || defined(_M_ARM64) || defined(_M_CEE_PURE)
 #define _DISABLE_STL_ANNOTATION
 #endif // ^^^ unsupported platform ^^^
 
@@ -157,7 +157,7 @@ void __cdecl __sanitizer_annotate_contiguous_container(
     "/alternatename:___sanitizer_annotate_contiguous_container=___sanitizer_annotate_contiguous_container_default")
 #pragma comment(linker, "/alternatename:__Asan_vector_should_annotate=__Asan_vector_should_annotate_default")
 #pragma comment(linker, "/alternatename:__Asan_string_should_annotate=__Asan_string_should_annotate_default")
-#elif defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64)
+#elif defined(_M_X64) || defined(_M_ARM64)
 #pragma comment(linker, \
     "/alternatename:__sanitizer_annotate_contiguous_container=__sanitizer_annotate_contiguous_container_default")
 #pragma comment(linker, "/alternatename:_Asan_vector_should_annotate=_Asan_vector_should_annotate_default")
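The `/alternatename` pragmas above give each annotation entry point a benign default that the ASan runtime can override by defining the strong symbol; the duplicated spellings exist because x86 decorates `__cdecl` C names with a leading underscore while x64/ARM64 do not. A minimal MSVC-only sketch of the pattern with a hypothetical symbol (`widget_hook` is invented for illustration):

```cpp
#include <cstdio>

extern "C" int widget_hook(); // may be defined by some other library, or not

extern "C" int widget_hook_default() {
    return 0; // harmless fallback used when nothing else defines widget_hook
}

// If widget_hook is otherwise undefined, the linker resolves it to the
// default. Note the extra leading underscore in the x86 name decoration.
#if defined(_M_IX86)
#pragma comment(linker, "/alternatename:_widget_hook=_widget_hook_default")
#else
#pragma comment(linker, "/alternatename:widget_hook=widget_hook_default")
#endif

int main() {
    std::printf("%d\n", widget_hook()); // prints 0 unless a real widget_hook is linked in
}
```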
35 changes: 9 additions & 26 deletions stl/inc/atomic
@@ -108,7 +108,7 @@ extern "C" inline void _Check_memory_order(const unsigned int _Order) noexcept {
 #define _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _Intrinsic, ...) \
     _Check_memory_order(_Order); \
     _Result = _Intrinsic(__VA_ARGS__)
-#elif defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
+#elif defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
 #define _ATOMIC_CHOOSE_INTRINSIC(_Order, _Result, _Intrinsic, ...) \
     switch (_Order) { \
     case _Atomic_memory_order_relaxed: \
@@ -150,11 +150,6 @@ extern "C" inline void _Check_memory_order(const unsigned int _Order) noexcept {
         _Compiler_or_memory_barrier(); \
     }
 
-#define _ATOMIC_STORE_SEQ_CST_ARM(_Width, _Ptr, _Desired) \
-    _Memory_barrier(); \
-    __iso_volatile_store##_Width((_Ptr), (_Desired)); \
-    _Memory_barrier();
-
 #define _ATOMIC_STORE_SEQ_CST_ARM64(_Width, _Ptr, _Desired) \
     __STORE_RELEASE(_Width, _Ptr, _Desired); \
     _Memory_barrier();
@@ -168,12 +163,7 @@ extern "C" inline void _Check_memory_order(const unsigned int _Order) noexcept {
     __iso_volatile_store64((_Ptr), (_Desired)); \
     _Atomic_thread_fence(_Atomic_memory_order_seq_cst);
 
-#if defined(_M_ARM)
-#define _ATOMIC_STORE_SEQ_CST(_Width, _Ptr, _Desired) _ATOMIC_STORE_SEQ_CST_ARM(_Width, (_Ptr), (_Desired))
-#define _ATOMIC_STORE_32_SEQ_CST(_Ptr, _Desired) _ATOMIC_STORE_SEQ_CST_ARM(32, (_Ptr), (_Desired))
-#define _ATOMIC_STORE_64_SEQ_CST(_Ptr, _Desired) _ATOMIC_STORE_SEQ_CST_ARM(64, (_Ptr), (_Desired))
-#elif defined(_M_ARM64) || defined(_M_ARM64EC) \
-    || defined(_M_HYBRID_X86_ARM64) // ^^^ ARM32 / ARM64/ARM64EC/HYBRID_X86_ARM64 vvv
+#if defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
 #define _ATOMIC_STORE_SEQ_CST(_Width, _Ptr, _Desired) _ATOMIC_STORE_SEQ_CST_ARM64(_Width, (_Ptr), (_Desired))
 #define _ATOMIC_STORE_32_SEQ_CST(_Ptr, _Desired) _ATOMIC_STORE_SEQ_CST_ARM64(32, (_Ptr), (_Desired))
 #define _ATOMIC_STORE_64_SEQ_CST(_Ptr, _Desired) _ATOMIC_STORE_SEQ_CST_ARM64(64, (_Ptr), (_Desired))
@@ -209,13 +199,13 @@ extern "C" inline void _Atomic_thread_fence(const unsigned int _Order) noexcept
         (void) _InterlockedIncrement(&_Guard);
         _Compiler_barrier();
     }
-#elif defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
+#elif defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
     if (_Order == _Atomic_memory_order_acquire || _Order == _Atomic_memory_order_consume) {
         _Memory_load_acquire_barrier();
     } else {
         _Memory_barrier();
     }
-#else // ^^^ ARM32/ARM64/ARM64EC/HYBRID_X86_ARM64 / unsupported hardware vvv
+#else // ^^^ ARM64/ARM64EC/HYBRID_X86_ARM64 / unsupported hardware vvv
 #error Unsupported hardware
 #endif // ^^^ unsupported hardware ^^^
 }
@@ -476,13 +466,13 @@ inline void _Atomic_lock_acquire(long& _Spinlock) noexcept {
             _Current_backoff = _Current_backoff < _Max_backoff ? _Current_backoff << 1 : _Max_backoff;
         }
     }
-#elif defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
+#elif defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
     while (_InterlockedExchange_acq(&_Spinlock, 1) != 0) {
         while (__iso_volatile_load32(&reinterpret_cast<int&>(_Spinlock)) != 0) {
             __yield();
         }
     }
-#else // ^^^ defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64) ^^^
+#else // ^^^ defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64) ^^^
 #error Unsupported hardware
 #endif
 }
@@ -1035,13 +1025,7 @@ struct _Atomic_storage<_Ty, 8> { // lock-free using 8-byte intrinsics
 #if _STD_ATOMIC_USE_ARM64_LDAR_STLR == 1
         _ATOMIC_LOAD_ARM64(_As_bytes, 64, _Mem, _Order)
 #else // ^^^ _STD_ATOMIC_USE_ARM64_LDAR_STLR == 1 / _STD_ATOMIC_USE_ARM64_LDAR_STLR != 1 vvv
-
-#ifdef _M_ARM
-        _As_bytes = __ldrexd(_Mem);
-#else
         _As_bytes = __iso_volatile_load64(_Mem);
-#endif
-
         _ATOMIC_POST_LOAD_BARRIER_AS_NEEDED(_Order)
 #endif // ^^^ _STD_ATOMIC_USE_ARM64_LDAR_STLR != 1 ^^^
         return reinterpret_cast<_TVal&>(_As_bytes);
@@ -1949,7 +1933,7 @@ struct _Atomic_pointer : _Atomic_storage<_Ty> {
         const ptrdiff_t _Shift_bytes =
             static_cast<ptrdiff_t>(static_cast<size_t>(_Diff) * sizeof(remove_pointer_t<_Ty>));
         ptrdiff_t _Result;
-#if defined(_M_IX86) || defined(_M_ARM)
+#if defined(_M_IX86)
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedExchangeAdd,
             _STD _Atomic_address_as<long>(this->_Storage), _Shift_bytes);
 #else // ^^^ 32 bits / 64 bits vvv
@@ -2047,7 +2031,7 @@ struct _Atomic_pointer<_Ty&> : _Atomic_storage<_Ty&> {
         const ptrdiff_t _Shift_bytes =
             static_cast<ptrdiff_t>(static_cast<size_t>(_Diff) * sizeof(remove_pointer_t<_Ty>));
         ptrdiff_t _Result;
-#if defined(_M_IX86) || defined(_M_ARM)
+#if defined(_M_IX86)
         _ATOMIC_CHOOSE_INTRINSIC(static_cast<unsigned int>(_Order), _Result, _InterlockedExchangeAdd,
             _STD _Atomic_address_as<long>(this->_Storage), _Shift_bytes);
 #else // ^^^ 32 bits / 64 bits vvv
@@ -2236,7 +2220,7 @@ public:
     }
 
     bool compare_exchange_weak(_Ty& _Expected, const _Ty _Desired) volatile noexcept {
-        // we have no weak CAS intrinsics, even on ARM32/ARM64, so fall back to strong
+        // we have no weak CAS intrinsics, even on ARM64, so fall back to strong
         static_assert(_Deprecate_non_lock_free_volatile<_Ty>, "Never fails");
         return this->compare_exchange_strong(_Expected, _Desired);
     }
@@ -3019,7 +3003,6 @@ _STD_END
 
 #undef _ATOMIC_CHOOSE_INTRINSIC
 #undef _ATOMIC_POST_LOAD_BARRIER_AS_NEEDED
-#undef _ATOMIC_STORE_SEQ_CST_ARM
 #undef _ATOMIC_STORE_SEQ_CST_X86_X64
 #undef _ATOMIC_STORE_32_SEQ_CST_X86_X64
 #undef _ATOMIC_STORE_SEQ_CST
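The `_Atomic_lock_acquire` hunk above shows the shape that survives on ARM64: a test-and-test-and-set loop where an exchange acquires the lock and an inner read-only loop waits politely. A portable sketch of the same shape, with `std::atomic` standing in for the `_InterlockedExchange_acq`/`__iso_volatile_load32`/`__yield` intrinsics (illustrative, not the STL's internal type):

```cpp
#include <atomic>
#include <cstdio>
#include <thread>

class spinlock {
    std::atomic<long> state{0};

public:
    void lock() noexcept {
        // Outer exchange is the test-and-set; acquire order pairs with unlock.
        while (state.exchange(1, std::memory_order_acquire) != 0) {
            // Inner loop spins on plain loads so contending cores don't keep
            // writing the cache line while the lock is held.
            while (state.load(std::memory_order_relaxed) != 0) {
                std::this_thread::yield(); // stand-in for __yield()/pause
            }
        }
    }

    void unlock() noexcept {
        state.store(0, std::memory_order_release);
    }
};

int main() {
    spinlock lock;
    long counter = 0;
    auto work = [&] {
        for (int i = 0; i < 100'000; ++i) {
            lock.lock();
            ++counter;
            lock.unlock();
        }
    };
    std::thread t1{work}, t2{work};
    t1.join();
    t2.join();
    std::printf("%ld\n", counter); // always 200000
}
```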
6 changes: 3 additions & 3 deletions stl/inc/bit
@@ -203,11 +203,11 @@ _NODISCARD constexpr int countl_zero(const _Ty _Val) noexcept {
     if (!_STD is_constant_evaluated()) {
         return _Checked_x86_x64_countl_zero(_Val);
     }
-#elif defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
+#elif defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
     if (!_STD is_constant_evaluated()) {
-        return _Checked_arm_arm64_countl_zero(_Val);
+        return _Checked_arm64_countl_zero(_Val);
     }
-#endif // defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
+#endif // defined(_M_ARM64) || defined(_M_ARM64EC) || defined(_M_HYBRID_X86_ARM64)
 #endif // _HAS_COUNTL_ZERO_INTRINSICS
 
     return _Countl_zero_fallback(_Val);
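`countl_zero` above dispatches to a CPU intrinsic at run time but must remain usable in constant expressions, hence the `is_constant_evaluated()` guard in front of each intrinsic path. A hedged MSVC-flavored sketch of that dispatch for 32-bit values (illustrative names; assumes an x86/x64 target so that `_BitScanReverse` is available):

```cpp
#include <cstdint>
#include <intrin.h>    // _BitScanReverse (MSVC)
#include <type_traits> // std::is_constant_evaluated (C++20)

// Portable constexpr fallback: walk down from the top bit.
constexpr int clz32_fallback(std::uint32_t v) noexcept {
    int count = 0;
    for (std::uint32_t mask = 0x8000'0000u; mask != 0 && (v & mask) == 0; mask >>= 1) {
        ++count;
    }
    return count;
}

constexpr int clz32(std::uint32_t v) noexcept {
    if (!std::is_constant_evaluated()) { // run time: use the CPU instruction
        if (v == 0) {
            return 32;
        }
        unsigned long index;
        _BitScanReverse(&index, v); // index of the highest set bit
        return 31 - static_cast<int>(index);
    }
    return clz32_fallback(v); // compile time: intrinsics aren't constexpr
}

static_assert(clz32(1) == 31); // forced through the constexpr branch

int main() {
    return clz32(0x00FF'0000u); // run-time branch: returns 8
}
```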