diff --git a/Directory.Build.props b/Directory.Build.props
index 6a688d500c4cc..799d054f3b1af 100644
--- a/Directory.Build.props
+++ b/Directory.Build.props
@@ -112,7 +112,7 @@
Properties
- <BuildArchitecture>$([System.Runtime.InteropServices.RuntimeInformation]::ProcessArchitecture.ToString().ToLowerInvariant())</BuildArchitecture>
+ <BuildArchitecture Condition="'$(BuildArchitecture)' == ''">$([System.Runtime.InteropServices.RuntimeInformation]::ProcessArchitecture.ToString().ToLowerInvariant())</BuildArchitecture>
diff --git a/eng/build.sh b/eng/build.sh
index 1179f8efd27a1..5f7a012c9b55b 100755
--- a/eng/build.sh
+++ b/eng/build.sh
@@ -133,8 +133,8 @@ initDistroRid()
local isCrossBuild="$3"
local isPortableBuild="$4"
- # Only pass ROOTFS_DIR if __DoCrossArchBuild is specified.
- if (( isCrossBuild == 1 )); then
+ # Only pass ROOTFS_DIR if __DoCrossArchBuild is specified and the current platform is not OSX, which doesn't use a rootfs
+ if [[ $isCrossBuild == 1 && "$targetOs" != "OSX" ]]; then
passedRootfsDir=${ROOTFS_DIR}
fi
initDistroRidGlobal ${targetOs} ${buildArch} ${isPortableBuild} ${passedRootfsDir}
@@ -153,6 +153,8 @@ portableBuild=1
source $scriptroot/native/init-os-and-arch.sh
+hostArch=$arch
+
# Check if an action is passed in
declare -a actions=("b" "build" "r" "restore" "rebuild" "testnobuild" "sign" "publish" "clean")
actInt=($(comm -12 <(printf '%s\n' "${actions[@]/#/-}" | sort) <(printf '%s\n' "${@/#--/-}" | sort)))
@@ -436,6 +438,6 @@ initDistroRid $os $arch $crossBuild $portableBuild
# URL-encode space (%20) to avoid quoting issues until the msbuild call in /eng/common/tools.sh.
# In *proj files (XML docs), URL-encoded strings are rendered in their decoded form.
cmakeargs="${cmakeargs// /%20}"
-arguments="$arguments /p:TargetArchitecture=$arch"
+arguments="$arguments /p:TargetArchitecture=$arch /p:BuildArchitecture=$hostArch"
arguments="$arguments /p:CMakeArgs=\"$cmakeargs\" $extraargs"
"$scriptroot/common/build.sh" $arguments
diff --git a/eng/native/build-commons.sh b/eng/native/build-commons.sh
index 2e94d68fe692f..303840f1a0620 100755
--- a/eng/native/build-commons.sh
+++ b/eng/native/build-commons.sh
@@ -6,8 +6,8 @@ initTargetDistroRid()
local passedRootfsDir=""
- # Only pass ROOTFS_DIR if cross is specified.
- if [[ "$__CrossBuild" == 1 ]]; then
+ # Only pass ROOTFS_DIR if a cross build is specified and the target platform is not Darwin, which doesn't use a rootfs
+ if [[ "$__CrossBuild" == 1 && "$platform" != "Darwin" ]]; then
passedRootfsDir="$ROOTFS_DIR"
fi
@@ -68,15 +68,28 @@ check_prereqs()
build_native()
{
- platformArch="$1"
- cmakeDir="$2"
- tryrunDir="$3"
- intermediatesDir="$4"
- message="$5"
+ targetOS="$1"
+ platformArch="$2"
+ cmakeDir="$3"
+ tryrunDir="$4"
+ intermediatesDir="$5"
+ cmakeArgs="$6"
+ message="$7"
# All set to commence the build
echo "Commencing build of \"$message\" for $__TargetOS.$__BuildArch.$__BuildType in $intermediatesDir"
+ if [[ "$targetOS" == OSX ]]; then
+ if [[ "$platformArch" == x64 ]]; then
+ cmakeArgs="-DCMAKE_OSX_ARCHITECTURES=\"x86_64\" $cmakeArgs"
+ elif [[ "$platformArch" == arm64 ]]; then
+ cmakeArgs="-DCMAKE_OSX_ARCHITECTURES=\"arm64\" $cmakeArgs"
+ else
+ echo "Error: Unknown OSX architecture $platformArch."
+ exit 1
+ fi
+ fi
+
if [[ "$__UseNinja" == 1 ]]; then
generator="ninja"
buildTool="$(command -v ninja || command -v ninja-build)"
@@ -134,8 +147,8 @@ EOF
fi
engNativeDir="$__RepoRootDir/eng/native"
- __CMakeArgs="-DCLR_ENG_NATIVE_DIR=\"$engNativeDir\" $__CMakeArgs"
- nextCommand="\"$engNativeDir/gen-buildsys.sh\" \"$cmakeDir\" \"$tryrunDir\" \"$intermediatesDir\" $platformArch $__Compiler \"$__CompilerMajorVersion\" \"$__CompilerMinorVersion\" $__BuildType \"$generator\" $scan_build $__CMakeArgs"
+ cmakeArgs="-DCLR_ENG_NATIVE_DIR=\"$engNativeDir\" $cmakeArgs"
+ nextCommand="\"$engNativeDir/gen-buildsys.sh\" \"$cmakeDir\" \"$tryrunDir\" \"$intermediatesDir\" $platformArch $__Compiler \"$__CompilerMajorVersion\" \"$__CompilerMinorVersion\" $__BuildType \"$generator\" $scan_build $cmakeArgs"
echo "Invoking $nextCommand"
eval $nextCommand
@@ -448,7 +461,8 @@ fi
if [[ "$__CrossBuild" == 1 ]]; then
CROSSCOMPILE=1
export CROSSCOMPILE
- if [[ ! -n "$ROOTFS_DIR" ]]; then
+ # Only default ROOTFS_DIR when not targeting Darwin, which doesn't use a rootfs
+ if [[ ! -n "$ROOTFS_DIR" && "$platform" != "Darwin" ]]; then
ROOTFS_DIR="$__RepoRootDir/.tools/rootfs/$__BuildArch"
export ROOTFS_DIR
fi
diff --git a/eng/native/configurecompiler.cmake b/eng/native/configurecompiler.cmake
index 373cc3dd4e2c8..6dba57f8648f0 100644
--- a/eng/native/configurecompiler.cmake
+++ b/eng/native/configurecompiler.cmake
@@ -238,6 +238,7 @@ if (CLR_CMAKE_HOST_UNIX)
add_definitions(-DHOST_UNIX)
if(CLR_CMAKE_HOST_OSX)
+ add_definitions(-DHOST_OSX)
if(CLR_CMAKE_HOST_UNIX_AMD64)
message("Detected OSX x86_64")
elseif(CLR_CMAKE_HOST_UNIX_ARM64)
@@ -374,7 +375,16 @@ if (CLR_CMAKE_HOST_UNIX)
# Specify the minimum supported version of macOS
if(CLR_CMAKE_HOST_OSX)
- set(MACOS_VERSION_MIN_FLAGS -mmacosx-version-min=10.13)
+ if(CLR_CMAKE_HOST_ARCH_ARM64)
+ # 'pthread_jit_write_protect_np' is only available on macOS 11.0 or newer
+ set(MACOS_VERSION_MIN_FLAGS -mmacosx-version-min=11.0)
+ add_compile_options(-arch arm64)
+ elseif(CLR_CMAKE_HOST_ARCH_AMD64)
+ set(MACOS_VERSION_MIN_FLAGS -mmacosx-version-min=10.13)
+ add_compile_options(-arch x86_64)
+ else()
+ clr_unknown_arch()
+ endif()
add_compile_options(${MACOS_VERSION_MIN_FLAGS})
add_linker_flag(${MACOS_VERSION_MIN_FLAGS})
endif(CLR_CMAKE_HOST_OSX)
diff --git a/eng/native/configureplatform.cmake b/eng/native/configureplatform.cmake
index 2bdb152292654..0d4ee0dae6def 100644
--- a/eng/native/configureplatform.cmake
+++ b/eng/native/configureplatform.cmake
@@ -78,9 +78,9 @@ endif(CLR_CMAKE_HOST_OS STREQUAL Linux)
if(CLR_CMAKE_HOST_OS STREQUAL Darwin)
set(CLR_CMAKE_HOST_UNIX 1)
set(CLR_CMAKE_HOST_OSX 1)
- if(CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64)
+ if(CMAKE_OSX_ARCHITECTURES STREQUAL x86_64)
set(CLR_CMAKE_HOST_UNIX_AMD64 1)
- elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL arm64)
+ elseif(CMAKE_OSX_ARCHITECTURES STREQUAL arm64)
set(CLR_CMAKE_HOST_UNIX_ARM64 1)
else()
clr_unknown_arch()
diff --git a/eng/native/gen-buildsys.sh b/eng/native/gen-buildsys.sh
index 1b4c2e02c597f..9e166550616f6 100755
--- a/eng/native/gen-buildsys.sh
+++ b/eng/native/gen-buildsys.sh
@@ -63,7 +63,9 @@ done
cmake_extra_defines=
if [[ "$CROSSCOMPILE" == "1" ]]; then
- if ! [[ -n "$ROOTFS_DIR" ]]; then
+ platform="$(uname)"
+ # OSX doesn't use rootfs
+ if ! [[ -n "$ROOTFS_DIR" || "$platform" == "Darwin" ]]; then
echo "ROOTFS_DIR not set for crosscompile"
exit 1
fi
@@ -74,7 +76,12 @@ if [[ "$CROSSCOMPILE" == "1" ]]; then
if [[ -n "$tryrun_dir" ]]; then
cmake_extra_defines="$cmake_extra_defines -C $tryrun_dir/tryrun.cmake"
fi
- cmake_extra_defines="$cmake_extra_defines -DCMAKE_TOOLCHAIN_FILE=$scriptroot/../common/cross/toolchain.cmake"
+
+ if [[ "$platform" == "Darwin" ]]; then
+ cmake_extra_defines="$cmake_extra_defines -DCMAKE_SYSTEM_NAME=Darwin"
+ else
+ cmake_extra_defines="$cmake_extra_defines -DCMAKE_TOOLCHAIN_FILE=$scriptroot/../common/cross/toolchain.cmake"
+ fi
fi
if [[ "$build_arch" == "armel" ]]; then
diff --git a/eng/native/init-os-and-arch.sh b/eng/native/init-os-and-arch.sh
index 0a6018386575e..7bda16f77a275 100644
--- a/eng/native/init-os-and-arch.sh
+++ b/eng/native/init-os-and-arch.sh
@@ -27,6 +27,17 @@ if [ "$os" = "SunOS" ]; then
os="Solaris"
fi
CPUName=$(isainfo -n)
+elif [ "$os" = "OSX" ]; then
+ # On OSX, universal binaries make 'uname -m' unreliable: its response changes
+ # based on the hardware being emulated (e.g. under Rosetta 2).
+ # Query sysctl instead.
+ if [ "$(sysctl -q -n hw.optional.arm64)" = "1" ]; then
+ CPUName=arm64
+ elif [ "$(sysctl -q -n hw.optional.x86_64)" = "1" ]; then
+ CPUName=x86_64
+ else
+ CPUName=$(uname -m)
+ fi
else
# For rest of the operating systems, use uname(1) to determine what the CPU is.
CPUName=$(uname -m)
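For reference, the same hardware-capability query can be made from native code. A minimal C++ sketch of the sysctl lookup the script performs (macOS only; `hw.optional.arm64` and `hw.optional.x86_64` are the keys checked above):

```cpp
#include <sys/sysctl.h>
#include <cstdio>

// Returns the value of a boolean hw.optional.* key, or 0 if it is absent.
// Like the script above, this sidesteps uname -m, which reports the
// emulated architecture when running under Rosetta 2.
static int QueryOptionalHw(const char* name)
{
    int value = 0;
    size_t size = sizeof(value);
    if (sysctlbyname(name, &value, &size, nullptr, 0) != 0)
        return 0;
    return value;
}

int main()
{
    if (QueryOptionalHw("hw.optional.arm64"))
        puts("arm64");
    else if (QueryOptionalHw("hw.optional.x86_64"))
        puts("x86_64");
    else
        puts("unknown");
    return 0;
}
```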
diff --git a/src/coreclr/build-runtime.sh b/src/coreclr/build-runtime.sh
index f7dd55e288310..a2e7313f4f3db 100755
--- a/src/coreclr/build-runtime.sh
+++ b/src/coreclr/build-runtime.sh
@@ -99,7 +99,7 @@ build_cross_architecture_components()
export __CMakeBinDir CROSSCOMPILE
__CMakeArgs="-DCLR_CMAKE_TARGET_ARCH=$__BuildArch -DCLR_CROSS_COMPONENTS_BUILD=1 $__CMakeArgs"
- build_native "$__CrossArch" "$__ProjectRoot" "$__ProjectRoot" "$intermediatesForBuild" "cross-architecture components"
+ build_native "$__TargetOS" "$__CrossArch" "$__ProjectRoot" "$__ProjectRoot" "$intermediatesForBuild" "$__CMakeArgs" "cross-architecture components"
CROSSCOMPILE=1
export CROSSCOMPILE
@@ -198,10 +198,6 @@ __BuildRuntime=1
source "$__ProjectRoot"/_build-commons.sh
-if [[ "${__BuildArch}" != "${__HostArch}" ]]; then
- __CrossBuild=1
-fi
-
# Set dependent variables
# Set the remaining variables based upon the determined build configuration
@@ -261,7 +257,7 @@ fi
if [[ "$__SkipNative" == 1 ]]; then
echo "Skipping CoreCLR component build."
else
- build_native "$__BuildArch" "$__ProjectRoot" "$__ProjectRoot" "$__IntermediatesDir" "CoreCLR component"
+ build_native "$__TargetOS" "$__BuildArch" "$__ProjectRoot" "$__ProjectRoot" "$__IntermediatesDir" "$__CMakeArgs" "CoreCLR component"
# Build cross-architecture components
if [[ "$__SkipCrossArchNative" != 1 ]]; then
diff --git a/src/coreclr/crosscomponents.cmake b/src/coreclr/crosscomponents.cmake
index b2b010fdd6659..c66531887daa0 100644
--- a/src/coreclr/crosscomponents.cmake
+++ b/src/coreclr/crosscomponents.cmake
@@ -23,7 +23,7 @@ if (CLR_CMAKE_HOST_OS STREQUAL CLR_CMAKE_TARGET_OS)
endif(CLR_CMAKE_TARGET_UNIX)
endif()
-if(NOT CLR_CMAKE_HOST_LINUX AND NOT FEATURE_CROSSBITNESS)
+if(NOT CLR_CMAKE_HOST_LINUX AND NOT CLR_CMAKE_HOST_OSX AND NOT FEATURE_CROSSBITNESS)
list (APPEND CLR_CROSS_COMPONENTS_LIST
mscordaccore
mscordbi
diff --git a/src/coreclr/src/debug/createdump/mac.h b/src/coreclr/src/debug/createdump/mac.h
index 16b198e935514..87745903ab183 100644
--- a/src/coreclr/src/debug/createdump/mac.h
+++ b/src/coreclr/src/debug/createdump/mac.h
@@ -68,6 +68,7 @@ typedef struct elf64_note {
Elf64_Word n_type; /* Content type */
} Elf64_Nhdr;
+#if defined(TARGET_AMD64)
struct user_fpregs_struct
{
unsigned short int cwd;
@@ -113,6 +114,25 @@ struct user_regs_struct
unsigned long long int fs;
unsigned long long int gs;
};
+#elif defined(TARGET_ARM64)
+struct user_fpsimd_struct
+{
+ uint64_t vregs[2*32];
+ uint32_t fpcr;
+ uint32_t fpsr;
+};
+
+struct user_regs_struct
+{
+ uint64_t regs[31];
+ uint64_t sp;
+ uint64_t pc;
+ uint32_t pstate;
+};
+#else
+#error Unexpected architecture
+#endif
+
typedef pid_t __pid_t;
diff --git a/src/coreclr/src/debug/createdump/memoryregion.h b/src/coreclr/src/debug/createdump/memoryregion.h
index f1b8a795e0128..9176d3576eaf2 100644
--- a/src/coreclr/src/debug/createdump/memoryregion.h
+++ b/src/coreclr/src/debug/createdump/memoryregion.h
@@ -1,7 +1,7 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
-#if defined(__arm__) || defined(__aarch64__)
+#if !defined(PAGE_SIZE) && (defined(__arm__) || defined(__aarch64__))
#define PAGE_SIZE sysconf(_SC_PAGESIZE)
#endif
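The added `!defined(PAGE_SIZE)` guard matters on macOS, where the system headers already define `PAGE_SIZE`; redefining it would conflict. A small sketch of the runtime query the fallback macro expands to (Apple Silicon reports 16 KB pages rather than the 4 KB common on x86_64):

```cpp
#include <unistd.h>
#include <cstdio>

int main()
{
    // Equivalent of the PAGE_SIZE fallback above: ask the OS at runtime.
    // Prints 16384 on Apple Silicon, typically 4096 on x86_64 Linux.
    long pageSize = sysconf(_SC_PAGESIZE);
    printf("page size: %ld bytes\n", pageSize);
    return 0;
}
```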
diff --git a/src/coreclr/src/debug/createdump/threadinfomac.cpp b/src/coreclr/src/debug/createdump/threadinfomac.cpp
index a33395f41dad8..c9c8db8bb5058 100644
--- a/src/coreclr/src/debug/createdump/threadinfomac.cpp
+++ b/src/coreclr/src/debug/createdump/threadinfomac.cpp
@@ -25,6 +25,7 @@ ThreadInfo::Initialize()
m_ppid = 0;
m_tgid = 0;
+#if defined(TARGET_AMD64)
x86_thread_state64_t state;
mach_msg_type_number_t stateCount = x86_THREAD_STATE64_COUNT;
kern_return_t result = ::thread_get_state(Port(), x86_THREAD_STATE64, (thread_state_t)&state, &stateCount);
@@ -88,6 +89,38 @@ ThreadInfo::Initialize()
memcpy(m_fpRegisters.st_space, &fpstate.__fpu_stmm0, sizeof(m_fpRegisters.st_space));
memcpy(m_fpRegisters.xmm_space, &fpstate.__fpu_xmm0, sizeof(m_fpRegisters.xmm_space));
+#elif defined(TARGET_ARM64)
+ arm_thread_state64_t state;
+ mach_msg_type_number_t stateCount = ARM_THREAD_STATE64_COUNT;
+ kern_return_t result = ::thread_get_state(Port(), ARM_THREAD_STATE64, (thread_state_t)&state, &stateCount);
+ if (result != KERN_SUCCESS)
+ {
+ fprintf(stderr, "thread_get_state(%x) FAILED %x %s\n", m_tid, result, mach_error_string(result));
+ return false;
+ }
+
+ memcpy(m_gpRegisters.regs, &state.__x, sizeof(state.__x));
+ m_gpRegisters.regs[29] = arm_thread_state64_get_fp(state);
+ m_gpRegisters.regs[30] = (uint64_t)arm_thread_state64_get_lr_fptr(state);
+
+ m_gpRegisters.sp = arm_thread_state64_get_sp(state);
+ m_gpRegisters.pc = (uint64_t)arm_thread_state64_get_pc_fptr(state);
+
+ arm_neon_state64_t fpstate;
+ stateCount = ARM_NEON_STATE64_COUNT;
+ result = ::thread_get_state(Port(), ARM_NEON_STATE64, (thread_state_t)&fpstate, &stateCount);
+ if (result != KERN_SUCCESS)
+ {
+ fprintf(stderr, "thread_get_state(%x) FAILED %x %s\n", m_tid, result, mach_error_string(result));
+ return false;
+ }
+
+ memcpy(m_fpRegisters.vregs, &fpstate.__v, sizeof(m_fpRegisters.vregs));
+ m_fpRegisters.fpsr = fpstate.__fpsr;
+ m_fpRegisters.fpcr = fpstate.__fpcr;
+#else
+#error Unexpected architecture
+#endif
return true;
}
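Note the `arm_thread_state64_get_*` accessors in place of raw `__pc`/`__lr` field reads: on arm64e hardware those slots hold PAC-signed pointers, and the macros strip or re-sign them correctly. A self-contained sketch of the same suspend-and-snapshot pattern (macOS arm64 only; the names here are illustrative, not the PAL's):

```cpp
#include <mach/mach.h>
#include <pthread.h>
#include <unistd.h>
#include <atomic>
#include <cstdio>

#if defined(__arm64__)
static std::atomic<bool> g_stop{false};

static void* Spin(void*)
{
    while (!g_stop.load()) {}
    return nullptr;
}

int main()
{
    pthread_t t;
    pthread_create(&t, nullptr, Spin, nullptr);
    usleep(10000); // let the worker start spinning

    // Suspend, then snapshot general-purpose state, as ThreadInfo::Initialize does.
    thread_act_t port = pthread_mach_thread_np(t);
    thread_suspend(port);

    arm_thread_state64_t state;
    mach_msg_type_number_t count = ARM_THREAD_STATE64_COUNT;
    if (thread_get_state(port, ARM_THREAD_STATE64,
                         (thread_state_t)&state, &count) == KERN_SUCCESS)
    {
        // Accessors handle PAC signing on arm64e; raw field access would not.
        printf("worker pc=%p sp=%#llx\n",
               arm_thread_state64_get_pc_fptr(state),
               (unsigned long long)arm_thread_state64_get_sp(state));
    }

    thread_resume(port);
    g_stop = true;
    pthread_join(t, nullptr);
    return 0;
}
#else
int main() { return 0; }
#endif
```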
diff --git a/src/coreclr/src/debug/ee/arm64/dbghelpers.S b/src/coreclr/src/debug/ee/arm64/dbghelpers.S
index 8fc88fa257350..4b9504efec859 100644
--- a/src/coreclr/src/debug/ee/arm64/dbghelpers.S
+++ b/src/coreclr/src/debug/ee/arm64/dbghelpers.S
@@ -22,7 +22,7 @@ NESTED_ENTRY FuncEvalHijack, _TEXT, UnhandledExceptionHandlerUnix
PROLOG_SAVE_REG_PAIR_INDEXED fp, lr, -32
str x0, [sp, #16]
// FuncEvalHijackWorker returns the address we should jump to.
- bl FuncEvalHijackWorker
+ bl C_FUNC(FuncEvalHijackWorker)
EPILOG_STACK_FREE 32
EPILOG_BRANCH_REG x0
@@ -33,7 +33,7 @@ NESTED_END FuncEvalHijack
NESTED_ENTRY ExceptionHijack, _TEXT, UnhandledExceptionHandlerUnix
// make the call
- bl ExceptionHijackWorker
+ bl C_FUNC(ExceptionHijackWorker)
// effective NOP to terminate unwind
mov x3, x3
diff --git a/src/coreclr/src/dlls/mscordac/CMakeLists.txt b/src/coreclr/src/dlls/mscordac/CMakeLists.txt
index f1acbdce5f907..be18d5e4bf6cb 100644
--- a/src/coreclr/src/dlls/mscordac/CMakeLists.txt
+++ b/src/coreclr/src/dlls/mscordac/CMakeLists.txt
@@ -179,11 +179,11 @@ if(CLR_CMAKE_HOST_WIN32 AND CLR_CMAKE_TARGET_UNIX)
)
endif(CLR_CMAKE_HOST_WIN32 AND CLR_CMAKE_TARGET_UNIX)
-if(CLR_CMAKE_HOST_OSX)
+if(CLR_CMAKE_HOST_UNIX)
list(APPEND COREDAC_LIBRARIES
coreclrpal_dac
)
-endif(CLR_CMAKE_HOST_OSX)
+endif(CLR_CMAKE_HOST_UNIX)
target_link_libraries(mscordaccore PRIVATE ${COREDAC_LIBRARIES})
diff --git a/src/coreclr/src/gc/unix/config.gc.h.in b/src/coreclr/src/gc/unix/config.gc.h.in
index 42b6429be80e5..e633193218a2d 100644
--- a/src/coreclr/src/gc/unix/config.gc.h.in
+++ b/src/coreclr/src/gc/unix/config.gc.h.in
@@ -16,7 +16,7 @@
#cmakedefine01 HAVE_SWAPCTL
#cmakedefine01 HAVE_SYSCTLBYNAME
#cmakedefine01 HAVE_PTHREAD_CONDATTR_SETCLOCK
-#cmakedefine01 HAVE_MACH_ABSOLUTE_TIME
+#cmakedefine01 HAVE_CLOCK_GETTIME_NSEC_NP
#cmakedefine01 HAVE_SCHED_GETAFFINITY
#cmakedefine01 HAVE_SCHED_SETAFFINITY
#cmakedefine01 HAVE_PTHREAD_SETAFFINITY_NP
diff --git a/src/coreclr/src/gc/unix/configure.cmake b/src/coreclr/src/gc/unix/configure.cmake
index 6d190a8c46735..54c3d0f899d9c 100644
--- a/src/coreclr/src/gc/unix/configure.cmake
+++ b/src/coreclr/src/gc/unix/configure.cmake
@@ -72,17 +72,15 @@ check_cxx_source_runs("
check_library_exists(pthread pthread_condattr_setclock "" HAVE_PTHREAD_CONDATTR_SETCLOCK)
check_cxx_source_runs("
- #include <mach/mach_time.h>
- #include <stdlib.h>
- int main()
- {
- int ret;
- mach_timebase_info_data_t timebaseInfo;
- ret = mach_timebase_info(&timebaseInfo);
- mach_absolute_time();
- exit(ret);
- }
- " HAVE_MACH_ABSOLUTE_TIME)
+#include <stdlib.h>
+#include <time.h>
+
+int main()
+{
+ int ret;
+ ret = clock_gettime_nsec_np(CLOCK_UPTIME_RAW);
+ exit((ret == 0) ? 1 : 0);
+}" HAVE_CLOCK_GETTIME_NSEC_NP)
check_library_exists(c sched_getaffinity "" HAVE_SCHED_GETAFFINITY)
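`clock_gettime_nsec_np(CLOCK_UPTIME_RAW)` reads the same tick source as `mach_absolute_time()` but returns nanoseconds directly, so the `mach_timebase_info` numer/denom scaling disappears from every caller. A minimal sketch of the replacement API (available on macOS 10.12+):

```cpp
#include <time.h>
#include <cstdint>
#include <cstdio>

int main()
{
    // CLOCK_UPTIME_RAW advances like mach_absolute_time() but is already
    // expressed in nanoseconds, so no timebase conversion is needed.
    uint64_t start = clock_gettime_nsec_np(CLOCK_UPTIME_RAW);
    // ... code being timed ...
    uint64_t elapsed = clock_gettime_nsec_np(CLOCK_UPTIME_RAW) - start;
    printf("elapsed: %llu ns\n", (unsigned long long)elapsed);
    return 0;
}
```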
diff --git a/src/coreclr/src/gc/unix/events.cpp b/src/coreclr/src/gc/unix/events.cpp
index 88797741fa7ea..7cc55680aaf2f 100644
--- a/src/coreclr/src/gc/unix/events.cpp
+++ b/src/coreclr/src/gc/unix/events.cpp
@@ -16,10 +16,6 @@
#include "gcenv.os.h"
#include "globals.h"
-#if HAVE_MACH_ABSOLUTE_TIME
-mach_timebase_info_data_t g_TimebaseInfo;
-#endif // MACH_ABSOLUTE_TIME
-
namespace
{
@@ -37,7 +33,7 @@ void TimeSpecAdd(timespec* time, uint32_t milliseconds)
}
#endif // HAVE_PTHREAD_CONDATTR_SETCLOCK
-#if HAVE_MACH_ABSOLUTE_TIME
+#if HAVE_CLOCK_GETTIME_NSEC_NP
// Convert nanoseconds to the timespec structure
// Parameters:
// nanoseconds - time in nanoseconds to convert
@@ -47,7 +43,7 @@ void NanosecondsToTimeSpec(uint64_t nanoseconds, timespec* t)
t->tv_sec = nanoseconds / tccSecondsToNanoSeconds;
t->tv_nsec = nanoseconds % tccSecondsToNanoSeconds;
}
-#endif // HAVE_PTHREAD_CONDATTR_SETCLOCK
+#endif // HAVE_CLOCK_GETTIME_NSEC_NP
} // anonymous namespace
@@ -81,7 +77,7 @@ class GCEvent::Impl
// TODO(segilles) implement this for CoreCLR
//PthreadCondAttrHolder attrsHolder(&attrs);
-#if HAVE_PTHREAD_CONDATTR_SETCLOCK && !HAVE_MACH_ABSOLUTE_TIME
+#if HAVE_PTHREAD_CONDATTR_SETCLOCK && !HAVE_CLOCK_GETTIME_NSEC_NP
// Ensure that the pthread_cond_timedwait will use CLOCK_MONOTONIC
st = pthread_condattr_setclock(&attrs, CLOCK_MONOTONIC);
if (st != 0)
@@ -89,7 +85,7 @@ class GCEvent::Impl
assert(!"Failed to set UnixEvent condition variable wait clock");
return false;
}
-#endif // HAVE_PTHREAD_CONDATTR_SETCLOCK && !HAVE_MACH_ABSOLUTE_TIME
+#endif // HAVE_PTHREAD_CONDATTR_SETCLOCK && !HAVE_CLOCK_GETTIME_NSEC_NP
st = pthread_mutex_init(&m_mutex, NULL);
if (st != 0)
@@ -130,13 +126,12 @@ class GCEvent::Impl
UNREFERENCED_PARAMETER(alertable);
timespec endTime;
-#if HAVE_MACH_ABSOLUTE_TIME
+#if HAVE_CLOCK_GETTIME_NSEC_NP
uint64_t endMachTime;
if (milliseconds != INFINITE)
{
uint64_t nanoseconds = (uint64_t)milliseconds * tccMilliSecondsToNanoSeconds;
- NanosecondsToTimeSpec(nanoseconds, &endTime);
- endMachTime = mach_absolute_time() + nanoseconds * g_TimebaseInfo.denom / g_TimebaseInfo.numer;
+ endMachTime = clock_gettime_nsec_np(CLOCK_UPTIME_RAW) + nanoseconds;
}
#elif HAVE_PTHREAD_CONDATTR_SETCLOCK
if (milliseconds != INFINITE)
@@ -159,17 +154,17 @@ class GCEvent::Impl
}
else
{
-#if HAVE_MACH_ABSOLUTE_TIME
+#if HAVE_CLOCK_GETTIME_NSEC_NP
// Since OSX doesn't support CLOCK_MONOTONIC condition waits, we use the relative
// variant of the timed wait and need to handle spurious wakeups properly.
st = pthread_cond_timedwait_relative_np(&m_condition, &m_mutex, &endTime);
if ((st == 0) && !m_state)
{
- uint64_t machTime = mach_absolute_time();
+ uint64_t machTime = clock_gettime_nsec_np(CLOCK_UPTIME_RAW);
if (machTime < endMachTime)
{
// The wake up was spurious, recalculate the relative endTime
- uint64_t remainingNanoseconds = (endMachTime - machTime) * g_TimebaseInfo.numer / g_TimebaseInfo.denom;
+ uint64_t remainingNanoseconds = endMachTime - machTime;
NanosecondsToTimeSpec(remainingNanoseconds, &endTime);
}
else
@@ -180,9 +175,9 @@ class GCEvent::Impl
st = ETIMEDOUT;
}
}
-#else // HAVE_MACH_ABSOLUTE_TIME
+#else // HAVE_CLOCK_GETTIME_NSEC_NP
st = pthread_cond_timedwait(&m_condition, &m_mutex, &endTime);
-#endif // HAVE_MACH_ABSOLUTE_TIME
+#endif // HAVE_CLOCK_GETTIME_NSEC_NP
// Verify that if the wait timed out, the event was not set
assert((st != ETIMEDOUT) || !m_state);
}
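Condensed, the wait logic the patch lands on looks like the following sketch (illustrative names, not the PAL's): take an absolute deadline from `CLOCK_UPTIME_RAW`, wait with the relative-time variant, and on a wakeup that wasn't a signal recompute the remaining interval.

```cpp
#include <pthread.h>
#include <time.h>
#include <cstdint>

// Sketch of the relative timed-wait pattern from GCEvent::Impl::Wait.
// Caller provides an initialized mutex/condvar pair and a signaled flag
// that the signaling thread sets while holding the mutex.
static bool WaitWithTimeout(pthread_mutex_t* mutex, pthread_cond_t* cond,
                            const bool* signaled, uint64_t timeoutNs)
{
    uint64_t deadline = clock_gettime_nsec_np(CLOCK_UPTIME_RAW) + timeoutNs;

    pthread_mutex_lock(mutex);
    while (!*signaled)
    {
        uint64_t now = clock_gettime_nsec_np(CLOCK_UPTIME_RAW);
        if (now >= deadline)
            break; // genuine timeout

        // pthread_cond_timedwait on OSX cannot be bound to a monotonic
        // clock, so use the relative variant and re-check the deadline
        // after every wakeup to filter out spurious ones.
        uint64_t remaining = deadline - now;
        timespec rel;
        rel.tv_sec  = remaining / 1000000000ull;
        rel.tv_nsec = remaining % 1000000000ull;
        pthread_cond_timedwait_relative_np(cond, mutex, &rel);
    }
    bool result = *signaled;
    pthread_mutex_unlock(mutex);
    return result;
}
```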
diff --git a/src/coreclr/src/gc/unix/gcenv.unix.cpp b/src/coreclr/src/gc/unix/gcenv.unix.cpp
index fcba54551a8de..f1e411982f766 100644
--- a/src/coreclr/src/gc/unix/gcenv.unix.cpp
+++ b/src/coreclr/src/gc/unix/gcenv.unix.cpp
@@ -366,14 +366,6 @@ bool GCToOSInterface::Initialize()
}
}
-#if HAVE_MACH_ABSOLUTE_TIME
- kern_return_t machRet;
- if ((machRet = mach_timebase_info(&g_TimebaseInfo)) != KERN_SUCCESS)
- {
- return false;
- }
-#endif // HAVE_MACH_ABSOLUTE_TIME
-
InitializeCGroup();
#if HAVE_SCHED_GETAFFINITY
@@ -857,7 +849,7 @@ static size_t GetLogicalProcessorCacheSizeFromOS()
}
#endif
-#if defined(HOST_ARM64)
+#if defined(HOST_ARM64) && !defined(TARGET_OSX)
if (cacheSize == 0)
{
// It is currently expected to be missing cache size info
diff --git a/src/coreclr/src/gc/unix/globals.h b/src/coreclr/src/gc/unix/globals.h
index fe0d76a36a4cd..aca43064f28c4 100644
--- a/src/coreclr/src/gc/unix/globals.h
+++ b/src/coreclr/src/gc/unix/globals.h
@@ -4,10 +4,6 @@
#ifndef __GLOBALS_H__
#define __GLOBALS_H__
-#if HAVE_MACH_ABSOLUTE_TIME
-#include <mach/mach_time.h>
-#endif // HAVE_MACH_ABSOLUTE_TIME
-
const int tccSecondsToMilliSeconds = 1000;
// The number of microseconds in a second.
@@ -22,8 +18,4 @@ const int tccMilliSecondsToMicroSeconds = 1000;
// The number of nanoseconds in a millisecond.
const int tccMilliSecondsToNanoSeconds = 1000000;
-#if HAVE_MACH_ABSOLUTE_TIME
-extern mach_timebase_info_data_t g_TimebaseInfo;
-#endif // HAVE_MACH_ABSOLUTE_TIME
-
#endif // __GLOBALS_H__
diff --git a/src/coreclr/src/inc/crosscomp.h b/src/coreclr/src/inc/crosscomp.h
index ce494af3700d1..e942db59e4a63 100644
--- a/src/coreclr/src/inc/crosscomp.h
+++ b/src/coreclr/src/inc/crosscomp.h
@@ -297,6 +297,17 @@ typedef struct _T_RUNTIME_FUNCTION {
} T_RUNTIME_FUNCTION, *PT_RUNTIME_FUNCTION;
+#ifdef HOST_UNIX
+
+typedef
+EXCEPTION_DISPOSITION
+(*PEXCEPTION_ROUTINE) (
+ PEXCEPTION_RECORD ExceptionRecord,
+ ULONG64 EstablisherFrame,
+ PCONTEXT ContextRecord,
+ PVOID DispatcherContext
+ );
+#endif
//
// Define exception dispatch context structure.
//
@@ -348,6 +359,27 @@ typedef struct _T_KNONVOLATILE_CONTEXT_POINTERS {
} T_KNONVOLATILE_CONTEXT_POINTERS, *PT_KNONVOLATILE_CONTEXT_POINTERS;
+#if defined(HOST_UNIX) && defined(TARGET_ARM64) && !defined(HOST_ARM64)
+enum
+{
+ UNW_AARCH64_X19 = 19,
+ UNW_AARCH64_X20 = 20,
+ UNW_AARCH64_X21 = 21,
+ UNW_AARCH64_X22 = 22,
+ UNW_AARCH64_X23 = 23,
+ UNW_AARCH64_X24 = 24,
+ UNW_AARCH64_X25 = 25,
+ UNW_AARCH64_X26 = 26,
+ UNW_AARCH64_X27 = 27,
+ UNW_AARCH64_X28 = 28,
+ UNW_AARCH64_X29 = 29,
+ UNW_AARCH64_X30 = 30,
+ UNW_AARCH64_SP = 31,
+ UNW_AARCH64_PC = 32
+};
+
+#endif // HOST_UNIX && TARGET_ARM64 && !HOST_ARM64
+
#else
#define T_CONTEXT CONTEXT
diff --git a/src/coreclr/src/inc/loaderheap.h b/src/coreclr/src/inc/loaderheap.h
index 8008a3a829b8b..68d50f6d637d3 100644
--- a/src/coreclr/src/inc/loaderheap.h
+++ b/src/coreclr/src/inc/loaderheap.h
@@ -549,6 +549,9 @@ class LoaderHeap : public UnlockedLoaderHeap, public ILoaderHeapBackout
{
WRAPPER_NO_CONTRACT;
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
void *pResult;
TaggedMemAllocPtr tmap;
@@ -568,6 +571,7 @@ class LoaderHeap : public UnlockedLoaderHeap, public ILoaderHeapBackout
tmap.m_szFile = szFile;
tmap.m_lineNum = lineNum;
#endif
+
return tmap;
}
@@ -625,6 +629,10 @@ class LoaderHeap : public UnlockedLoaderHeap, public ILoaderHeapBackout
{
WRAPPER_NO_CONTRACT;
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
CRITSEC_Holder csh(m_CriticalSection);
diff --git a/src/coreclr/src/pal/inc/pal.h b/src/coreclr/src/pal/inc/pal.h
index 5a099bae890ca..727522cf85bfa 100644
--- a/src/coreclr/src/pal/inc/pal.h
+++ b/src/coreclr/src/pal/inc/pal.h
@@ -2638,6 +2638,33 @@ VirtualFree(
IN SIZE_T dwSize,
IN DWORD dwFreeType);
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+#ifdef __cplusplus
+extern "C++" {
+struct PAL_JITWriteEnableHolder
+{
+public:
+ PAL_JITWriteEnableHolder(bool jitWriteEnable)
+ {
+ m_jitWriteEnableRestore = JITWriteEnable(jitWriteEnable);
+ };
+ ~PAL_JITWriteEnableHolder()
+ {
+ JITWriteEnable(m_jitWriteEnableRestore);
+ }
+
+private:
+ bool JITWriteEnable(bool enable);
+ bool m_jitWriteEnableRestore;
+};
+
+inline
+PAL_JITWriteEnableHolder
+PAL_JITWriteEnable(IN bool enable) { return PAL_JITWriteEnableHolder(enable); }
+}
+#endif // __cplusplus
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
PALIMPORT
BOOL
PALAPI
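On Apple Silicon, pages allocated with `mmap(MAP_JIT)` are execute-only until the calling thread flips them writeable with `pthread_jit_write_protect_np`, the API behind the macOS 11.0 minimum set in configurecompiler.cmake above. A hypothetical sketch of the pattern the holder presumably wraps; `EmitCode` and its parameters are illustrative, not the PAL's implementation:

```cpp
#include <pthread.h>
#include <libkern/OSCacheControl.h>
#include <cstring>

// jitPage must come from mmap(NULL, ..., PROT_READ | PROT_WRITE | PROT_EXEC,
// MAP_PRIVATE | MAP_ANON | MAP_JIT, -1, 0).
static void EmitCode(void* jitPage, const unsigned char* code, size_t size)
{
    pthread_jit_write_protect_np(0);      // writeable for this thread
    memcpy(jitPage, code, size);
    pthread_jit_write_protect_np(1);      // back to executable
    sys_icache_invalidate(jitPage, size); // drop stale instruction cache lines
}
```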
diff --git a/src/coreclr/src/pal/inc/pal_error.h b/src/coreclr/src/pal/inc/pal_error.h
index be9350d333e93..b387e68540060 100644
--- a/src/coreclr/src/pal/inc/pal_error.h
+++ b/src/coreclr/src/pal/inc/pal_error.h
@@ -133,7 +133,6 @@
#define ERROR_PALINIT_INITIALIZE_MACH_EXCEPTION 65280L
#define ERROR_PALINIT_PROCABORT_INITIALIZE 65281L
#define ERROR_PALINIT_INITIALIZE_FLUSH_PROCESS_WRITE_BUFFERS 65282L
-#define ERROR_PALINIT_TIME 65283L
#define ERROR_PALINIT_MAP 65284L
#define ERROR_PALINIT_VIRTUAL 65285L
#define ERROR_PALINIT_SEH 65286L
diff --git a/src/coreclr/src/pal/inc/unixasmmacrosarm64.inc b/src/coreclr/src/pal/inc/unixasmmacrosarm64.inc
index 69cdcb3f48ba5..9545afb9f8e58 100644
--- a/src/coreclr/src/pal/inc/unixasmmacrosarm64.inc
+++ b/src/coreclr/src/pal/inc/unixasmmacrosarm64.inc
@@ -4,7 +4,11 @@
.macro NESTED_ENTRY Name, Section, Handler
LEAF_ENTRY \Name, \Section
.ifnc \Handler, NoHandler
+#if defined(__APPLE__)
+ .cfi_personality 0x9b, C_FUNC(\Handler) // 0x9b == DW_EH_PE_indirect | DW_EH_PE_pcrel | DW_EH_PE_sdata4
+#else
.cfi_personality 0x1b, C_FUNC(\Handler) // 0x1b == DW_EH_PE_pcrel | DW_EH_PE_sdata4
+#endif
.endif
.endm
@@ -19,13 +23,20 @@ C_FUNC(\Name):
.macro LEAF_ENTRY Name, Section
.global C_FUNC(\Name)
+#if defined(__APPLE__)
+ .text
+ .p2align 2
+#else
.type \Name, %function
+#endif
C_FUNC(\Name):
.cfi_startproc
.endm
.macro LEAF_END Name, Section
+#if !defined(__APPLE__)
.size \Name, .-\Name
+#endif
.cfi_endproc
.endm
@@ -36,8 +47,13 @@ C_FUNC(\Name\()_End):
.endm
.macro PREPARE_EXTERNAL_VAR Name, HelperReg
- adrp \HelperReg, \Name
- add \HelperReg, \HelperReg, :lo12:\Name
+#if defined(__APPLE__)
+ adrp \HelperReg, C_FUNC(\Name)@GOTPAGE
+ ldr \HelperReg, [\HelperReg, C_FUNC(\Name)@GOTPAGEOFF]
+#else
+ adrp \HelperReg, C_FUNC(\Name)
+ add \HelperReg, \HelperReg, :lo12:C_FUNC(\Name)
+#endif
.endm
.macro PROLOG_STACK_ALLOC Size
diff --git a/src/coreclr/src/pal/src/CMakeLists.txt b/src/coreclr/src/pal/src/CMakeLists.txt
index 1525e6938474b..4d4839675db0d 100644
--- a/src/coreclr/src/pal/src/CMakeLists.txt
+++ b/src/coreclr/src/pal/src/CMakeLists.txt
@@ -41,17 +41,29 @@ include_directories(include)
# Compile options
+if(CLR_CMAKE_HOST_ARCH_AMD64)
+ set(PAL_ARCH_SOURCES_DIR amd64)
+elseif(CLR_CMAKE_HOST_ARCH_ARM)
+ set(PAL_ARCH_SOURCES_DIR arm)
+elseif(CLR_CMAKE_HOST_ARCH_ARM64)
+ set(PAL_ARCH_SOURCES_DIR arm64)
+elseif(CLR_CMAKE_HOST_ARCH_I386)
+ set(PAL_ARCH_SOURCES_DIR i386)
+endif()
+
if(CLR_CMAKE_USE_SYSTEM_LIBUNWIND)
add_definitions(-DFEATURE_USE_SYSTEM_LIBUNWIND)
endif(CLR_CMAKE_USE_SYSTEM_LIBUNWIND)
if(CLR_CMAKE_TARGET_OSX)
add_definitions(-DTARGET_OSX)
- add_definitions(-DXSTATE_SUPPORTED)
+ if(CLR_CMAKE_TARGET_ARCH_AMD64)
+ add_definitions(-DXSTATE_SUPPORTED)
+ endif()
set(PLATFORM_SOURCES
- arch/amd64/activationhandlerwrapper.S
- arch/amd64/context.S
- arch/amd64/dispatchexceptionwrapper.S
+ arch/${PAL_ARCH_SOURCES_DIR}/activationhandlerwrapper.S
+ arch/${PAL_ARCH_SOURCES_DIR}/context.S
+ arch/${PAL_ARCH_SOURCES_DIR}/dispatchexceptionwrapper.S
exception/machexception.cpp
exception/machmessage.cpp
)
@@ -64,15 +76,6 @@ add_definitions(-DLP64COMPATIBLE)
add_definitions(-DCORECLR)
add_definitions(-DPIC)
add_definitions(-D_FILE_OFFSET_BITS=64)
-if(CLR_CMAKE_HOST_ARCH_AMD64)
- set(PAL_ARCH_SOURCES_DIR amd64)
-elseif(CLR_CMAKE_HOST_ARCH_ARM)
- set(PAL_ARCH_SOURCES_DIR arm)
-elseif(CLR_CMAKE_HOST_ARCH_ARM64)
- set(PAL_ARCH_SOURCES_DIR arm64)
-elseif(CLR_CMAKE_HOST_ARCH_I386)
- set(PAL_ARCH_SOURCES_DIR i386)
-endif()
if(CLR_CMAKE_HOST_ARCH_AMD64 AND CLR_CMAKE_TARGET_LINUX AND NOT CLR_CMAKE_HOST_ALPINE_LINUX)
# Currently the _xstate is not available on Alpine Linux
@@ -218,12 +221,6 @@ set(SOURCES
thread/threadsusp.cpp
)
-if(NOT CLR_CMAKE_TARGET_OSX)
- list(APPEND SOURCES
- exception/remote-unwind.cpp
- )
-endif(NOT CLR_CMAKE_TARGET_OSX)
-
if(NOT CLR_CMAKE_USE_SYSTEM_LIBUNWIND)
set(LIBUNWIND_OBJECTS $<TARGET_OBJECTS:libunwind>)
endif(NOT CLR_CMAKE_USE_SYSTEM_LIBUNWIND)
@@ -249,7 +246,15 @@ if(CLR_CMAKE_TARGET_OSX)
"-D_XOPEN_SOURCE"
"-DUNW_REMOTE_ONLY"
)
+else()
+ if(NOT FEATURE_CROSSBITNESS)
+ add_library(coreclrpal_dac STATIC
+ exception/remote-unwind.cpp
+ )
+ endif(NOT FEATURE_CROSSBITNESS)
+endif(CLR_CMAKE_TARGET_OSX)
+if(NOT FEATURE_CROSSBITNESS)
target_include_directories(coreclrpal_dac PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}/libunwind/include
${CMAKE_CURRENT_SOURCE_DIR}/libunwind/include/tdep
@@ -257,7 +262,7 @@ if(CLR_CMAKE_TARGET_OSX)
${CMAKE_CURRENT_BINARY_DIR}/libunwind/include
${CMAKE_CURRENT_BINARY_DIR}/libunwind/include/tdep
)
-endif(CLR_CMAKE_TARGET_OSX)
+endif(NOT FEATURE_CROSSBITNESS)
# There is only one function exported in 'tracepointprovider.cpp' namely 'PAL_InitializeTracing',
# which is guarded with '#if defined(__linux__)'. On macOS, Xcode issues the following warning:
diff --git a/src/coreclr/src/pal/src/arch/arm64/activationhandlerwrapper.S b/src/coreclr/src/pal/src/arch/arm64/activationhandlerwrapper.S
new file mode 100644
index 0000000000000..240757fa506e2
--- /dev/null
+++ b/src/coreclr/src/pal/src/arch/arm64/activationhandlerwrapper.S
@@ -0,0 +1,22 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+// Offset of the return address from the ActivationHandler in the ActivationHandlerWrapper
+.globl C_FUNC(ActivationHandlerReturnOffset)
+C_FUNC(ActivationHandlerReturnOffset):
+ .int LOCAL_LABEL(ActivationHandlerReturn)-C_FUNC(ActivationHandlerWrapper)
+
+NESTED_ENTRY ActivationHandlerWrapper, _TEXT, NoHandler
+ PROLOG_SAVE_REG_PAIR_INDEXED fp, lr, -(16 + CONTEXT_Size)
+ // Should never actually run
+ EMIT_BREAKPOINT
+ bl EXTERNAL_C_FUNC(ActivationHandler)
+LOCAL_LABEL(ActivationHandlerReturn):
+ // Should never return
+ EMIT_BREAKPOINT
+ EPILOG_RESTORE_REG_PAIR_INDEXED fp, lr, (16 + CONTEXT_Size)
+ ret
+NESTED_END ActivationHandlerWrapper, _TEXT
diff --git a/src/coreclr/src/pal/src/arch/arm64/asmconstants.h b/src/coreclr/src/pal/src/arch/arm64/asmconstants.h
index ad7d09e6efbbf..a657b8e5eb1c5 100644
--- a/src/coreclr/src/pal/src/arch/arm64/asmconstants.h
+++ b/src/coreclr/src/pal/src/arch/arm64/asmconstants.h
@@ -90,5 +90,6 @@
#define CONTEXT_FLOAT_CONTROL_OFFSET CONTEXT_V31+16
#define CONTEXT_Fpcr 0
#define CONTEXT_Fpsr CONTEXT_Fpcr+8
+#define CONTEXT_Size ((CONTEXT_NEON_OFFSET + CONTEXT_Fpsr + 8 + 0xf) & ~0xf)
#endif
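The new `CONTEXT_Size` rounds the structure up to a 16-byte multiple with the `(x + 0xf) & ~0xf` idiom, since AArch64 requires a 16-byte-aligned stack pointer wherever the context is materialized on the stack. The idiom in isolation:

```cpp
#include <cstdint>
#include <cstdio>

// Round x up to the next multiple of 16, as CONTEXT_Size does above.
constexpr uint64_t AlignUp16(uint64_t x)
{
    return (x + 0xf) & ~uint64_t(0xf);
}

int main()
{
    printf("%llu %llu %llu\n",
           (unsigned long long)AlignUp16(1),   // 16
           (unsigned long long)AlignUp16(16),  // 16
           (unsigned long long)AlignUp16(17)); // 32
    return 0;
}
```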
diff --git a/src/coreclr/src/pal/src/arch/arm64/context.S b/src/coreclr/src/pal/src/arch/arm64/context.S
new file mode 100644
index 0000000000000..1323c72fa2ca1
--- /dev/null
+++ b/src/coreclr/src/pal/src/arch/arm64/context.S
@@ -0,0 +1,15 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#include "unixasmmacros.inc"
+
+#if defined(_DEBUG)
+NESTED_ENTRY DBG_CheckStackAlignment, _TEXT, NoHandler
+ PROLOG_SAVE_REG_PAIR_INDEXED fp, lr, -16
+ // Reading from an unaligned stack pointer will trigger a stack alignment fault
+ ldr x0, [sp]
+ EPILOG_RESTORE_REG_PAIR_INDEXED fp, lr, 16
+ ret
+NESTED_END DBG_CheckStackAlignment, _TEXT
+#endif
+
diff --git a/src/coreclr/src/pal/src/arch/arm64/dispatchexceptionwrapper.S b/src/coreclr/src/pal/src/arch/arm64/dispatchexceptionwrapper.S
new file mode 100644
index 0000000000000..c7989377758f7
--- /dev/null
+++ b/src/coreclr/src/pal/src/arch/arm64/dispatchexceptionwrapper.S
@@ -0,0 +1,49 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+// ==++==
+//
+
+// ==--==
+//
+// Implementation of the PAL_DispatchExceptionWrapper that is
+// interposed between a function that caused a hardware fault
+// and PAL_DispatchException that throws an SEH exception for
+// the fault, to make the stack unwindable.
+//
+
+#include "unixasmmacros.inc"
+
+// Offset of the return address from the PAL_DispatchException in the PAL_DispatchExceptionWrapper
+.globl C_FUNC(PAL_DispatchExceptionReturnOffset)
+C_FUNC(PAL_DispatchExceptionReturnOffset):
+ .int LOCAL_LABEL(PAL_DispatchExceptionReturn) - C_FUNC(PAL_DispatchExceptionWrapper)
+
+//
+// PAL_DispatchExceptionWrapper will never be called; it only serves
+// to be referenced from a stack frame on the faulting thread. Its
+// unwinding behavior is equivalent to any standard function.
+// It is analogous to the following source file.
+//
+// extern "C" void PAL_DispatchException(CONTEXT *pContext, EXCEPTION_RECORD *pExceptionRecord, MachExceptionInfo *pMachExceptionInfo);
+//
+// extern "C" void PAL_DispatchExceptionWrapper()
+// {
+// CONTEXT Context;
+// EXCEPTION_RECORD ExceptionRecord;
+// MachExceptionInfo MachExceptionInfo;
+// PAL_DispatchException(&Context, &ExceptionRecord, &MachExceptionInfo);
+// }
+//
+
+NESTED_ENTRY PAL_DispatchExceptionWrapper, _TEXT, NoHandler
+ PROLOG_SAVE_REG_PAIR_INDEXED fp, lr, -16
+ // Should never actually run
+ EMIT_BREAKPOINT
+ bl EXTERNAL_C_FUNC(PAL_DispatchException)
+LOCAL_LABEL(PAL_DispatchExceptionReturn):
+ // Should never return
+ EMIT_BREAKPOINT
+ EPILOG_RESTORE_REG_PAIR_INDEXED fp, lr, 16
+ ret
+NESTED_END PAL_DispatchExceptionWrapper, _TEXT
diff --git a/src/coreclr/src/pal/src/config.h.in b/src/coreclr/src/pal/src/config.h.in
index 0319c6da77989..728c49d350ba1 100644
--- a/src/coreclr/src/pal/src/config.h.in
+++ b/src/coreclr/src/pal/src/config.h.in
@@ -114,7 +114,7 @@
#cmakedefine01 HAVE_WORKING_CLOCK_GETTIME
#cmakedefine01 HAVE_CLOCK_MONOTONIC
#cmakedefine01 HAVE_CLOCK_MONOTONIC_COARSE
-#cmakedefine01 HAVE_MACH_ABSOLUTE_TIME
+#cmakedefine01 HAVE_CLOCK_GETTIME_NSEC_NP
#cmakedefine01 HAVE_CLOCK_THREAD_CPUTIME
#cmakedefine01 HAVE_PTHREAD_CONDATTR_SETCLOCK
#cmakedefine01 HAVE_MMAP_DEV_ZERO
diff --git a/src/coreclr/src/pal/src/configure.cmake b/src/coreclr/src/pal/src/configure.cmake
index 0a2a4411aa96f..dfa14fa0928a9 100644
--- a/src/coreclr/src/pal/src/configure.cmake
+++ b/src/coreclr/src/pal/src/configure.cmake
@@ -44,7 +44,6 @@ check_include_files(runetype.h HAVE_RUNETYPE_H)
check_include_files(semaphore.h HAVE_SEMAPHORE_H)
check_include_files(sys/prctl.h HAVE_PRCTL_H)
check_include_files(numa.h HAVE_NUMA_H)
-check_include_files(pthread_np.h HAVE_PTHREAD_NP_H)
check_include_files("sys/auxv.h;asm/hwcap.h" HAVE_AUXV_HWCAP_H)
check_include_files("sys/ptrace.h" HAVE_SYS_PTRACE_H)
check_symbol_exists(getauxval sys/auxv.h HAVE_GETAUXVAL)
@@ -446,16 +445,14 @@ set(CMAKE_REQUIRED_LIBRARIES)
check_cxx_source_runs("
#include <stdlib.h>
-#include <mach/mach_time.h>
+#include <time.h>
int main()
{
int ret;
- mach_timebase_info_data_t timebaseInfo;
- ret = mach_timebase_info(&timebaseInfo);
- mach_absolute_time();
- exit(ret);
-}" HAVE_MACH_ABSOLUTE_TIME)
+ ret = clock_gettime_nsec_np(CLOCK_UPTIME_RAW);
+ exit((ret == 0) ? 1 : 0);
+}" HAVE_CLOCK_GETTIME_NSEC_NP)
set(CMAKE_REQUIRED_LIBRARIES ${CMAKE_RT_LIBS})
check_cxx_source_runs("
diff --git a/src/coreclr/src/pal/src/cruntime/printfcpp.cpp b/src/coreclr/src/pal/src/cruntime/printfcpp.cpp
index a71814e0dd4e8..0f7cb682650f8 100644
--- a/src/coreclr/src/pal/src/cruntime/printfcpp.cpp
+++ b/src/coreclr/src/pal/src/cruntime/printfcpp.cpp
@@ -59,6 +59,7 @@ static int Internal_Convertfwrite(CPalThread *pthrCurrent, const void *buffer, s
clearerr (stream);
#endif
+
if(convert)
{
int nsize;
@@ -66,6 +67,8 @@ static int Internal_Convertfwrite(CPalThread *pthrCurrent, const void *buffer, s
nsize = WideCharToMultiByte(CP_ACP, 0,(LPCWSTR)buffer, count, 0, 0, 0, 0);
if (!nsize)
{
+ if (count == 0)
+ return 0;
ASSERT("WideCharToMultiByte failed. Error is %d\n", GetLastError());
return -1;
}
diff --git a/src/coreclr/src/pal/src/exception/machexception.cpp b/src/coreclr/src/pal/src/exception/machexception.cpp
index de50fd6be99da..442e77d4a15e2 100644
--- a/src/coreclr/src/pal/src/exception/machexception.cpp
+++ b/src/coreclr/src/pal/src/exception/machexception.cpp
@@ -356,11 +356,16 @@ PAL_ERROR CorUnix::CPalThread::DisableMachExceptions()
return palError;
}
+#if defined(HOST_AMD64)
// Since HijackFaultingThread pushed the context, exception record and info on the stack, we need to adjust the
// signature of PAL_DispatchException such that the corresponding arguments are considered to be on the stack
// per GCC64 calling convention rules. Hence, the first 6 dummy arguments (corresponding to RDI, RSI, RDX,RCX, R8, R9).
extern "C"
void PAL_DispatchException(DWORD64 dwRDI, DWORD64 dwRSI, DWORD64 dwRDX, DWORD64 dwRCX, DWORD64 dwR8, DWORD64 dwR9, PCONTEXT pContext, PEXCEPTION_RECORD pExRecord, MachExceptionInfo *pMachExceptionInfo)
+#elif defined(HOST_ARM64)
+extern "C"
+void PAL_DispatchException(PCONTEXT pContext, PEXCEPTION_RECORD pExRecord, MachExceptionInfo *pMachExceptionInfo)
+#endif
{
CPalThread *pThread = InternalGetCurrentThread();
@@ -441,12 +446,36 @@ BuildExceptionRecord(
}
else
{
+#if defined(HOST_AMD64)
exceptionCode = EXCEPTION_ACCESS_VIOLATION;
+#elif defined(HOST_ARM64)
+ switch (exceptionInfo.Subcodes[0])
+ {
+ case EXC_ARM_DA_ALIGN:
+ exceptionCode = EXCEPTION_DATATYPE_MISALIGNMENT;
+ break;
+ case EXC_ARM_DA_DEBUG:
+ exceptionCode = EXCEPTION_BREAKPOINT;
+ break;
+ case EXC_ARM_SP_ALIGN:
+ exceptionCode = EXCEPTION_DATATYPE_MISALIGNMENT;
+ break;
+ case EXC_ARM_SWP:
+ exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
+ break;
+ case EXC_ARM_PAC_FAIL:
+ // PAC authentication failure: fall through to access violation
+ default:
+ exceptionCode = EXCEPTION_ACCESS_VIOLATION;
+ }
+#else
+#error Unexpected architecture
+#endif
pExceptionRecord->NumberParameters = 2;
pExceptionRecord->ExceptionInformation[0] = 0;
pExceptionRecord->ExceptionInformation[1] = exceptionInfo.Subcodes[1];
- NONPAL_TRACE("subcodes[1] = %llx\n", exceptionInfo.Subcodes[1]);
+ NONPAL_TRACE("subcodes[1] = %llx\n", (uint64_t) exceptionInfo.Subcodes[1]);
}
break;
@@ -468,6 +497,7 @@ BuildExceptionRecord(
{
switch (exceptionInfo.Subcodes[0])
{
+#if defined(HOST_AMD64)
case EXC_I386_DIV:
exceptionCode = EXCEPTION_INT_DIVIDE_BY_ZERO;
break;
@@ -480,6 +510,28 @@ BuildExceptionRecord(
case EXC_I386_BOUND:
exceptionCode = EXCEPTION_ARRAY_BOUNDS_EXCEEDED;
break;
+#elif defined(HOST_ARM64)
+ case EXC_ARM_FP_IO:
+ exceptionCode = EXCEPTION_FLT_INVALID_OPERATION;
+ break;
+ case EXC_ARM_FP_DZ:
+ exceptionCode = EXCEPTION_FLT_DIVIDE_BY_ZERO;
+ break;
+ case EXC_ARM_FP_OF:
+ exceptionCode = EXCEPTION_FLT_OVERFLOW;
+ break;
+ case EXC_ARM_FP_UF:
+ exceptionCode = EXCEPTION_FLT_UNDERFLOW;
+ break;
+ case EXC_ARM_FP_IX:
+ exceptionCode = EXCEPTION_FLT_INEXACT_RESULT;
+ break;
+ case EXC_ARM_FP_ID:
+ exceptionCode = EXCEPTION_FLT_DENORMAL_OPERAND;
+ break;
+#else
+#error Unexpected architecture
+#endif
default:
exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
break;
@@ -493,6 +545,7 @@ BuildExceptionRecord(
// Trace, breakpoint, etc. Details in subcode field.
case EXC_BREAKPOINT:
+#if defined(HOST_AMD64)
if (exceptionInfo.Subcodes[0] == EXC_I386_SGL)
{
exceptionCode = EXCEPTION_SINGLE_STEP;
@@ -501,6 +554,14 @@ BuildExceptionRecord(
{
exceptionCode = EXCEPTION_BREAKPOINT;
}
+#elif defined(HOST_ARM64)
+ if (exceptionInfo.Subcodes[0] == EXC_ARM_BREAKPOINT)
+ {
+ exceptionCode = EXCEPTION_BREAKPOINT;
+ }
+#else
+#error Unexpected architecture
+#endif
else
{
WARN("unexpected subcode %d for EXC_BREAKPOINT", exceptionInfo.Subcodes[0]);
@@ -594,12 +655,26 @@ HijackFaultingThread(
// Fill in the exception record from the exception info
BuildExceptionRecord(exceptionInfo, &exceptionRecord);
+#if defined(HOST_AMD64)
threadContext.ContextFlags = CONTEXT_FLOATING_POINT;
CONTEXT_GetThreadContextFromThreadState(x86_FLOAT_STATE, (thread_state_t)&exceptionInfo.FloatState, &threadContext);
threadContext.ContextFlags |= CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS;
CONTEXT_GetThreadContextFromThreadState(x86_THREAD_STATE, (thread_state_t)&exceptionInfo.ThreadState, &threadContext);
+ void **targetSP = (void **)threadContext.Rsp;
+#elif defined(HOST_ARM64)
+ threadContext.ContextFlags = CONTEXT_FLOATING_POINT;
+ CONTEXT_GetThreadContextFromThreadState(ARM_NEON_STATE64, (thread_state_t)&exceptionInfo.FloatState, &threadContext);
+
+ threadContext.ContextFlags |= CONTEXT_CONTROL | CONTEXT_INTEGER;
+ CONTEXT_GetThreadContextFromThreadState(ARM_THREAD_STATE64, (thread_state_t)&exceptionInfo.ThreadState, &threadContext);
+
+ void **targetSP = (void **)threadContext.Sp;
+#else
+#error Unexpected architecture
+#endif
+
// For CoreCLR we look more deeply at access violations to determine whether they're the result of a stack
// overflow. If so we'll terminate the process immediately (the current default policy of the CoreCLR EE).
// Otherwise we'll either A/V ourselves trying to set up the SEH exception record and context on the
@@ -653,7 +728,7 @@ HijackFaultingThread(
// Calculate the page base addresses for the fault and the faulting thread's SP.
int cbPage = getpagesize();
char *pFaultPage = (char*)(exceptionRecord.ExceptionInformation[1] & ~(cbPage - 1));
- char *pStackTopPage = (char*)(threadContext.Rsp & ~(cbPage - 1));
+ char *pStackTopPage = (char*)((size_t)targetSP & ~(cbPage - 1));
if (pFaultPage == pStackTopPage || pFaultPage == (pStackTopPage - cbPage))
{
@@ -697,7 +772,6 @@ HijackFaultingThread(
{
// Check if we can read a pointer-sized value below the target thread's stack pointer.
// If we are unable to, it implies the thread has run into a stack overflow.
- void **targetSP = (void **)threadContext.Rsp;
vm_address_t targetAddr = (mach_vm_address_t)(targetSP);
targetAddr -= sizeof(void *);
vm_size_t vm_size = sizeof(void *);
@@ -711,6 +785,15 @@ HijackFaultingThread(
}
}
+ if (fIsStackOverflow)
+ {
+ exceptionRecord.ExceptionCode = EXCEPTION_STACK_OVERFLOW;
+ }
+
+ exceptionRecord.ExceptionFlags = EXCEPTION_IS_SIGNAL;
+ exceptionRecord.ExceptionRecord = NULL;
+
+#if defined(HOST_AMD64)
NONPAL_ASSERTE(exceptionInfo.ThreadState.tsh.flavor == x86_THREAD_STATE64);
// Make a copy of the thread state because the one in exceptionInfo needs to be preserved to restore
@@ -723,16 +806,20 @@ HijackFaultingThread(
ts64.__rflags &= ~EFL_TF;
}
- if (fIsStackOverflow)
- {
- exceptionRecord.ExceptionCode = EXCEPTION_STACK_OVERFLOW;
- }
-
- exceptionRecord.ExceptionFlags = EXCEPTION_IS_SIGNAL;
- exceptionRecord.ExceptionRecord = NULL;
exceptionRecord.ExceptionAddress = (void *)ts64.__rip;
- void **FramePointer;
+ void **FramePointer = (void **)ts64.__rsp;
+#elif defined(HOST_ARM64)
+ // Make a copy of the thread state because the one in exceptionInfo needs to be preserved to restore
+ // the state if the exception is forwarded.
+ arm_thread_state64_t ts64 = exceptionInfo.ThreadState;
+
+ exceptionRecord.ExceptionAddress = (void *)arm_thread_state64_get_pc_fptr(ts64);
+
+ void **FramePointer = (void **)arm_thread_state64_get_sp(ts64);
+#else
+#error Unexpected architecture
+#endif
if (fIsStackOverflow)
{
@@ -751,12 +838,6 @@ HijackFaultingThread(
FramePointer = (void**)((size_t)stackOverflowHandlerStack + stackOverflowStackSize);
}
- else
- {
- FramePointer = (void **)ts64.__rsp;
- }
-
- *--FramePointer = (void *)ts64.__rip;
// Construct a stack frame for a pretend activation of the function
// PAL_DispatchExceptionWrapper that serves only to make the stack
@@ -764,8 +845,19 @@ HijackFaultingThread(
// PAL_DispatchExceptionWrapper has an ebp frame, its local variables
// are the context and exception record, and it has just "called"
// PAL_DispatchException.
+#if defined(HOST_AMD64)
+ *--FramePointer = (void *)ts64.__rip;
*--FramePointer = (void *)ts64.__rbp;
+
ts64.__rbp = (SIZE_T)FramePointer;
+#elif defined(HOST_ARM64)
+ *--FramePointer = (void *)arm_thread_state64_get_pc_fptr(ts64);
+ *--FramePointer = (void *)arm_thread_state64_get_fp(ts64);
+
+ arm_thread_state64_set_fp(ts64, FramePointer);
+#else
+#error Unexpected architecture
+#endif
// Put the context on the stack
FramePointer = (void **)((ULONG_PTR)FramePointer - sizeof(CONTEXT));
@@ -783,6 +875,7 @@ HijackFaultingThread(
MachExceptionInfo *pMachExceptionInfo = (MachExceptionInfo *)FramePointer;
*pMachExceptionInfo = exceptionInfo;
+#if defined(HOST_AMD64)
// Push arguments to PAL_DispatchException
FramePointer = (void **)((ULONG_PTR)FramePointer - 3 * sizeof(void *));
@@ -802,6 +895,26 @@ HijackFaultingThread(
// Now set the thread state for the faulting thread so that PAL_DispatchException executes next
machret = thread_set_state(thread, x86_THREAD_STATE64, (thread_state_t)&ts64, x86_THREAD_STATE64_COUNT);
CHECK_MACH("thread_set_state(thread)", machret);
+#elif defined(HOST_ARM64)
+ // Setup arguments to PAL_DispatchException
+ ts64.__x[0] = (uint64_t)pContext;
+ ts64.__x[1] = (uint64_t)pExceptionRecord;
+ ts64.__x[2] = (uint64_t)pMachExceptionInfo;
+
+ // Make sure it's aligned - SP has 16-byte alignment
+ FramePointer = (void **)((ULONG_PTR)FramePointer - ((ULONG_PTR)FramePointer % 16));
+ arm_thread_state64_set_sp(ts64, FramePointer);
+
+ // Make the call to DispatchException
+ arm_thread_state64_set_lr_fptr(ts64, (uint64_t)PAL_DispatchExceptionWrapper + PAL_DispatchExceptionReturnOffset);
+ arm_thread_state64_set_pc_fptr(ts64, PAL_DispatchException);
+
+ // Now set the thread state for the faulting thread so that PAL_DispatchException executes next
+ machret = thread_set_state(thread, ARM_THREAD_STATE64, (thread_state_t)&ts64, ARM_THREAD_STATE64_COUNT);
+ CHECK_MACH("thread_set_state(thread)", machret);
+#else
+#error Unexpected architecture
+#endif
}
/*++
@@ -932,8 +1045,9 @@ SEHExceptionThread(void *args)
int subcode_count = sMessage.GetExceptionCodeCount();
for (int i = 0; i < subcode_count; i++)
- NONPAL_TRACE("ExceptionNotification subcode[%d] = %llx\n", i, sMessage.GetExceptionCode(i));
+ NONPAL_TRACE("ExceptionNotification subcode[%d] = %llx\n", i, (uint64_t) sMessage.GetExceptionCode(i));
+#if defined(HOST_AMD64)
x86_thread_state64_t threadStateActual;
unsigned int count = sizeof(threadStateActual) / sizeof(unsigned);
machret = thread_get_state(thread, x86_THREAD_STATE64, (thread_state_t)&threadStateActual, &count);
@@ -957,6 +1071,31 @@ SEHExceptionThread(void *args)
threadExceptionState.__cpu,
threadExceptionState.__err,
threadExceptionState.__faultvaddr);
+#elif defined(HOST_ARM64)
+ arm_thread_state64_t threadStateActual;
+ unsigned int count = sizeof(threadStateActual) / sizeof(unsigned);
+ machret = thread_get_state(thread, ARM_THREAD_STATE64, (thread_state_t)&threadStateActual, &count);
+ CHECK_MACH("thread_get_state", machret);
+
+ NONPAL_TRACE("ExceptionNotification actual lr %p sp %016llx fp %016llx pc %p cpsr %08x\n",
+ arm_thread_state64_get_lr_fptr(threadStateActual),
+ arm_thread_state64_get_sp(threadStateActual),
+ arm_thread_state64_get_fp(threadStateActual),
+ arm_thread_state64_get_pc_fptr(threadStateActual),
+ threadStateActual.__cpsr);
+
+ arm_exception_state64_t threadExceptionState;
+ unsigned int ehStateCount = sizeof(threadExceptionState) / sizeof(unsigned);
+ machret = thread_get_state(thread, ARM_EXCEPTION_STATE64, (thread_state_t)&threadExceptionState, &ehStateCount);
+ CHECK_MACH("thread_get_state", machret);
+
+ NONPAL_TRACE("ExceptionNotification far %016llx esr %08x exception %08x\n",
+ threadExceptionState.__far,
+ threadExceptionState.__esr,
+ threadExceptionState.__exception);
+#else
+#error Unexpected architecture
+#endif
}
#endif // _DEBUG
@@ -1081,6 +1220,7 @@ MachExceptionInfo::MachExceptionInfo(mach_port_t thread, MachMessage& message)
for (int i = 0; i < SubcodeCount; i++)
Subcodes[i] = message.GetExceptionCode(i);
+#if defined(HOST_AMD64)
mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;
machret = thread_get_state(thread, x86_THREAD_STATE, (thread_state_t)&ThreadState, &count);
CHECK_MACH("thread_get_state", machret);
@@ -1092,6 +1232,21 @@ MachExceptionInfo::MachExceptionInfo(mach_port_t thread, MachMessage& message)
count = x86_DEBUG_STATE_COUNT;
machret = thread_get_state(thread, x86_DEBUG_STATE, (thread_state_t)&DebugState, &count);
CHECK_MACH("thread_get_state(debug)", machret);
+#elif defined(HOST_ARM64)
+ mach_msg_type_number_t count = ARM_THREAD_STATE64_COUNT;
+ machret = thread_get_state(thread, ARM_THREAD_STATE64, (thread_state_t)&ThreadState, &count);
+ CHECK_MACH("thread_get_state", machret);
+
+ count = ARM_NEON_STATE64_COUNT;
+ machret = thread_get_state(thread, ARM_NEON_STATE64, (thread_state_t)&FloatState, &count);
+ CHECK_MACH("thread_get_state(float)", machret);
+
+ count = ARM_DEBUG_STATE64_COUNT;
+ machret = thread_get_state(thread, ARM_DEBUG_STATE64, (thread_state_t)&DebugState, &count);
+ CHECK_MACH("thread_get_state(debug)", machret);
+#else
+#error Unexpected architecture
+#endif
}
/*++
@@ -1108,6 +1263,7 @@ Return value :
--*/
void MachExceptionInfo::RestoreState(mach_port_t thread)
{
+#if defined(HOST_AMD64)
// If we are restarting a breakpoint, we need to bump the IP back one to
// point at the actual int 3 instructions.
if (ExceptionType == EXC_BREAKPOINT)
@@ -1125,6 +1281,18 @@ void MachExceptionInfo::RestoreState(mach_port_t thread)
machret = thread_set_state(thread, x86_DEBUG_STATE, (thread_state_t)&DebugState, x86_DEBUG_STATE_COUNT);
CHECK_MACH("thread_set_state(debug)", machret);
+#elif defined(HOST_ARM64)
+ kern_return_t machret = thread_set_state(thread, ARM_THREAD_STATE64, (thread_state_t)&ThreadState, ARM_THREAD_STATE64_COUNT);
+ CHECK_MACH("thread_set_state(thread)", machret);
+
+ machret = thread_set_state(thread, ARM_NEON_STATE64, (thread_state_t)&FloatState, ARM_NEON_STATE64_COUNT);
+ CHECK_MACH("thread_set_state(float)", machret);
+
+ machret = thread_set_state(thread, ARM_DEBUG_STATE64, (thread_state_t)&DebugState, ARM_DEBUG_STATE64_COUNT);
+ CHECK_MACH("thread_set_state(debug)", machret);
+#else
+#error Unexpected architecture
+#endif
}
/*++
@@ -1257,7 +1425,13 @@ ActivationHandler(CONTEXT* context)
g_activationFunction(context);
}
+#ifdef TARGET_ARM64
+ // RtlRestoreContext assembly corrupts X16 & X17, so it cannot be
+ // used for Activation restore
+ MachSetThreadContext(context);
+#else
RtlRestoreContext(context, NULL);
+#endif
DebugBreak();
}
@@ -1265,6 +1439,30 @@ extern "C" void ActivationHandlerWrapper();
extern "C" int ActivationHandlerReturnOffset;
extern "C" unsigned int XmmYmmStateSupport();
+#if defined(HOST_AMD64)
+bool IsHardwareException(x86_exception_state64_t exceptionState)
+{
+ static const int MaxHardwareExceptionVector = 31;
+ return exceptionState.__trapno <= MaxHardwareExceptionVector;
+}
+#elif defined(HOST_ARM64)
+bool IsHardwareException(arm_exception_state64_t exceptionState)
+{
+ // Infer exception state from the ESR_EL* register value.
+ // Bits 31-26 represent the ESR.EC field
+ const int ESR_EC_SHIFT = 26;
+ const int ESR_EC_MASK = 0x3f;
+ const int esr_ec = (exceptionState.__esr >> ESR_EC_SHIFT) & ESR_EC_MASK;
+
+ const int ESR_EC_SVC = 0x15; // Supervisor Call exception from aarch64.
+
+ // Assume only supervisor calls from aarch64 are not hardware exceptions
+ return (esr_ec != ESR_EC_SVC);
+}
+#else
+#error Unexpected architecture
+#endif
+
/*++
Function :
InjectActivationInternal
@@ -1289,28 +1487,44 @@ InjectActivationInternal(CPalThread* pThread)
if (palError == NO_ERROR)
{
- mach_msg_type_number_t count;
-
+#if defined(HOST_AMD64)
x86_exception_state64_t ExceptionState;
- count = x86_EXCEPTION_STATE64_COUNT;
+ const thread_state_flavor_t exceptionFlavor = x86_EXCEPTION_STATE64;
+ const mach_msg_type_number_t exceptionCount = x86_EXCEPTION_STATE64_COUNT;
+
+ x86_thread_state64_t ThreadState;
+ const thread_state_flavor_t threadFlavor = x86_THREAD_STATE64;
+ const mach_msg_type_number_t threadCount = x86_THREAD_STATE64_COUNT;
+#elif defined(HOST_ARM64)
+ arm_exception_state64_t ExceptionState;
+ const thread_state_flavor_t exceptionFlavor = ARM_EXCEPTION_STATE64;
+ const mach_msg_type_number_t exceptionCount = ARM_EXCEPTION_STATE64_COUNT;
+
+ arm_thread_state64_t ThreadState;
+ const thread_state_flavor_t threadFlavor = ARM_THREAD_STATE64;
+ const mach_msg_type_number_t threadCount = ARM_THREAD_STATE64_COUNT;
+#else
+#error Unexpected architecture
+#endif
+ mach_msg_type_number_t count = exceptionCount;
+
MachRet = thread_get_state(threadPort,
- x86_EXCEPTION_STATE64,
+ exceptionFlavor,
(thread_state_t)&ExceptionState,
&count);
- _ASSERT_MSG(MachRet == KERN_SUCCESS, "thread_get_state for x86_EXCEPTION_STATE64\n");
+ _ASSERT_MSG(MachRet == KERN_SUCCESS, "thread_get_state for *_EXCEPTION_STATE64\n");
// Inject the activation only if the thread doesn't have a pending hardware exception
- static const int MaxHardwareExceptionVector = 31;
- if (ExceptionState.__trapno > MaxHardwareExceptionVector)
+ if (!IsHardwareException(ExceptionState))
{
- x86_thread_state64_t ThreadState;
- count = x86_THREAD_STATE64_COUNT;
+ count = threadCount;
MachRet = thread_get_state(threadPort,
- x86_THREAD_STATE64,
+ threadFlavor,
(thread_state_t)&ThreadState,
&count);
- _ASSERT_MSG(MachRet == KERN_SUCCESS, "thread_get_state for x86_THREAD_STATE64\n");
+ _ASSERT_MSG(MachRet == KERN_SUCCESS, "thread_get_state for *_THREAD_STATE64\n");
+#if defined(HOST_AMD64)
if ((g_safeActivationCheckFunction != NULL) && g_safeActivationCheckFunction(ThreadState.__rip, /* checkingCurrentThread */ FALSE))
{
// TODO: it would be nice to preserve the red zone in case a jitter would want to use it
@@ -1319,15 +1533,29 @@ InjectActivationInternal(CPalThread* pThread)
*(--sp) = ThreadState.__rip;
*(--sp) = ThreadState.__rbp;
size_t rbpAddress = (size_t)sp;
+#elif defined(HOST_ARM64)
+ if ((g_safeActivationCheckFunction != NULL) && g_safeActivationCheckFunction((size_t)arm_thread_state64_get_pc_fptr(ThreadState), /* checkingCurrentThread */ FALSE))
+ {
+ // TODO: it would be nice to preserve the red zone in case a jitter would want to use it
+ // Do we really care about unwinding through the wrapper?
+ size_t* sp = (size_t*)arm_thread_state64_get_sp(ThreadState);
+ *(--sp) = (size_t)arm_thread_state64_get_pc_fptr(ThreadState);
+ *(--sp) = arm_thread_state64_get_fp(ThreadState);
+ size_t fpAddress = (size_t)sp;
+#else
+#error Unexpected architecture
+#endif
size_t contextAddress = (((size_t)sp) - sizeof(CONTEXT)) & ~15;
- size_t returnAddressAddress = contextAddress - sizeof(size_t);
- *(size_t*)(returnAddressAddress) = ActivationHandlerReturnOffset + (size_t)ActivationHandlerWrapper;
// Fill in the context in the helper frame with the full context of the suspended thread.
// The ActivationHandler will use the context to resume the execution of the thread
// after the activation function returns.
CONTEXT *pContext = (CONTEXT *)contextAddress;
+#if defined(HOST_AMD64)
pContext->ContextFlags = CONTEXT_FULL | CONTEXT_SEGMENTS;
+#else
+ pContext->ContextFlags = CONTEXT_FULL;
+#endif
#ifdef XSTATE_SUPPORTED
if (XmmYmmStateSupport() == 1)
{
@@ -1337,16 +1565,30 @@ InjectActivationInternal(CPalThread* pThread)
MachRet = CONTEXT_GetThreadContextFromPort(threadPort, pContext);
_ASSERT_MSG(MachRet == KERN_SUCCESS, "CONTEXT_GetThreadContextFromPort\n");
+#if defined(HOST_AMD64)
+ size_t returnAddressAddress = contextAddress - sizeof(size_t);
+ *(size_t*)(returnAddressAddress) = ActivationHandlerReturnOffset + (size_t)ActivationHandlerWrapper;
+
// Make the instruction register point to ActivationHandler
ThreadState.__rip = (size_t)ActivationHandler;
ThreadState.__rsp = returnAddressAddress;
ThreadState.__rbp = rbpAddress;
ThreadState.__rdi = contextAddress;
+#elif defined(HOST_ARM64)
+ // Make the call to ActivationHandler
+ arm_thread_state64_set_lr_fptr(ThreadState, ActivationHandlerReturnOffset + (size_t)ActivationHandlerWrapper);
+ arm_thread_state64_set_pc_fptr(ThreadState, ActivationHandler);
+ arm_thread_state64_set_sp(ThreadState, contextAddress);
+ arm_thread_state64_set_fp(ThreadState, fpAddress);
+ ThreadState.__x[0] = contextAddress;
+#else
+#error Unexpected architecture
+#endif
MachRet = thread_set_state(threadPort,
- x86_THREAD_STATE64,
+ threadFlavor,
(thread_state_t)&ThreadState,
- count);
+ threadCount);
_ASSERT_MSG(MachRet == KERN_SUCCESS, "thread_set_state\n");
}
}
diff --git a/src/coreclr/src/pal/src/exception/machmessage.cpp b/src/coreclr/src/pal/src/exception/machmessage.cpp
index 0c0021855395f..049b93500bcec 100644
--- a/src/coreclr/src/pal/src/exception/machmessage.cpp
+++ b/src/coreclr/src/pal/src/exception/machmessage.cpp
@@ -1013,6 +1013,10 @@ thread_act_t MachMessage::GetThreadFromState(thread_state_flavor_t eFlavor, thre
case x86_THREAD_STATE64:
targetSP = ((x86_thread_state64_t*)pState)->__rsp;
break;
+#elif defined(HOST_ARM64)
+ case ARM_THREAD_STATE64:
+ targetSP = arm_thread_state64_get_sp(*(arm_thread_state64_t*)pState);
+ break;
#else
#error Unexpected architecture.
#endif
@@ -1031,9 +1035,17 @@ thread_act_t MachMessage::GetThreadFromState(thread_state_flavor_t eFlavor, thre
for (mach_msg_type_number_t i = 0; i < cThreads; i++)
{
// Get the general register state of each thread.
+#if defined(HOST_AMD64)
x86_thread_state_t threadState;
mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;
machret = thread_get_state(pThreads[i], x86_THREAD_STATE, (thread_state_t)&threadState, &count);
+#elif defined(HOST_ARM64)
+ arm_thread_state64_t threadState;
+ mach_msg_type_number_t count = ARM_THREAD_STATE64_COUNT;
+ machret = thread_get_state(pThreads[i], ARM_THREAD_STATE64, (thread_state_t)&threadState, &count);
+#else
+#error Unexpected architecture
+#endif
if (machret == KERN_SUCCESS)
{
// If a thread has the same SP as our target it should be the same thread (otherwise we have two
@@ -1044,6 +1056,8 @@ thread_act_t MachMessage::GetThreadFromState(thread_state_flavor_t eFlavor, thre
if (threadState.uts.ts32.esp == targetSP)
#elif defined(HOST_AMD64)
if (threadState.uts.ts64.__rsp == targetSP)
+#elif defined(HOST_ARM64)
+ if (arm_thread_state64_get_sp(threadState) == targetSP)
#else
#error Unexpected architecture.
#endif
diff --git a/src/coreclr/src/pal/src/exception/machmessage.h b/src/coreclr/src/pal/src/exception/machmessage.h
index bf544d66f98c4..ff288ad6f25b2 100644
--- a/src/coreclr/src/pal/src/exception/machmessage.h
+++ b/src/coreclr/src/pal/src/exception/machmessage.h
@@ -87,10 +87,17 @@ struct MachExceptionInfo
exception_type_t ExceptionType;
mach_msg_type_number_t SubcodeCount;
MACH_EH_TYPE(exception_data_type_t) Subcodes[2];
+#if defined(HOST_AMD64)
x86_thread_state_t ThreadState;
x86_float_state_t FloatState;
x86_debug_state_t DebugState;
-
+#elif defined(HOST_ARM64)
+ arm_thread_state64_t ThreadState;
+ arm_neon_state64_t FloatState;
+ arm_debug_state64_t DebugState;
+#else
+#error Unexpected architecture
+#endif
MachExceptionInfo(mach_port_t thread, MachMessage& message);
void RestoreState(mach_port_t thread);
};
diff --git a/src/coreclr/src/pal/src/exception/remote-unwind.cpp b/src/coreclr/src/pal/src/exception/remote-unwind.cpp
index 91f819370f405..4226e55097ca3 100644
--- a/src/coreclr/src/pal/src/exception/remote-unwind.cpp
+++ b/src/coreclr/src/pal/src/exception/remote-unwind.cpp
@@ -75,6 +75,11 @@ SET_DEFAULT_DEBUG_CHANNEL(EXCEPT);
#define TRACE_VERBOSE
+#include "crosscomp.h"
+
+#define KNONVOLATILE_CONTEXT_POINTERS T_KNONVOLATILE_CONTEXT_POINTERS
+#define CONTEXT T_CONTEXT
+
#else // HOST_UNIX
#include
@@ -1296,6 +1301,7 @@ GetProcInfo(unw_word_t ip, unw_proc_info_t *pip, const libunwindInfo* info, bool
return false;
}
+#if defined(TARGET_AMD64)
static bool
StepWithCompactEncodingRBPFrame(const libunwindInfo* info, compact_unwind_encoding_t compactEncoding)
{
@@ -1364,10 +1370,143 @@ StepWithCompactEncodingRBPFrame(const libunwindInfo* info, compact_unwind_encodi
compactEncoding, (void*)context->Rip, (void*)context->Rsp, (void*)context->Rbp);
return true;
}
+#endif
+
+#if defined(TARGET_ARM64)
+inline static bool
+ReadCompactEncodingRegister(const libunwindInfo* info, unw_word_t* addr, DWORD64* reg)
+{
+ *addr -= sizeof(uint64_t);
+ if (!ReadValue64(info, addr, (uint64_t*)reg)) {
+ return false;
+ }
+ return true;
+}
+
+inline static bool
+ReadCompactEncodingRegisterPair(const libunwindInfo* info, unw_word_t* addr, DWORD64* second, DWORD64* first)
+{
+ // Registers are effectively pushed in pairs
+ //
+ // *addr -= 8
+ // **addr = *first
+ // *addr -= 8
+ // **addr = *second
+ if (!ReadCompactEncodingRegister(info, addr, first)) {
+ return false;
+ }
+ if (!ReadCompactEncodingRegister(info, addr, second)) {
+ return false;
+ }
+ return true;
+}
+
+inline static bool
+ReadCompactEncodingRegisterPair(const libunwindInfo* info, unw_word_t* addr, NEON128* second, NEON128* first)
+{
+ if (!ReadCompactEncodingRegisterPair(info, addr, &first->Low, &second->Low)) {
+ return false;
+ }
+ first->High = 0;
+ second->High = 0;
+ return true;
+}
+
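The two helpers above walk downward through memory, undoing pushes: each read first decrements the cursor and then loads, so the higher-addressed slot of a pair comes out first. A toy illustration of that traversal order, with a plain array standing in for the target process's stack:

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
    // Stack grows down; slots[0] is the lowest address. A pair push of
    // (first, second) leaves "first" at the higher address.
    uint64_t slots[2] = { 0x2222 /* second */, 0x1111 /* first */ };
    uint64_t* addr = slots + 2;        // cursor starts one past the pair

    uint64_t first  = *--addr;         // higher-addressed slot read first
    uint64_t second = *--addr;         // then the lower one

    printf("first=%#llx second=%#llx\n",
           (unsigned long long)first, (unsigned long long)second);
    return 0;
}
```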
+// Saved registers are pushed
+// + in pairs
+// + in register number order (after the optional frame registers)
+// + after the caller's SP
+//
+// Given C++ code that generates this prologue spill sequence
+//
+// sub sp, sp, #128 ; =128
+// stp d15, d14, [sp, #16] ; 16-byte Folded Spill
+// stp d13, d12, [sp, #32] ; 16-byte Folded Spill
+// stp d11, d10, [sp, #48] ; 16-byte Folded Spill
+// stp d9, d8, [sp, #64] ; 16-byte Folded Spill
+// stp x22, x21, [sp, #80] ; 16-byte Folded Spill
+// stp x20, x19, [sp, #96] ; 16-byte Folded Spill
+// stp x29, x30, [sp, #112] ; 16-byte Folded Spill
+// add x29, sp, #112 ; =112
+//
+// The compiler generates:
+// compactEncoding = 0x04000f03;
+static bool
+StepWithCompactEncodingArm64(const libunwindInfo* info, compact_unwind_encoding_t compactEncoding, bool hasFrame)
+{
+ CONTEXT* context = info->Context;
+
+ unw_word_t callerSp;
+
+ if (hasFrame) {
+        // The caller's SP is the callee's FP plus the saved FP and LR pair
+ callerSp = context->Fp + 2 * sizeof(uint64_t);
+ } else {
+        // Get the least significant set bit of UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK
+ uint64_t stackSizeScale = UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK & ~(UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK - 1);
+ uint64_t stackSize = (compactEncoding & UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK) / stackSizeScale * 16;
+
+ callerSp = context->Sp + stackSize;
+ }
+
+ context->Sp = callerSp;
+
+ // return address is stored in Lr
+ context->Pc = context->Lr;
+
+ unw_word_t addr = callerSp;
+
+ if (hasFrame &&
+ !ReadCompactEncodingRegisterPair(info, &addr, &context->Lr, &context->Fp)) {
+ return false;
+ }
+ if (compactEncoding & UNWIND_ARM64_FRAME_X19_X20_PAIR &&
+ !ReadCompactEncodingRegisterPair(info, &addr, &context->X[19], &context->X[20])) {
+ return false;
+ }
+ if (compactEncoding & UNWIND_ARM64_FRAME_X21_X22_PAIR &&
+ !ReadCompactEncodingRegisterPair(info, &addr, &context->X[21], &context->X[22])) {
+ return false;
+ }
+ if (compactEncoding & UNWIND_ARM64_FRAME_X23_X24_PAIR &&
+ !ReadCompactEncodingRegisterPair(info, &addr, &context->X[23], &context->X[24])) {
+ return false;
+ }
+ if (compactEncoding & UNWIND_ARM64_FRAME_X25_X26_PAIR &&
+ !ReadCompactEncodingRegisterPair(info, &addr, &context->X[25], &context->X[26])) {
+ return false;
+ }
+ if (compactEncoding & UNWIND_ARM64_FRAME_X27_X28_PAIR &&
+ !ReadCompactEncodingRegisterPair(info, &addr, &context->X[27], &context->X[28])) {
+ return false;
+ }
+ if (compactEncoding & UNWIND_ARM64_FRAME_D8_D9_PAIR &&
+ !ReadCompactEncodingRegisterPair(info, &addr, &context->V[8], &context->V[9])) {
+ return false;
+ }
+ if (compactEncoding & UNWIND_ARM64_FRAME_D10_D11_PAIR &&
+ !ReadCompactEncodingRegisterPair(info, &addr, &context->V[10], &context->V[11])) {
+ return false;
+ }
+ if (compactEncoding & UNWIND_ARM64_FRAME_D12_D13_PAIR &&
+ !ReadCompactEncodingRegisterPair(info, &addr, &context->V[12], &context->V[13])) {
+ return false;
+ }
+ if (compactEncoding & UNWIND_ARM64_FRAME_D14_D15_PAIR &&
+ !ReadCompactEncodingRegisterPair(info, &addr, &context->V[14], &context->V[15])) {
+ return false;
+ }
+
+ TRACE("SUCCESS: compact step encoding %08x pc %p sp %p fp %p lr %p\n",
+ compactEncoding, (void*)context->Pc, (void*)context->Sp, (void*)context->Fp, (void*)context->Lr);
+ return true;
+}
+#endif
static bool
StepWithCompactEncoding(const libunwindInfo* info, compact_unwind_encoding_t compactEncoding, unw_word_t functionStart)
{
+#if defined(TARGET_AMD64)
if (compactEncoding == 0)
{
TRACE("Compact unwind missing for %p\n", (void*)info->Context->Rip);
@@ -1381,8 +1520,30 @@ StepWithCompactEncoding(const libunwindInfo* info, compact_unwind_encoding_t com
case UNWIND_X86_64_MODE_STACK_IMMD:
case UNWIND_X86_64_MODE_STACK_IND:
break;
-
+
+ case UNWIND_X86_64_MODE_DWARF:
+ return false;
}
+#elif defined(TARGET_ARM64)
+ if (compactEncoding == 0)
+ {
+ TRACE("Compact unwind missing for %p\n", (void*)info->Context->Pc);
+ return false;
+ }
+ switch (compactEncoding & UNWIND_ARM64_MODE_MASK)
+ {
+ case UNWIND_ARM64_MODE_FRAME:
+ return StepWithCompactEncodingArm64(info, compactEncoding, true);
+
+ case UNWIND_ARM64_MODE_FRAMELESS:
+ return StepWithCompactEncodingArm64(info, compactEncoding, false);
+
+ case UNWIND_ARM64_MODE_DWARF:
+ return false;
+ }
+#else
+#error unsupported architecture
+#endif
ERROR("Invalid encoding %08x\n", compactEncoding);
return false;
}
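For the frameless mode, the frame size comes out of the encoding itself. A sketch of that bit arithmetic, assuming the UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK value of 0x00FFF000 from Apple's compact_unwind_encoding.h (check that header for the authoritative definition):

```cpp
#include <cstdint>
#include <cstdio>

// Assumed mask value; see Apple's compact_unwind_encoding.h.
constexpr uint32_t kFramelessStackSizeMask = 0x00FFF000;

static uint64_t FramelessStackSize(uint32_t encoding)
{
    // Isolate the least significant set bit of the mask (0x1000) so the
    // field can be scaled down without hard-coding its bit position.
    uint64_t scale = kFramelessStackSizeMask & ~(kFramelessStackSizeMask - 1);
    // The field counts 16-byte stack units.
    return (encoding & kFramelessStackSizeMask) / scale * 16;
}

int main()
{
    // An encoding whose size field is 8 corresponds to 128 bytes of stack.
    printf("stack size: %llu bytes\n",
           (unsigned long long)FramelessStackSize(8 * 0x1000));
    return 0;
}
```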
@@ -1411,19 +1572,19 @@ static void GetContextPointer(unw_cursor_t *cursor, unw_context_t *unwContext, i
static void GetContextPointers(unw_cursor_t *cursor, unw_context_t *unwContext, KNONVOLATILE_CONTEXT_POINTERS *contextPointers)
{
-#if (defined(HOST_UNIX) && defined(HOST_AMD64)) || (defined(HOST_WINDOWS) && defined(TARGET_AMD64))
+#if defined(TARGET_AMD64)
GetContextPointer(cursor, unwContext, UNW_X86_64_RBP, &contextPointers->Rbp);
GetContextPointer(cursor, unwContext, UNW_X86_64_RBX, &contextPointers->Rbx);
GetContextPointer(cursor, unwContext, UNW_X86_64_R12, &contextPointers->R12);
GetContextPointer(cursor, unwContext, UNW_X86_64_R13, &contextPointers->R13);
GetContextPointer(cursor, unwContext, UNW_X86_64_R14, &contextPointers->R14);
GetContextPointer(cursor, unwContext, UNW_X86_64_R15, &contextPointers->R15);
-#elif (defined(HOST_UNIX) && defined(HOST_X86)) || (defined(HOST_WINDOWS) && defined(TARGET_X86))
+#elif defined(TARGET_X86)
GetContextPointer(cursor, unwContext, UNW_X86_EBX, &contextPointers->Ebx);
GetContextPointer(cursor, unwContext, UNW_X86_EBP, &contextPointers->Ebp);
GetContextPointer(cursor, unwContext, UNW_X86_ESI, &contextPointers->Esi);
GetContextPointer(cursor, unwContext, UNW_X86_EDI, &contextPointers->Edi);
-#elif (defined(HOST_UNIX) && defined(HOST_ARM)) || (defined(HOST_WINDOWS) && defined(TARGET_ARM))
+#elif defined(TARGET_ARM)
GetContextPointer(cursor, unwContext, UNW_ARM_R4, &contextPointers->R4);
GetContextPointer(cursor, unwContext, UNW_ARM_R5, &contextPointers->R5);
GetContextPointer(cursor, unwContext, UNW_ARM_R6, &contextPointers->R6);
@@ -1432,7 +1593,7 @@ static void GetContextPointers(unw_cursor_t *cursor, unw_context_t *unwContext,
GetContextPointer(cursor, unwContext, UNW_ARM_R9, &contextPointers->R9);
GetContextPointer(cursor, unwContext, UNW_ARM_R10, &contextPointers->R10);
GetContextPointer(cursor, unwContext, UNW_ARM_R11, &contextPointers->R11);
-#elif (defined(HOST_UNIX) && defined(HOST_ARM64)) || (defined(HOST_WINDOWS) && defined(TARGET_ARM64))
+#elif defined(TARGET_ARM64)
GetContextPointer(cursor, unwContext, UNW_AARCH64_X19, &contextPointers->X19);
GetContextPointer(cursor, unwContext, UNW_AARCH64_X20, &contextPointers->X20);
GetContextPointer(cursor, unwContext, UNW_AARCH64_X21, &contextPointers->X21);
@@ -1451,7 +1612,7 @@ static void GetContextPointers(unw_cursor_t *cursor, unw_context_t *unwContext,
static void UnwindContextToContext(unw_cursor_t *cursor, CONTEXT *winContext)
{
-#if (defined(HOST_UNIX) && defined(HOST_AMD64)) || (defined(HOST_WINDOWS) && defined(TARGET_AMD64))
+#if defined(TARGET_AMD64)
unw_get_reg(cursor, UNW_REG_IP, (unw_word_t *) &winContext->Rip);
unw_get_reg(cursor, UNW_REG_SP, (unw_word_t *) &winContext->Rsp);
unw_get_reg(cursor, UNW_X86_64_RBP, (unw_word_t *) &winContext->Rbp);
@@ -1460,14 +1621,14 @@ static void UnwindContextToContext(unw_cursor_t *cursor, CONTEXT *winContext)
unw_get_reg(cursor, UNW_X86_64_R13, (unw_word_t *) &winContext->R13);
unw_get_reg(cursor, UNW_X86_64_R14, (unw_word_t *) &winContext->R14);
unw_get_reg(cursor, UNW_X86_64_R15, (unw_word_t *) &winContext->R15);
-#elif (defined(HOST_UNIX) && defined(HOST_X86)) || (defined(HOST_WINDOWS) && defined(TARGET_X86))
+#elif defined(TARGET_X86)
unw_get_reg(cursor, UNW_REG_IP, (unw_word_t *) &winContext->Eip);
unw_get_reg(cursor, UNW_REG_SP, (unw_word_t *) &winContext->Esp);
unw_get_reg(cursor, UNW_X86_EBP, (unw_word_t *) &winContext->Ebp);
unw_get_reg(cursor, UNW_X86_EBX, (unw_word_t *) &winContext->Ebx);
unw_get_reg(cursor, UNW_X86_ESI, (unw_word_t *) &winContext->Esi);
unw_get_reg(cursor, UNW_X86_EDI, (unw_word_t *) &winContext->Edi);
-#elif (defined(HOST_UNIX) && defined(HOST_ARM)) || (defined(HOST_WINDOWS) && defined(TARGET_ARM))
+#elif defined(TARGET_ARM)
unw_get_reg(cursor, UNW_REG_IP, (unw_word_t *) &winContext->Pc);
unw_get_reg(cursor, UNW_REG_SP, (unw_word_t *) &winContext->Sp);
unw_get_reg(cursor, UNW_ARM_R4, (unw_word_t *) &winContext->R4);
@@ -1480,7 +1641,7 @@ static void UnwindContextToContext(unw_cursor_t *cursor, CONTEXT *winContext)
unw_get_reg(cursor, UNW_ARM_R11, (unw_word_t *) &winContext->R11);
unw_get_reg(cursor, UNW_ARM_R14, (unw_word_t *) &winContext->Lr);
TRACE("sp %p pc %p lr %p\n", winContext->Sp, winContext->Pc, winContext->Lr);
-#elif (defined(HOST_UNIX) && defined(HOST_ARM64)) || (defined(HOST_WINDOWS) && defined(TARGET_ARM64))
+#elif defined(TARGET_ARM64)
unw_get_reg(cursor, UNW_REG_IP, (unw_word_t *) &winContext->Pc);
unw_get_reg(cursor, UNW_REG_SP, (unw_word_t *) &winContext->Sp);
unw_get_reg(cursor, UNW_AARCH64_X19, (unw_word_t *) &winContext->X19);
@@ -1541,7 +1702,7 @@ access_reg(unw_addr_space_t as, unw_regnum_t regnum, unw_word_t *valp, int write
switch (regnum)
{
-#if (defined(HOST_UNIX) && defined(HOST_AMD64)) || (defined(HOST_WINDOWS) && defined(TARGET_AMD64))
+#if defined(TARGET_AMD64)
case UNW_REG_IP: *valp = (unw_word_t)winContext->Rip; break;
case UNW_REG_SP: *valp = (unw_word_t)winContext->Rsp; break;
case UNW_X86_64_RBP: *valp = (unw_word_t)winContext->Rbp; break;
@@ -1550,14 +1711,14 @@ access_reg(unw_addr_space_t as, unw_regnum_t regnum, unw_word_t *valp, int write
case UNW_X86_64_R13: *valp = (unw_word_t)winContext->R13; break;
case UNW_X86_64_R14: *valp = (unw_word_t)winContext->R14; break;
case UNW_X86_64_R15: *valp = (unw_word_t)winContext->R15; break;
-#elif (defined(HOST_UNIX) && defined(HOST_X86)) || (defined(HOST_WINDOWS) && defined(TARGET_X86))
+#elif defined(TARGET_X86)
case UNW_REG_IP: *valp = (unw_word_t)winContext->Eip; break;
case UNW_REG_SP: *valp = (unw_word_t)winContext->Esp; break;
case UNW_X86_EBX: *valp = (unw_word_t)winContext->Ebx; break;
case UNW_X86_ESI: *valp = (unw_word_t)winContext->Esi; break;
case UNW_X86_EDI: *valp = (unw_word_t)winContext->Edi; break;
case UNW_X86_EBP: *valp = (unw_word_t)winContext->Ebp; break;
-#elif (defined(HOST_UNIX) && defined(HOST_ARM)) || (defined(HOST_WINDOWS) && defined(TARGET_ARM))
+#elif defined(TARGET_ARM)
case UNW_ARM_R4: *valp = (unw_word_t)winContext->R4; break;
case UNW_ARM_R5: *valp = (unw_word_t)winContext->R5; break;
case UNW_ARM_R6: *valp = (unw_word_t)winContext->R6; break;
@@ -1569,7 +1730,7 @@ access_reg(unw_addr_space_t as, unw_regnum_t regnum, unw_word_t *valp, int write
case UNW_ARM_R13: *valp = (unw_word_t)winContext->Sp; break;
case UNW_ARM_R14: *valp = (unw_word_t)winContext->Lr; break;
case UNW_ARM_R15: *valp = (unw_word_t)winContext->Pc; break;
-#elif (defined(HOST_UNIX) && defined(HOST_ARM64)) || (defined(HOST_WINDOWS) && defined(TARGET_ARM64))
+#elif defined(TARGET_ARM64)
case UNW_AARCH64_X19: *valp = (unw_word_t)winContext->X19; break;
case UNW_AARCH64_X20: *valp = (unw_word_t)winContext->X20; break;
case UNW_AARCH64_X21: *valp = (unw_word_t)winContext->X21; break;
@@ -1853,10 +2014,17 @@ PAL_VirtualUnwindOutOfProc(CONTEXT *context, KNONVOLATILE_CONTEXT_POINTERS *cont
info.ReadMemory = readMemoryCallback;
#ifdef __APPLE__
- TRACE("Unwind: rip %p rsp %p rbp %p\n", (void*)context->Rip, (void*)context->Rsp, (void*)context->Rbp);
unw_proc_info_t procInfo;
bool step;
+#if defined(TARGET_AMD64)
+ TRACE("Unwind: rip %p rsp %p rbp %p\n", (void*)context->Rip, (void*)context->Rsp, (void*)context->Rbp);
result = GetProcInfo(context->Rip, &procInfo, &info, &step, false);
+#elif defined(TARGET_ARM64)
+ TRACE("Unwind: pc %p sp %p fp %p\n", (void*)context->Pc, (void*)context->Sp, (void*)context->Fp);
+ result = GetProcInfo(context->Pc, &procInfo, &info, &step, false);
+#else
+#error Unexpected architecture
+#endif
if (!result)
{
goto exit;
diff --git a/src/coreclr/src/pal/src/exception/seh-unwind.cpp b/src/coreclr/src/pal/src/exception/seh-unwind.cpp
index a63f56c636480..5db8a80a88cdf 100644
--- a/src/coreclr/src/pal/src/exception/seh-unwind.cpp
+++ b/src/coreclr/src/pal/src/exception/seh-unwind.cpp
@@ -54,6 +54,27 @@ Module Name:
#endif // HOST_UNIX
+#if defined(TARGET_OSX) && defined(TARGET_ARM64)
+// macOS uses ARM64 instead of AARCH64 to describe these registers.
+// Create aliases so that more of the existing code can be shared.
+enum
+{
+ UNW_AARCH64_X19 = UNW_ARM64_X19,
+ UNW_AARCH64_X20 = UNW_ARM64_X20,
+ UNW_AARCH64_X21 = UNW_ARM64_X21,
+ UNW_AARCH64_X22 = UNW_ARM64_X22,
+ UNW_AARCH64_X23 = UNW_ARM64_X23,
+ UNW_AARCH64_X24 = UNW_ARM64_X24,
+ UNW_AARCH64_X25 = UNW_ARM64_X25,
+ UNW_AARCH64_X26 = UNW_ARM64_X26,
+ UNW_AARCH64_X27 = UNW_ARM64_X27,
+ UNW_AARCH64_X28 = UNW_ARM64_X28,
+ UNW_AARCH64_X29 = UNW_ARM64_X29,
+ UNW_AARCH64_X30 = UNW_ARM64_X30
+};
+#endif // defined(TARGET_OSX) && defined(TARGET_ARM64)
+
+
//----------------------------------------------------------------------
// Virtual Unwinding
//----------------------------------------------------------------------
@@ -129,7 +150,7 @@ static void WinContextToUnwindContext(CONTEXT *winContext, unw_context_t *unwCon
unwContext->regs[13] = winContext->Sp;
unwContext->regs[14] = winContext->Lr;
unwContext->regs[15] = winContext->Pc;
-#elif defined(HOST_ARM64)
+#elif defined(HOST_ARM64) && !defined(TARGET_OSX)
unwContext->uc_mcontext.pc = winContext->Pc;
unwContext->uc_mcontext.sp = winContext->Sp;
unwContext->uc_mcontext.regs[29] = winContext->Fp;
@@ -166,6 +187,24 @@ static void WinContextToUnwindCursor(CONTEXT *winContext, unw_cursor_t *cursor)
unw_set_reg(cursor, UNW_X86_EBX, winContext->Ebx);
unw_set_reg(cursor, UNW_X86_ESI, winContext->Esi);
unw_set_reg(cursor, UNW_X86_EDI, winContext->Edi);
+#elif defined(HOST_ARM64) && defined(TARGET_OSX)
+    // unw_cursor_t is an opaque data structure on macOS.
+    // As noted in WinContextToUnwindContext, this approach didn't work for Linux;
+    // it is TBD whether it will work for macOS.
+ unw_set_reg(cursor, UNW_REG_IP, winContext->Pc);
+ unw_set_reg(cursor, UNW_REG_SP, winContext->Sp);
+ unw_set_reg(cursor, UNW_AARCH64_X29, winContext->Fp);
+ unw_set_reg(cursor, UNW_AARCH64_X30, winContext->Lr);
+ unw_set_reg(cursor, UNW_AARCH64_X19, winContext->X19);
+ unw_set_reg(cursor, UNW_AARCH64_X20, winContext->X20);
+ unw_set_reg(cursor, UNW_AARCH64_X21, winContext->X21);
+ unw_set_reg(cursor, UNW_AARCH64_X22, winContext->X22);
+ unw_set_reg(cursor, UNW_AARCH64_X23, winContext->X23);
+ unw_set_reg(cursor, UNW_AARCH64_X24, winContext->X24);
+ unw_set_reg(cursor, UNW_AARCH64_X25, winContext->X25);
+ unw_set_reg(cursor, UNW_AARCH64_X26, winContext->X26);
+ unw_set_reg(cursor, UNW_AARCH64_X27, winContext->X27);
+ unw_set_reg(cursor, UNW_AARCH64_X28, winContext->X28);
#endif
}
#endif
@@ -215,6 +254,13 @@ void UnwindContextToWinContext(unw_cursor_t *cursor, CONTEXT *winContext)
unw_get_reg(cursor, UNW_AARCH64_X26, (unw_word_t *) &winContext->X26);
unw_get_reg(cursor, UNW_AARCH64_X27, (unw_word_t *) &winContext->X27);
unw_get_reg(cursor, UNW_AARCH64_X28, (unw_word_t *) &winContext->X28);
+
+#if defined(TARGET_OSX) && defined(TARGET_ARM64)
+    // Strip the pointer authentication bits that seem to be leaking out of libunwind.
+    // ptrauth_strip() / __builtin_ptrauth_strip() ought to work here, but currently
+    // fail with "this target does not support pointer authentication"
+ winContext->Pc = winContext->Pc & 0x7fffffffffffull;
+#endif // defined(TARGET_OSX) && defined(TARGET_ARM64)
#else
#error unsupported architecture
#endif
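A minimal standalone version of the same masking; the 0x7fffffffffff constant mirrors the workaround above, and real code should switch to the ptrauth intrinsics once they compile for this target:

```cpp
#include <cstdint>

// Keep the low 47 bits of a code pointer; on arm64e the upper bits carry
// the PAC signature. The mask matches the workaround above.
static inline uint64_t StripPtrAuthBits(uint64_t pc)
{
    return pc & 0x7fffffffffffull;
}
```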
diff --git a/src/coreclr/src/pal/src/include/pal/context.h b/src/coreclr/src/pal/src/include/pal/context.h
index db0baec111919..73de448dfcf2f 100644
--- a/src/coreclr/src/pal/src/include/pal/context.h
+++ b/src/coreclr/src/pal/src/include/pal/context.h
@@ -305,6 +305,7 @@ inline void *FPREG_Xstate_Ymmh(const ucontext_t *uc)
#define MCREG_Cpsr(mc) ((mc).pstate)
+#ifndef TARGET_OSX
inline
fpsimd_context* GetNativeSigSimdContext(native_context_t *mc)
{
@@ -341,7 +342,9 @@ const fpsimd_context* GetConstNativeSigSimdContext(const native_context_t *mc)
return GetNativeSigSimdContext(const_cast(mc));
}
-#else
+#endif // TARGET_OSX
+
+#else // HOST_ARM64
// For FreeBSD, as found in x86/ucontext.h
#define MCREG_Rbp(mc) ((mc).mc_rbp)
#define MCREG_Rip(mc) ((mc).mc_rip)
diff --git a/src/coreclr/src/pal/src/include/pal/misc.h b/src/coreclr/src/pal/src/include/pal/misc.h
index 1a555f4fb2bb2..aa5b2b4852b6e 100644
--- a/src/coreclr/src/pal/src/include/pal/misc.h
+++ b/src/coreclr/src/pal/src/include/pal/misc.h
@@ -42,17 +42,6 @@ Function :
--*/
PAL_time_t __cdecl PAL_time(PAL_time_t*);
-/*++
-Function:
-TIMEInitialize
-
-Return value:
-TRUE if initialize succeeded
-FALSE otherwise
-
---*/
-BOOL TIMEInitialize( void );
-
/*++
Function :
MsgBoxInitialize
diff --git a/src/coreclr/src/pal/src/init/pal.cpp b/src/coreclr/src/pal/src/init/pal.cpp
index 0b4876eb0a551..488ff6a4d79bc 100644
--- a/src/coreclr/src/pal/src/init/pal.cpp
+++ b/src/coreclr/src/pal/src/init/pal.cpp
@@ -626,13 +626,6 @@ Initialize(
palError = ERROR_GEN_FAILURE;
- if (FALSE == TIMEInitialize())
- {
- ERROR("Unable to initialize TIME support\n");
- palError = ERROR_PALINIT_TIME;
- goto CLEANUP6;
- }
-
/* Initialize the File mapping critical section. */
if (FALSE == MAPInitialize())
{
diff --git a/src/coreclr/src/pal/src/libunwind_mac/src/missing-functions.c b/src/coreclr/src/pal/src/libunwind_mac/src/missing-functions.c
index 8399214e49520..9ccb1df07b6e6 100644
--- a/src/coreclr/src/pal/src/libunwind_mac/src/missing-functions.c
+++ b/src/coreclr/src/pal/src/libunwind_mac/src/missing-functions.c
@@ -47,11 +47,25 @@ unw_get_accessors_int (unw_addr_space_t as)
return unw_get_accessors(as);
}
+#if defined(TARGET_AMD64) && !defined(HOST_AMD64)
+#define X86_64_SCF_NONE 0
+#endif
+
+#if defined(TARGET_ARM64) && !defined(HOST_ARM64)
+#define AARCH64_SCF_NONE 0
+#endif
+
int
unw_is_signal_frame (unw_cursor_t *cursor)
{
struct cursor *c = (struct cursor *) cursor;
+#ifdef TARGET_AMD64
return c->sigcontext_format != X86_64_SCF_NONE;
+#elif defined(TARGET_ARM64)
+ return c->sigcontext_format != AARCH64_SCF_NONE;
+#else
+ #error Unexpected target
+#endif
}
int
diff --git a/src/coreclr/src/pal/src/map/virtual.cpp b/src/coreclr/src/pal/src/map/virtual.cpp
index edac877580216..456254bdbbb9a 100644
--- a/src/coreclr/src/pal/src/map/virtual.cpp
+++ b/src/coreclr/src/pal/src/map/virtual.cpp
@@ -1760,6 +1760,23 @@ VirtualProtect(
return bRetVal;
}
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+bool
+PAL_JITWriteEnableHolder::JITWriteEnable(bool writeEnable)
+{
+    // Use a thread local to track the per-thread JIT write-enable state.
+    // Threads start with MAP_JIT pages readable and executable (R-X) by default.
+ thread_local bool enabled = (pthread_jit_write_protect_np(1), false);
+ bool result = enabled;
+ if (enabled != writeEnable)
+ {
+ pthread_jit_write_protect_np(writeEnable ? 0 : 1);
+ enabled = writeEnable;
+ }
+ return result;
+}
+#endif
+
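The holder flips the per-thread W^X state of MAP_JIT pages via pthread_jit_write_protect_np (macOS 11+ on Apple Silicon). A hedged sketch of the write/execute dance this enables; hardened-runtime processes additionally need the com.apple.security.cs.allow-jit entitlement:

```cpp
#include <cstdint>
#include <libkern/OSCacheControl.h>
#include <pthread.h>
#include <sys/mman.h>

// Sketch only: emit a single AArch64 "ret" into a fresh MAP_JIT page.
void* EmitRetStub()
{
    void* page = mmap(nullptr, 0x4000, PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT, -1, 0);
    if (page == MAP_FAILED)
        return nullptr;

    pthread_jit_write_protect_np(0);       // this thread: MAP_JIT pages RW-
    *(uint32_t*)page = 0xD65F03C0;         // AArch64 "ret"
    pthread_jit_write_protect_np(1);       // back to R-X before executing
    sys_icache_invalidate(page, 4);        // flush stale icache lines

    return page;
}
```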
#if HAVE_VM_ALLOCATE
//---------------------------------------------------------------------------------------
//
diff --git a/src/coreclr/src/pal/src/misc/jitsupport.cpp b/src/coreclr/src/pal/src/misc/jitsupport.cpp
index 89e90c23812aa..13ffa029424dc 100644
--- a/src/coreclr/src/pal/src/misc/jitsupport.cpp
+++ b/src/coreclr/src/pal/src/misc/jitsupport.cpp
@@ -13,6 +13,10 @@ SET_DEFAULT_DEBUG_CHANNEL(MISC);
#include
#endif
+#if HAVE_SYSCTLBYNAME
+#include <sys/sysctl.h>
+#endif
+
#if defined(HOST_ARM64) && defined(__linux__)
struct CpuCapability
{
@@ -254,6 +258,16 @@ PAL_GetJitCpuCapabilityFlags(CORJIT_FLAGS *flags)
// CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SVE);
#endif
#else // !HAVE_AUXV_HWCAP_H
+#if HAVE_SYSCTLBYNAME
+ int64_t valueFromSysctl = 0;
+ size_t sz = sizeof(valueFromSysctl);
+
+ if ((sysctlbyname("hw.optional.armv8_1_atomics", &valueFromSysctl, &sz, nullptr, 0) == 0) && (valueFromSysctl != 0))
+ CPUCompileFlags.Set(InstructionSet_Atomics);
+
+ if ((sysctlbyname("hw.optional.armv8_crc32", &valueFromSysctl, &sz, nullptr, 0) == 0) && (valueFromSysctl != 0))
+ CPUCompileFlags.Set(InstructionSet_Crc32);
+#endif // HAVE_SYSCTLBYNAME
// CoreCLR SIMD and FP support is included in ARM64 baseline
// On exceptional basis platforms may leave out support, but CoreCLR does not
// yet support such platforms
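The two hw.optional sysctl names are the documented macOS way to probe these ISA extensions. A standalone sketch of the same detection (the helper name is hypothetical):

```cpp
#include <cstdint>
#include <cstdio>
#include <sys/sysctl.h>

// "hw.optional.armv8_1_atomics" and "hw.optional.armv8_crc32" are the real
// sysctl names queried above; the wrapper itself is just a sketch.
static bool HasFeature(const char* name)
{
    int64_t value = 0;
    size_t size = sizeof(value);
    return sysctlbyname(name, &value, &size, nullptr, 0) == 0 && value != 0;
}

int main()
{
    printf("LSE atomics: %d\n", HasFeature("hw.optional.armv8_1_atomics"));
    printf("CRC32 insns: %d\n", HasFeature("hw.optional.armv8_crc32"));
    return 0;
}
```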
diff --git a/src/coreclr/src/pal/src/misc/sysinfo.cpp b/src/coreclr/src/pal/src/misc/sysinfo.cpp
index 6b23c17ac1482..7df3d5e34f1e5 100644
--- a/src/coreclr/src/pal/src/misc/sysinfo.cpp
+++ b/src/coreclr/src/pal/src/misc/sysinfo.cpp
@@ -588,7 +588,7 @@ PAL_GetLogicalProcessorCacheSizeFromOS()
}
#endif
-#if defined(HOST_ARM64)
+#if defined(HOST_ARM64) && !defined(TARGET_OSX)
if (cacheSize == 0)
{
// It is currently expected to be missing cache size info
diff --git a/src/coreclr/src/pal/src/misc/time.cpp b/src/coreclr/src/pal/src/misc/time.cpp
index 0d56e411e4ccc..ec71e5c72b06c 100644
--- a/src/coreclr/src/pal/src/misc/time.cpp
+++ b/src/coreclr/src/pal/src/misc/time.cpp
@@ -27,45 +27,10 @@ Module Name:
#include
#include
-#if HAVE_MACH_ABSOLUTE_TIME
-#include <mach/mach_time.h>
-static mach_timebase_info_data_t s_TimebaseInfo;
-#endif
-
using namespace CorUnix;
SET_DEFAULT_DEBUG_CHANNEL(MISC);
-/*++
-Function :
-TIMEInitialize
-
-Initialize all Time-related stuff related
-
-(no parameters)
-
-Return value :
-TRUE if Time support initialization succeeded
-FALSE otherwise
---*/
-BOOL TIMEInitialize(void)
-{
- BOOL retval = TRUE;
-
-#if HAVE_MACH_ABSOLUTE_TIME
- kern_return_t result = mach_timebase_info(&s_TimebaseInfo);
-
- if (result != KERN_SUCCESS)
- {
- ASSERT("mach_timebase_info() failed: %s\n", mach_error_string(result));
- retval = FALSE;
- }
-#endif
-
- return retval;
-}
-
-
/*++
Function:
GetSystemTime
@@ -203,8 +168,8 @@ QueryPerformanceCounter(
PERF_ENTRY(QueryPerformanceCounter);
ENTRY("QueryPerformanceCounter()\n");
-#if HAVE_MACH_ABSOLUTE_TIME
- lpPerformanceCount->QuadPart = (LONGLONG)mach_absolute_time();
+#if HAVE_CLOCK_GETTIME_NSEC_NP
+ lpPerformanceCount->QuadPart = (LONGLONG)clock_gettime_nsec_np(CLOCK_UPTIME_RAW);
#elif HAVE_CLOCK_MONOTONIC
struct timespec ts;
int result = clock_gettime(CLOCK_MONOTONIC, &ts);
@@ -238,21 +203,8 @@ QueryPerformanceFrequency(
PERF_ENTRY(QueryPerformanceFrequency);
ENTRY("QueryPerformanceFrequency()\n");
-#if HAVE_MACH_ABSOLUTE_TIME
- // use denom == 0 to indicate that s_TimebaseInfo is uninitialised.
- if (s_TimebaseInfo.denom == 0)
- {
- ASSERT("s_TimebaseInfo is uninitialized.\n");
- retval = FALSE;
- }
- else
- {
- // (numer / denom) gives you the nanoseconds per tick, so the below code
- // computes the number of ticks per second. We explicitly do the multiplication
- // first in order to help minimize the error that is produced by integer division.
-
- lpFrequency->QuadPart = ((LONGLONG)(tccSecondsToNanoSeconds) * (LONGLONG)(s_TimebaseInfo.denom)) / (LONGLONG)(s_TimebaseInfo.numer);
- }
+#if HAVE_CLOCK_GETTIME_NSEC_NP
+ lpFrequency->QuadPart = (LONGLONG)(tccSecondsToNanoSeconds);
#elif HAVE_CLOCK_MONOTONIC
// clock_gettime() returns a result in terms of nanoseconds rather than a count. This
// means that we need to either always scale the result by the actual resolution (to
@@ -323,17 +275,8 @@ GetTickCount64()
{
LONGLONG retval = 0;
-#if HAVE_MACH_ABSOLUTE_TIME
- // use denom == 0 to indicate that s_TimebaseInfo is uninitialised.
- if (s_TimebaseInfo.denom == 0)
- {
- ASSERT("s_TimebaseInfo is uninitialized.\n");
- retval = FALSE;
- }
- else
- {
- retval = ((LONGLONG)mach_absolute_time() * (LONGLONG)(s_TimebaseInfo.numer)) / ((LONGLONG)(tccMillieSecondsToNanoSeconds) * (LONGLONG)(s_TimebaseInfo.denom));
- }
+#if HAVE_CLOCK_GETTIME_NSEC_NP
+ return (LONGLONG)clock_gettime_nsec_np(CLOCK_UPTIME_RAW) / (LONGLONG)(tccMillieSecondsToNanoSeconds);
#elif HAVE_CLOCK_MONOTONIC || HAVE_CLOCK_MONOTONIC_COARSE
struct timespec ts;
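With this change the Mach timebase bookkeeping disappears: clock_gettime_nsec_np(CLOCK_UPTIME_RAW) already returns nanoseconds with mach_absolute_time semantics (it does not advance while the machine sleeps), so the reported frequency is a constant 1e9. A minimal sketch of the replacement primitive:

```cpp
#include <cinttypes>
#include <cstdio>
#include <ctime>

int main()
{
    // CLOCK_UPTIME_RAW needs no timebase scaling; the result is already ns.
    uint64_t start = clock_gettime_nsec_np(CLOCK_UPTIME_RAW);
    // ... timed work ...
    uint64_t elapsed = clock_gettime_nsec_np(CLOCK_UPTIME_RAW) - start;
    printf("elapsed: %" PRIu64 " ns\n", elapsed);
    return 0;
}
```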
diff --git a/src/coreclr/src/pal/src/misc/utils.cpp b/src/coreclr/src/pal/src/misc/utils.cpp
index de604fe96dc47..f279ef3d580c1 100644
--- a/src/coreclr/src/pal/src/misc/utils.cpp
+++ b/src/coreclr/src/pal/src/misc/utils.cpp
@@ -332,6 +332,9 @@ void UTIL_SetLastErrorFromMach(kern_return_t MachReturn)
--*/
BOOL IsRunningOnMojaveHardenedRuntime()
{
+#if defined(TARGET_ARM64)
+ return true;
+#else // defined(TARGET_ARM64)
static volatile int isRunningOnMojaveHardenedRuntime = -1;
if (isRunningOnMojaveHardenedRuntime == -1)
@@ -359,6 +362,7 @@ BOOL IsRunningOnMojaveHardenedRuntime()
}
return (BOOL)isRunningOnMojaveHardenedRuntime;
+#endif // defined(TARGET_ARM64)
}
#endif // __APPLE__
diff --git a/src/coreclr/src/pal/src/thread/context.cpp b/src/coreclr/src/pal/src/thread/context.cpp
index ca0b60e449252..8d53e80af0115 100644
--- a/src/coreclr/src/pal/src/thread/context.cpp
+++ b/src/coreclr/src/pal/src/thread/context.cpp
@@ -949,12 +949,16 @@ CONTEXT_GetThreadContextFromPort(
mach_msg_type_number_t StateCount;
thread_state_flavor_t StateFlavor;
+#if defined(HOST_AMD64)
if (lpContext->ContextFlags & (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS) & CONTEXT_AREA_MASK)
{
-
-#ifdef HOST_AMD64
x86_thread_state64_t State;
StateFlavor = x86_THREAD_STATE64;
+#elif defined(HOST_ARM64)
+ if (lpContext->ContextFlags & (CONTEXT_CONTROL | CONTEXT_INTEGER) & CONTEXT_AREA_MASK)
+ {
+ arm_thread_state64_t State;
+ StateFlavor = ARM_THREAD_STATE64;
#else
#error Unexpected architecture.
#endif
@@ -969,7 +973,9 @@ CONTEXT_GetThreadContextFromPort(
CONTEXT_GetThreadContextFromThreadState(StateFlavor, (thread_state_t)&State, lpContext);
}
- if (lpContext->ContextFlags & CONTEXT_ALL_FLOATING & CONTEXT_AREA_MASK) {
+ if (lpContext->ContextFlags & CONTEXT_ALL_FLOATING & CONTEXT_AREA_MASK)
+ {
+#if defined(HOST_AMD64)
// The thread_get_state for floating point state can fail for some flavors when the processor is not
// in the right mode at the time we are taking the state. So we will try to get the AVX state first and
// if it fails, get the FLOAT state and if that fails, take AVX512 state. Both AVX and AVX512 states
@@ -1008,6 +1014,20 @@ CONTEXT_GetThreadContextFromPort(
}
}
}
+#elif defined(HOST_ARM64)
+ arm_neon_state64_t State;
+
+ StateFlavor = ARM_NEON_STATE64;
+ StateCount = sizeof(arm_neon_state64_t) / sizeof(natural_t);
+ MachRet = thread_get_state(Port, StateFlavor, (thread_state_t)&State, &StateCount);
+ if (MachRet != KERN_SUCCESS)
+ {
+ // We were unable to get any floating point state.
+ lpContext->ContextFlags &= ~((CONTEXT_ALL_FLOATING) & CONTEXT_AREA_MASK);
+ }
+#else
+#error Unexpected architecture.
+#endif
CONTEXT_GetThreadContextFromThreadState(StateFlavor, (thread_state_t)&State, lpContext);
}
@@ -1029,7 +1049,7 @@ CONTEXT_GetThreadContextFromThreadState(
{
switch (threadStateFlavor)
{
-#ifdef HOST_AMD64
+#if defined (HOST_AMD64)
case x86_THREAD_STATE64:
if (lpContext->ContextFlags & (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS) & CONTEXT_AREA_MASK)
{
@@ -1100,9 +1120,6 @@ CONTEXT_GetThreadContextFromThreadState(
memcpy(&lpContext->Xmm0, &pState->__fpu_xmm0, 16 * 16);
}
break;
-#else
-#error Unexpected architecture.
-#endif
case x86_THREAD_STATE:
{
x86_thread_state_t *pState = (x86_thread_state_t *)threadState;
@@ -1116,6 +1133,31 @@ CONTEXT_GetThreadContextFromThreadState(
CONTEXT_GetThreadContextFromThreadState((thread_state_flavor_t)pState->fsh.flavor, (thread_state_t)&pState->ufs, lpContext);
}
break;
+#elif defined(HOST_ARM64)
+ case ARM_THREAD_STATE64:
+ if (lpContext->ContextFlags & (CONTEXT_CONTROL | CONTEXT_INTEGER) & CONTEXT_AREA_MASK)
+ {
+ arm_thread_state64_t *pState = (arm_thread_state64_t*)threadState;
+ memcpy(&lpContext->X0, &pState->__x[0], 29 * 8);
+ lpContext->Cpsr = pState->__cpsr;
+ lpContext->Fp = arm_thread_state64_get_fp(*pState);
+ lpContext->Sp = arm_thread_state64_get_sp(*pState);
+ lpContext->Lr = (uint64_t)arm_thread_state64_get_lr_fptr(*pState);
+ lpContext->Pc = (uint64_t)arm_thread_state64_get_pc_fptr(*pState);
+ }
+ break;
+ case ARM_NEON_STATE64:
+ if (lpContext->ContextFlags & CONTEXT_FLOATING_POINT & CONTEXT_AREA_MASK)
+ {
+ arm_neon_state64_t *pState = (arm_neon_state64_t*)threadState;
+ memcpy(&lpContext->V[0], &pState->__v, 32 * 16);
+ lpContext->Fpsr = pState->__fpsr;
+ lpContext->Fpcr = pState->__fpcr;
+ }
+ break;
+#else
+#error Unexpected architecture.
+#endif
default:
ASSERT("Invalid thread state flavor %d\n", threadStateFlavor);
@@ -1216,6 +1258,16 @@ CONTEXT_SetThreadContextOnPort(
// State.es = lpContext->SegEs_PAL_Undefined;
State.__fs = lpContext->SegFs;
State.__gs = lpContext->SegGs;
+#elif defined(HOST_ARM64)
+ arm_thread_state64_t State;
+ StateFlavor = ARM_THREAD_STATE64;
+
+ memcpy(&State.__x[0], &lpContext->X0, 29 * 8);
+ State.__cpsr = lpContext->Cpsr;
+ arm_thread_state64_set_fp(State, lpContext->Fp);
+ arm_thread_state64_set_sp(State, lpContext->Sp);
+ arm_thread_state64_set_lr_fptr(State, lpContext->Lr);
+ arm_thread_state64_set_pc_fptr(State, lpContext->Pc);
#else
#error Unexpected architecture.
#endif
@@ -1261,6 +1313,10 @@ CONTEXT_SetThreadContextOnPort(
StateFlavor = x86_FLOAT_STATE64;
StateCount = sizeof(State) / sizeof(natural_t);
#endif
+#elif defined(HOST_ARM64)
+ arm_neon_state64_t State;
+ StateFlavor = ARM_NEON_STATE64;
+ StateCount = sizeof(State) / sizeof(natural_t);
#else
#error Unexpected architecture.
#endif
@@ -1306,6 +1362,10 @@ CONTEXT_SetThreadContextOnPort(
memcpy((&State.__fpu_stmm0)[i].__mmst_reg, &lpContext->FltSave.FloatRegisters[i], 10);
memcpy(&State.__fpu_xmm0, &lpContext->Xmm0, 16 * 16);
+#elif defined(HOST_ARM64)
+ memcpy(&State.__v, &lpContext->V[0], 32 * 16);
+ State.__fpsr = lpContext->Fpsr;
+ State.__fpcr = lpContext->Fpcr;
#else
#error Unexpected architecture.
#endif
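Note that the ARM64 paths consistently go through the arm_thread_state64_get_*/set_* accessor macros instead of touching fields like __pc directly; on arm64e those slots hold PAC-signed pointers and the macros take care of signing. A sketch of reading a suspended thread's state this way (assumes an arm64 macOS build and a valid, suspended threadPort):

```cpp
#include <cstdio>
#include <mach/mach.h>

kern_return_t DumpThreadState(thread_act_t threadPort)
{
    arm_thread_state64_t state;
    mach_msg_type_number_t count = ARM_THREAD_STATE64_COUNT;
    kern_return_t kr = thread_get_state(threadPort, ARM_THREAD_STATE64,
                                        (thread_state_t)&state, &count);
    if (kr != KERN_SUCCESS)
        return kr;

    uint64_t sp = arm_thread_state64_get_sp(state);
    uint64_t fp = arm_thread_state64_get_fp(state);
    void*    pc = arm_thread_state64_get_pc_fptr(state);  // PAC handled by macro
    printf("sp=%#llx fp=%#llx pc=%p\n",
           (unsigned long long)sp, (unsigned long long)fp, pc);
    return KERN_SUCCESS;
}
```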
diff --git a/src/coreclr/src/utilcode/loaderheap.cpp b/src/coreclr/src/utilcode/loaderheap.cpp
index 37d1e2f4ce569..8cfbba756592c 100644
--- a/src/coreclr/src/utilcode/loaderheap.cpp
+++ b/src/coreclr/src/utilcode/loaderheap.cpp
@@ -1159,6 +1159,11 @@ BOOL UnlockedLoaderHeap::UnlockedReservePages(size_t dwSizeToCommit)
LoaderHeapBlock *pNewBlock;
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ // Always assume we are touching executable heap
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
pNewBlock = (LoaderHeapBlock *) pData;
pNewBlock->dwVirtualSize = dwSizeToReserve;
diff --git a/src/coreclr/src/vm/arm64/asmhelpers.S b/src/coreclr/src/vm/arm64/asmhelpers.S
index a8b0a7c07873a..8ae043c5ff8a4 100644
--- a/src/coreclr/src/vm/arm64/asmhelpers.S
+++ b/src/coreclr/src/vm/arm64/asmhelpers.S
@@ -125,7 +125,7 @@ NESTED_ENTRY NDirectImportThunk, _TEXT, NoHandler
SAVE_FLOAT_ARGUMENT_REGISTERS sp, 96
mov x0, x12
- bl NDirectImportWorker
+ bl C_FUNC(NDirectImportWorker)
mov x12, x0
// pop the stack and restore original register state
@@ -156,7 +156,7 @@ NESTED_ENTRY PrecodeFixupThunk, _TEXT, NoHandler
ldr x13, [x13, #SIZEOF__FixupPrecode]
add x12, x13, w14, uxtw #MethodDesc_ALIGNMENT_SHIFT
- b ThePreStub
+ b C_FUNC(ThePreStub)
NESTED_END PrecodeFixupThunk, _TEXT
// ------------------------------------------------------------------
@@ -167,7 +167,7 @@ NESTED_ENTRY ThePreStub, _TEXT, NoHandler
add x0, sp, #__PWTB_TransitionBlock // pTransitionBlock
mov x1, METHODDESC_REGISTER // pMethodDesc
- bl PreStubWorker
+ bl C_FUNC(PreStubWorker)
mov x9, x0
@@ -206,12 +206,6 @@ LEAF_END ThePreStubPatch, _TEXT
LEAF_END_MARKED \name, _TEXT
.endm
-// ------------------------------------------------------------------
-// Start of the writeable code region
-LEAF_ENTRY JIT_PatchedCodeStart, _TEXT
- ret lr
-LEAF_END JIT_PatchedCodeStart, _TEXT
-
// void JIT_UpdateWriteBarrierState(bool skipEphemeralCheck)
//
// Update shadow copies of the various state info required for barrier
@@ -268,7 +262,12 @@ LOCAL_LABEL(EphemeralCheckEnabled):
ldr x7, [x12]
// Update wbs state
+#ifdef FEATURE_WRITEBARRIER_COPY
+ PREPARE_EXTERNAL_VAR JIT_WriteBarrier_Table_Loc, x12
+ ldr x12, [x12]
+#else // FEATURE_WRITEBARRIER_COPY
adr x12, LOCAL_LABEL(wbs_begin)
+#endif // FEATURE_WRITEBARRIER_COPY
stp x0, x1, [x12], 16
stp x2, x3, [x12], 16
@@ -277,35 +276,8 @@ LOCAL_LABEL(EphemeralCheckEnabled):
EPILOG_RESTORE_REG_PAIR_INDEXED fp, lr, 16
EPILOG_RETURN
-
- // Begin patchable literal pool
- .balign 64 // Align to power of two at least as big as patchable literal pool so that it fits optimally in cache line
-LOCAL_LABEL(wbs_begin):
-LOCAL_LABEL(wbs_card_table):
- .quad 0
-LOCAL_LABEL(wbs_card_bundle_table):
- .quad 0
-LOCAL_LABEL(wbs_GCShadow):
- .quad 0
-LOCAL_LABEL(wbs_sw_ww_table):
- .quad 0
-LOCAL_LABEL(wbs_ephemeral_low):
- .quad 0
-LOCAL_LABEL(wbs_ephemeral_high):
- .quad 0
-LOCAL_LABEL(wbs_lowest_address):
- .quad 0
-LOCAL_LABEL(wbs_highest_address):
- .quad 0
WRITE_BARRIER_END JIT_UpdateWriteBarrierState
-
-// ------------------------------------------------------------------
-// End of the writeable code region
-LEAF_ENTRY JIT_PatchedCodeLast, _TEXT
- ret lr
-LEAF_END JIT_PatchedCodeLast, _TEXT
-
// void JIT_ByRefWriteBarrier
// On entry:
// x13 : the source address (points to object reference to write)
@@ -350,13 +322,44 @@ WRITE_BARRIER_ENTRY JIT_CheckedWriteBarrier
// branch below is not taken.
ccmp x14, x12, #0x2, hs
+#ifdef FEATURE_WRITEBARRIER_COPY
+ blo LOCAL_LABEL(Branch_JIT_WriteBarrier_Copy)
+#else // FEATURE_WRITEBARRIER_COPY
blo C_FUNC(JIT_WriteBarrier)
+#endif // FEATURE_WRITEBARRIER_COPY
LOCAL_LABEL(NotInHeap):
str x15, [x14], 8
ret lr
WRITE_BARRIER_END JIT_CheckedWriteBarrier
+// ------------------------------------------------------------------
+// __declspec(naked) void F_CALL_CONV JIT_WriteBarrier_Callable(Object **dst, Object* val)
+LEAF_ENTRY JIT_WriteBarrier_Callable, _TEXT
+
+ // Setup args for JIT_WriteBarrier. x14 = dst ; x15 = val
+ mov x14, x0 // x14 = dst
+ mov x15, x1 // x15 = val
+
+#ifdef FEATURE_WRITEBARRIER_COPY
+LOCAL_LABEL(Branch_JIT_WriteBarrier_Copy):
+ // Branch to the write barrier
+ PREPARE_EXTERNAL_VAR JIT_WriteBarrier_Loc, x17
+ ldr x17, [x17]
+ br x17
+#else // FEATURE_WRITEBARRIER_COPY
+ // Branch to the write barrier
+ b C_FUNC(JIT_WriteBarrier)
+#endif // FEATURE_WRITEBARRIER_COPY
+LEAF_END JIT_WriteBarrier_Callable, _TEXT
+
+.balign 64 // Align to power of two at least as big as patchable literal pool so that it fits optimally in cache line
+// ------------------------------------------------------------------
+// Start of the writeable code region
+LEAF_ENTRY JIT_PatchedCodeStart, _TEXT
+ ret lr
+LEAF_END JIT_PatchedCodeStart, _TEXT
+
// void JIT_WriteBarrier(Object** dst, Object* src)
// On entry:
// x14 : the destination address (LHS of the assignment)
@@ -473,6 +476,35 @@ LOCAL_LABEL(Exit):
ret lr
WRITE_BARRIER_END JIT_WriteBarrier
+ // Begin patchable literal pool
+ .balign 64 // Align to power of two at least as big as patchable literal pool so that it fits optimally in cache line
+WRITE_BARRIER_ENTRY JIT_WriteBarrier_Table
+LOCAL_LABEL(wbs_begin):
+LOCAL_LABEL(wbs_card_table):
+ .quad 0
+LOCAL_LABEL(wbs_card_bundle_table):
+ .quad 0
+LOCAL_LABEL(wbs_GCShadow):
+ .quad 0
+LOCAL_LABEL(wbs_sw_ww_table):
+ .quad 0
+LOCAL_LABEL(wbs_ephemeral_low):
+ .quad 0
+LOCAL_LABEL(wbs_ephemeral_high):
+ .quad 0
+LOCAL_LABEL(wbs_lowest_address):
+ .quad 0
+LOCAL_LABEL(wbs_highest_address):
+ .quad 0
+WRITE_BARRIER_END JIT_WriteBarrier_Table
+
+
+// ------------------------------------------------------------------
+// End of the writeable code region
+LEAF_ENTRY JIT_PatchedCodeLast, _TEXT
+ ret lr
+LEAF_END JIT_PatchedCodeLast, _TEXT
+
#ifdef FEATURE_PREJIT
//------------------------------------------------
// VirtualMethodFixupStub
@@ -610,9 +642,9 @@ NESTED_ENTRY ComCallPreStub, _TEXT, NoHandler
str x12, [sp, #(ComCallPreStub_FrameOffset + UnmanagedToManagedFrame__m_pvDatum)]
add x0, sp, #(ComCallPreStub_FrameOffset)
add x1, sp, #(ComCallPreStub_ErrorReturnOffset)
- bl ComPreStubWorker
+ bl C_FUNC(ComPreStubWorker)
- cbz x0, ComCallPreStub_ErrorExit
+ cbz x0, LOCAL_LABEL(ComCallPreStub_ErrorExit)
mov x12, x0
@@ -658,7 +690,7 @@ NESTED_END ComCallPreStub, _TEXT
str x12, [sp, #(GenericComCallStub_FrameOffset + UnmanagedToManagedFrame__m_pvDatum)]
add x1, sp, #GenericComCallStub_FrameOffset
- bl COMToCLRWorker
+ bl C_FUNC(COMToCLRWorker)
// pop the stack
EPILOG_STACK_FREE GenericComCallStub_StackAlloc
@@ -685,7 +717,7 @@ NESTED_END ComCallPreStub, _TEXT
PROLOG_SAVE_REG_PAIR fp, lr, -16!
- cbz x0, COMToCLRDispatchHelper_RegSetup
+ cbz x0, LOCAL_LABEL(COMToCLRDispatchHelper_RegSetup)
add x9, x1, #SIZEOF__ComMethodFrame
add x9, x9, x0, LSL #3
@@ -693,7 +725,7 @@ COMToCLRDispatchHelper_StackLoop
ldr x8, [x9, #-8]!
str x8, [sp, #-8]!
sub x0, x0, #1
- cbnz x0, COMToCLRDispatchHelper_StackLoop
+ cbnz x0, LOCAL_LABEL(COMToCLRDispatchHelper_StackLoop)
COMToCLRDispatchHelper_RegSetup
@@ -766,7 +798,7 @@ NESTED_ENTRY OnHijackTripThread, _TEXT, NoHandler
stp q2, q3, [sp, #144]
mov x0, sp
- bl OnHijackWorker
+ bl C_FUNC(OnHijackWorker)
// restore any integral return value(s)
ldp x0, x1, [sp, #96]
@@ -987,7 +1019,7 @@ LOCAL_LABEL(Promote):
mov x12, x9 // We pass the ResolveCacheElem to ResolveWorkerAsmStub instead of the DispatchToken
LOCAL_LABEL(Fail):
- b ResolveWorkerAsmStub // call the ResolveWorkerAsmStub method to transition into the VM
+ b C_FUNC(ResolveWorkerAsmStub) // call the ResolveWorkerAsmStub method to transition into the VM
NESTED_END ResolveWorkerChainLookupAsmStub, _TEXT
@@ -1003,7 +1035,7 @@ NESTED_ENTRY ResolveWorkerAsmStub, _TEXT, NoHandler
and x1, x11, #-4 // Indirection cell
mov x2, x12 // DispatchToken
and x3, x11, #3 // flag
- bl VSD_ResolveWorker
+ bl C_FUNC(VSD_ResolveWorker)
mov x9, x0
EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
@@ -1015,28 +1047,28 @@ NESTED_END ResolveWorkerAsmStub, _TEXT
#ifdef FEATURE_READYTORUN
NESTED_ENTRY DelayLoad_MethodCall_FakeProlog, _TEXT, NoHandler
-DelayLoad_MethodCall:
- .global DelayLoad_MethodCall
+C_FUNC(DelayLoad_MethodCall):
+ .global C_FUNC(DelayLoad_MethodCall)
PROLOG_WITH_TRANSITION_BLOCK
add x0, sp, #__PWTB_TransitionBlock // pTransitionBlock
mov x1, x11 // Indirection cell
mov x2, x9 // sectionIndex
mov x3, x10 // Module*
- bl ExternalMethodFixupWorker
+ bl C_FUNC(ExternalMethodFixupWorker)
mov x12, x0
EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
// Share patch label
- b ExternalMethodFixupPatchLabel
+ b C_FUNC(ExternalMethodFixupPatchLabel)
NESTED_END DelayLoad_MethodCall_FakeProlog, _TEXT
.macro DynamicHelper frameFlags, suffix
NESTED_ENTRY DelayLoad_Helper\suffix\()_FakeProlog, _TEXT, NoHandler
-DelayLoad_Helper\suffix:
- .global DelayLoad_Helper\suffix
+C_FUNC(DelayLoad_Helper\suffix):
+ .global C_FUNC(DelayLoad_Helper\suffix)
PROLOG_WITH_TRANSITION_BLOCK
@@ -1045,7 +1077,7 @@ DelayLoad_Helper\suffix:
mov x2, x9 // sectionIndex
mov x3, x10 // Module*
mov x4, \frameFlags
- bl DynamicHelperWorker
+ bl C_FUNC(DynamicHelperWorker)
cbnz x0, LOCAL_LABEL(FakeProlog\suffix\()_0)
ldr x0, [sp, #__PWTB_ArgumentRegister_FirstArg]
EPILOG_WITH_TRANSITION_BLOCK_RETURN
@@ -1176,19 +1208,6 @@ LEAF_ENTRY JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain, _TEXT
LEAF_END JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain, _TEXT
-// ------------------------------------------------------------------
-// __declspec(naked) void F_CALL_CONV JIT_WriteBarrier_Callable(Object **dst, Object* val)
-LEAF_ENTRY JIT_WriteBarrier_Callable, _TEXT
-
- // Setup args for JIT_WriteBarrier. x14 = dst ; x15 = val
- mov x14, x0 // x14 = dst
- mov x15, x1 // x15 = val
-
- // Branch to the write barrier (which is already correctly overwritten with
- // single or multi-proc code based on the current CPU
- b C_FUNC(JIT_WriteBarrier)
-LEAF_END JIT_WriteBarrier_Callable, _TEXT
-
#ifdef PROFILING_SUPPORTED
// ------------------------------------------------------------------
@@ -1226,7 +1245,7 @@ NESTED_ENTRY \helper\()Naked, _TEXT, NoHandler
mov x0, x10
mov x1, sp
- bl \helper
+ bl C_FUNC(\helper)
RESTORE_ARGUMENT_REGISTERS sp, 16 // Restore x8 and argument registers.
RESTORE_FLOAT_ARGUMENT_REGISTERS sp, 96 // Restore floating-point/SIMD registers.
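Much of the asmhelpers.S churn is mechanical: Mach-O prefixes C symbols with an underscore, so every cross-object branch target has to go through C_FUNC, and in-function branch targets through LOCAL_LABEL. The macros conventionally expand to something like the following (illustrative only; CoreCLR's real definitions live in its unixasmmacros include files):

```cpp
// Illustrative expansion; see CoreCLR's unixasmmacros*.inc for the real ones.
#if defined(__APPLE__)
#define C_FUNC(name) _##name        // Mach-O: C symbol "foo" assembles as "_foo"
#define LOCAL_LABEL(name) L##name   // Mach-O assembler-local labels begin with 'L'
#else
#define C_FUNC(name) name
#define LOCAL_LABEL(name) .L##name  // ELF assembler-local label convention
#endif
```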
diff --git a/src/coreclr/src/vm/arm64/gmscpu.h b/src/coreclr/src/vm/arm64/gmscpu.h
index 0c48f007b7bde..887a41b4f07c1 100644
--- a/src/coreclr/src/vm/arm64/gmscpu.h
+++ b/src/coreclr/src/vm/arm64/gmscpu.h
@@ -80,6 +80,7 @@ inline void LazyMachState::setLazyStateFromUnwind(MachState* copy)
}
*pDst++ = valueSrc;
+ captureX19_X29[i] = copy->captureX19_X29[i];
}
diff --git a/src/coreclr/src/vm/arm64/pinvokestubs.S b/src/coreclr/src/vm/arm64/pinvokestubs.S
index 8002ea2b4e03a..ff16e14674ef3 100644
--- a/src/coreclr/src/vm/arm64/pinvokestubs.S
+++ b/src/coreclr/src/vm/arm64/pinvokestubs.S
@@ -45,7 +45,7 @@
LOCAL_LABEL(\__PInvokeStubFuncName\()_0):
- EPILOG_BRANCH \__PInvokeGenStubFuncName
+ EPILOG_BRANCH C_FUNC(\__PInvokeGenStubFuncName)
NESTED_END \__PInvokeStubFuncName, _TEXT
@@ -69,7 +69,7 @@ LOCAL_LABEL(\__PInvokeStubFuncName\()_0):
// save VASigCookieReg
mov x20, \VASigCookieReg
- bl \__PInvokeStubWorkerName
+ bl C_FUNC(\__PInvokeStubWorkerName)
// restore VASigCookieReg
mov \VASigCookieReg, x20
@@ -80,7 +80,7 @@ LOCAL_LABEL(\__PInvokeStubFuncName\()_0):
EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
- EPILOG_BRANCH \__PInvokeStubFuncName
+ EPILOG_BRANCH C_FUNC(\__PInvokeStubFuncName)
NESTED_END \__PInvokeGenStubFuncName, _TEXT
.endm
diff --git a/src/coreclr/src/vm/arm64/stubs.cpp b/src/coreclr/src/vm/arm64/stubs.cpp
index ac8466c22faca..27943954b318a 100644
--- a/src/coreclr/src/vm/arm64/stubs.cpp
+++ b/src/coreclr/src/vm/arm64/stubs.cpp
@@ -14,8 +14,6 @@
#include "jitinterface.h"
#include "ecall.h"
-EXTERN_C void JIT_UpdateWriteBarrierState(bool skipEphemeralCheck);
-
#ifndef DACCESS_COMPILE
//-----------------------------------------------------------------------
@@ -571,6 +569,10 @@ void StubPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
{
WRAPPER_NO_CONTRACT;
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
int n = 0;
m_rgCode[n++] = 0x10000089; // adr x9, #16
@@ -604,6 +606,10 @@ void NDirectImportPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocat
{
WRAPPER_NO_CONTRACT;
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
int n = 0;
m_rgCode[n++] = 0x1000008B; // adr x11, #16
@@ -637,6 +643,10 @@ void FixupPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int
{
WRAPPER_NO_CONTRACT;
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
InitCommon();
// Initialize chunk indices only if they are not initialized yet. This is necessary to make MethodDesc::Reset work.
@@ -1058,6 +1068,17 @@ void JIT_TailCall()
}
#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+EXTERN_C void JIT_UpdateWriteBarrierState(bool skipEphemeralCheck);
+
+static void UpdateWriteBarrierState(bool skipEphemeralCheck)
+{
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
+ JIT_UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
+}
+
void InitJITHelpers1()
{
STANDARD_VM_CONTRACT;
@@ -1083,11 +1104,12 @@ void InitJITHelpers1()
}
}
- JIT_UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
+ UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
}
+
#else
-EXTERN_C void JIT_UpdateWriteBarrierState(bool) {}
+void UpdateWriteBarrierState(bool) {}
#endif // !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(T_DISPATCHER_CONTEXT * pDispatcherContext)
@@ -1198,6 +1220,9 @@ UMEntryThunk * UMEntryThunk::Decode(void *pCallback)
void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
{
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
// adr x12, _label
// ldp x16, x12, [x12]
// br x16
@@ -1213,7 +1238,6 @@ void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
m_pTargetCode = (TADDR)pTargetCode;
m_pvSecretParam = (TADDR)pvSecretParam;
-
FlushInstructionCache(GetCurrentProcess(),&m_code,sizeof(m_code));
}
@@ -1221,11 +1245,14 @@ void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
void UMEntryThunkCode::Poison()
{
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
m_pTargetCode = (TADDR)UMEntryThunk::ReportViolation;
// ldp x16, x0, [x12]
m_code[1] = 0xa9400190;
-
ClrFlushInstructionCache(&m_code,sizeof(m_code));
}
@@ -1251,26 +1278,26 @@ void FlushWriteBarrierInstructionCache()
#ifndef CROSSGEN_COMPILE
int StompWriteBarrierEphemeral(bool isRuntimeSuspended)
{
- JIT_UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
+ UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
return SWB_PASS;
}
int StompWriteBarrierResize(bool isRuntimeSuspended, bool bReqUpperBoundsCheck)
{
- JIT_UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
+ UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
return SWB_PASS;
}
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
int SwitchToWriteWatchBarrier(bool isRuntimeSuspended)
{
- JIT_UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
+ UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
return SWB_PASS;
}
int SwitchToNonWriteWatchBarrier(bool isRuntimeSuspended)
{
- JIT_UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
+ UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
return SWB_PASS;
}
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
@@ -1804,6 +1831,20 @@ void StubLinkerCPU::EmitCallManagedMethod(MethodDesc *pMD, BOOL fTailCall)
#define DYNAMIC_HELPER_ALIGNMENT sizeof(TADDR)
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+#define BEGIN_DYNAMIC_HELPER_EMIT(size) \
+ SIZE_T cb = size; \
+ SIZE_T cbAligned = ALIGN_UP(cb, DYNAMIC_HELPER_ALIGNMENT); \
+ BYTE * pStart = (BYTE *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \
+ BYTE * p = pStart; \
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+
+#define END_DYNAMIC_HELPER_EMIT() \
+ _ASSERTE(pStart + cb == p); \
+ while (p < pStart + cbAligned) { *(DWORD*)p = 0xBADC0DF0; p += 4; }\
+ ClrFlushInstructionCache(pStart, cbAligned); \
+ return (PCODE)pStart
+#else // defined(HOST_OSX) && defined(HOST_ARM64)
#define BEGIN_DYNAMIC_HELPER_EMIT(size) \
SIZE_T cb = size; \
SIZE_T cbAligned = ALIGN_UP(cb, DYNAMIC_HELPER_ALIGNMENT); \
@@ -1815,6 +1856,7 @@ void StubLinkerCPU::EmitCallManagedMethod(MethodDesc *pMD, BOOL fTailCall)
while (p < pStart + cbAligned) { *(DWORD*)p = 0xBADC0DF0; p += 4; }\
ClrFlushInstructionCache(pStart, cbAligned); \
return (PCODE)pStart
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
// Uses x8 as a scratch register to store the address of the data label
// After the load, x8 is incremented to point to the next data
diff --git a/src/coreclr/src/vm/callcounting.cpp b/src/coreclr/src/vm/callcounting.cpp
index 8c3678bfd8536..eaf18aaff909b 100644
--- a/src/coreclr/src/vm/callcounting.cpp
+++ b/src/coreclr/src/vm/callcounting.cpp
@@ -251,6 +251,10 @@ const CallCountingStub *CallCountingManager::CallCountingStubAllocator::Allocate
}
CONTRACTL_END;
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
LoaderHeap *heap = m_heap;
if (heap == nullptr)
{
diff --git a/src/coreclr/src/vm/callhelpers.cpp b/src/coreclr/src/vm/callhelpers.cpp
index 910cc060f74ae..492869f1c328f 100644
--- a/src/coreclr/src/vm/callhelpers.cpp
+++ b/src/coreclr/src/vm/callhelpers.cpp
@@ -62,6 +62,9 @@ void CallDescrWorkerWithHandler(
#endif
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(false);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
BEGIN_CALL_TO_MANAGEDEX(fCriticalCall ? EEToManagedCriticalCall : EEToManagedDefault);
diff --git a/src/coreclr/src/vm/ceemain.cpp b/src/coreclr/src/vm/ceemain.cpp
index 154dc475a9929..3526d9f8c776d 100644
--- a/src/coreclr/src/vm/ceemain.cpp
+++ b/src/coreclr/src/vm/ceemain.cpp
@@ -972,6 +972,10 @@ void EEStartupHelper()
#endif // CROSSGEN_COMPILE
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
SystemDomain::System()->Init();
#ifdef PROFILING_SUPPORTED
diff --git a/src/coreclr/src/vm/codeman.cpp b/src/coreclr/src/vm/codeman.cpp
index 39eef3e7f3cc1..d81e83d71c2e0 100644
--- a/src/coreclr/src/vm/codeman.cpp
+++ b/src/coreclr/src/vm/codeman.cpp
@@ -1849,6 +1849,10 @@ TaggedMemAllocPtr CodeFragmentHeap::RealAllocAlignedMem(size_t dwRequestedSize
{
CrstHolder ch(&m_CritSec);
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
dwRequestedSize = ALIGN_UP(dwRequestedSize, sizeof(TADDR));
if (dwRequestedSize < sizeof(FreeBlock))
@@ -1930,6 +1934,10 @@ void CodeFragmentHeap::RealBackoutMem(void *pMem
_ASSERTE(dwSize >= sizeof(FreeBlock));
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
ZeroMemory((BYTE *)pMem, dwSize);
//
@@ -4844,6 +4852,10 @@ void ExecutionManager::Unload(LoaderAllocator *pLoaderAllocator)
GC_NOTRIGGER;
} CONTRACTL_END;
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
// a size of 0 is a signal to Nirvana to flush the entire cache
FlushInstructionCache(GetCurrentProcess(),0,0);
diff --git a/src/coreclr/src/vm/comdelegate.cpp b/src/coreclr/src/vm/comdelegate.cpp
index 9c5b48cc8ce73..5f12dc5fa0ce8 100644
--- a/src/coreclr/src/vm/comdelegate.cpp
+++ b/src/coreclr/src/vm/comdelegate.cpp
@@ -969,6 +969,10 @@ FCIMPL5(FC_BOOL_RET, COMDelegate::BindToMethodInfo, Object* refThisUNSAFE, Objec
flags,
&fIsOpenDelegate))
{
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
// Initialize the delegate to point to the target method.
BindToMethod(&gc.refThis,
&gc.refFirstArg,
@@ -1588,6 +1592,10 @@ FCIMPL3(void, COMDelegate::DelegateConstruct, Object* refThisUNSAFE, Object* tar
// try to catch the easy garbage.
_ASSERTE(isMemoryReadable(method, 1));
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
MethodTable *pMTTarg = NULL;
if (gc.target != NULL)
@@ -1717,6 +1725,7 @@ FCIMPL3(void, COMDelegate::DelegateConstruct, Object* refThisUNSAFE, Object* tar
gc.refThis->SetTarget(gc.target);
gc.refThis->SetMethodPtr((PCODE)(void *)method);
}
+
HELPER_METHOD_FRAME_END();
}
FCIMPLEND
diff --git a/src/coreclr/src/vm/dllimportcallback.cpp b/src/coreclr/src/vm/dllimportcallback.cpp
index c3c69ddbf3488..f25da147690eb 100644
--- a/src/coreclr/src/vm/dllimportcallback.cpp
+++ b/src/coreclr/src/vm/dllimportcallback.cpp
@@ -78,6 +78,10 @@ class UMEntryThunkFreeList
CrstHolder ch(&m_crst);
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
if (m_pHead == NULL)
{
m_pHead = pThunk;
@@ -1111,6 +1115,10 @@ void UMEntryThunk::Terminate()
if (GetObjectHandle())
{
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
DestroyLongWeakHandle(GetObjectHandle());
m_pObjectHandle = 0;
}
diff --git a/src/coreclr/src/vm/dllimportcallback.h b/src/coreclr/src/vm/dllimportcallback.h
index b98f0743213f0..8a483808d3d28 100644
--- a/src/coreclr/src/vm/dllimportcallback.h
+++ b/src/coreclr/src/vm/dllimportcallback.h
@@ -267,6 +267,10 @@ class UMEntryThunk
}
CONTRACTL_END;
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
m_pManagedTarget = pManagedTarget;
m_pObjectHandle = pObjectHandle;
m_pUMThunkMarshInfo = pUMThunkMarshInfo;
@@ -298,8 +302,12 @@ class UMEntryThunk
m_code.Encode((BYTE*)m_pUMThunkMarshInfo->GetExecStubEntryPoint(), this);
#ifdef _DEBUG
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
m_state = kRunTimeInited;
-#endif
+#endif // _DEBUG
}
// asm entrypoint
diff --git a/src/coreclr/src/vm/dynamicmethod.cpp b/src/coreclr/src/vm/dynamicmethod.cpp
index 51ce113d1adb7..22785e9d60362 100644
--- a/src/coreclr/src/vm/dynamicmethod.cpp
+++ b/src/coreclr/src/vm/dynamicmethod.cpp
@@ -551,6 +551,10 @@ void HostCodeHeap::AddToFreeList(TrackAllocation *pBlockToInsert)
LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p] - Add to FreeList [%p, 0x%X]\n", this, pBlockToInsert, pBlockToInsert->size));
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
// append to the list in the proper position and coalesce if needed
if (m_pFreeList)
{
diff --git a/src/coreclr/src/vm/eventpipe.cpp b/src/coreclr/src/vm/eventpipe.cpp
index debdcdd2de52a..77855ce127134 100644
--- a/src/coreclr/src/vm/eventpipe.cpp
+++ b/src/coreclr/src/vm/eventpipe.cpp
@@ -1043,6 +1043,10 @@ HANDLE EventPipe::GetWaitHandle(EventPipeSessionID sessionID)
void EventPipe::InvokeCallback(EventPipeProviderCallbackData *pEventPipeProviderCallbackData)
{
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(false);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
EventPipeProvider::InvokeCallback(pEventPipeProviderCallbackData);
}
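
Here the holder is constructed with `false`: before control leaves the runtime for a provider callback, the thread drops JIT write access so external code cannot scribble on executable pages. A small illustrative wrapper (hypothetical helper, built on the holder sketched earlier):

    // Hypothetical wrapper: run an external callback with JIT writes disabled.
    template <typename Callback>
    void InvokeWithJitWritesDisabled(Callback&& callback)
    {
        JITWriteEnableHolder holder(false); // pages executable, not writable
        callback();
    }   // the previous write-enable state is restored on return
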
diff --git a/src/coreclr/src/vm/exceptionhandling.cpp b/src/coreclr/src/vm/exceptionhandling.cpp
index 84a65d8687e24..7d329902e42ab 100644
--- a/src/coreclr/src/vm/exceptionhandling.cpp
+++ b/src/coreclr/src/vm/exceptionhandling.cpp
@@ -3312,6 +3312,10 @@ DWORD_PTR ExceptionTracker::CallHandler(
break;
}
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(false);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
#ifdef USE_FUNCLET_CALL_HELPER
// Invoke the funclet. We pass throwable only when invoking the catch block.
// Since the actual caller of the funclet is the assembly helper, pass the reference
@@ -3948,6 +3952,10 @@ void ExceptionTracker::ResumeExecution(
EH_LOG((LL_INFO100, "resuming execution at 0x%p\n", GetIP(pContextRecord)));
EH_LOG((LL_INFO100, "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n"));
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(false);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
RtlRestoreContext(pContextRecord, pExceptionRecord);
UNREACHABLE();
diff --git a/src/coreclr/src/vm/gcinfodecoder.cpp b/src/coreclr/src/vm/gcinfodecoder.cpp
index ee16de252e2fa..52c2c3c649c0d 100644
--- a/src/coreclr/src/vm/gcinfodecoder.cpp
+++ b/src/coreclr/src/vm/gcinfodecoder.cpp
@@ -1658,6 +1658,27 @@ void GcInfoDecoder::ReportRegisterToGC( // ARM64
LOG((LF_GCROOTS, LL_INFO1000, "Reporting " FMT_REG, regNum ));
OBJECTREF* pObjRef = GetRegisterSlot( regNum, pRD );
+#if defined(TARGET_UNIX) && !defined(SOS_TARGET_ARM64)
+ // On PAL, the context pointers for some nonvolatile registers are not
+ // always available, due to a limitation of the unwinding library, and
+ // are NULL in that case. When that happens, we point pObjRef at the
+ // register value captured in the context and pin the object itself.
+ if (pObjRef == NULL)
+ {
+ // Report a pinned object to GC only in the promotion phase when the
+ // GC is scanning roots.
+ GCCONTEXT* pGCCtx = (GCCONTEXT*)(hCallBack);
+ if (!pGCCtx->sc->promotion)
+ {
+ return;
+ }
+
+ pObjRef = GetCapturedRegister(regNum, pRD);
+
+ gcFlags |= GC_CALL_PINNED;
+ }
+#endif // TARGET_UNIX && !SOS_TARGET_ARM64
#ifdef _DEBUG
if(IsScratchRegister(regNum, pRD))
@@ -1685,7 +1706,17 @@ OBJECTREF* GcInfoDecoder::GetCapturedRegister(
PREGDISPLAY pRD
)
{
- _ASSERTE(regNum >= 0 && regNum <= 28);
+ _ASSERTE(regNum >= 0 && regNum <= 30);
+ _ASSERTE(regNum != 18);
+
+ if (regNum == 29)
+ {
+ return (OBJECTREF*) &pRD->pCurrentContext->Fp;
+ }
+ else if (regNum == 30)
+ {
+ return (OBJECTREF*) &pRD->pCurrentContext->Lr;
+ }
// The fields of CONTEXT are in the same order as
// the processor encoding numbers.
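
The widened assert and the two new branches above exist because X0 through X28 occupy contiguous `CONTEXT` fields (the context comment relies on exactly that ordering), while FP (x29) and LR (x30) are separately named fields and x18 is platform-reserved. A sketch of the resulting mapping, with field names assumed from the ARM64 `CONTEXT` layout; illustrative only, not the runtime's code:

    OBJECTREF* GetCapturedRegisterSketch(int regNum, CONTEXT* pContext)
    {
        _ASSERTE(regNum >= 0 && regNum <= 30);
        _ASSERTE(regNum != 18);                  // x18 is platform-reserved

        if (regNum == 29) return (OBJECTREF*)&pContext->Fp;
        if (regNum == 30) return (OBJECTREF*)&pContext->Lr;
        return (OBJECTREF*)(&pContext->X0 + regNum); // X0..X28 contiguous
    }
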
diff --git a/src/coreclr/src/vm/methoddescbackpatchinfo.cpp b/src/coreclr/src/vm/methoddescbackpatchinfo.cpp
index efe1d04cf6f82..000b56dbc5bbd 100644
--- a/src/coreclr/src/vm/methoddescbackpatchinfo.cpp
+++ b/src/coreclr/src/vm/methoddescbackpatchinfo.cpp
@@ -28,6 +28,10 @@ void EntryPointSlots::Backpatch_Locked(TADDR slot, SlotType slotType, PCODE entr
_ASSERTE(entryPoint != NULL);
_ASSERTE(IS_ALIGNED((SIZE_T)slot, GetRequiredSlotAlignment(slotType)));
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
switch (slotType)
{
case SlotType_Normal:
diff --git a/src/coreclr/src/vm/precode.cpp b/src/coreclr/src/vm/precode.cpp
index 0228d860c5358..2ef201d629654 100644
--- a/src/coreclr/src/vm/precode.cpp
+++ b/src/coreclr/src/vm/precode.cpp
@@ -404,6 +404,10 @@ void Precode::ResetTargetInterlocked()
{
WRAPPER_NO_CONTRACT;
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
PrecodeType precodeType = GetType();
switch (precodeType)
{
@@ -437,6 +441,10 @@ BOOL Precode::SetTargetInterlocked(PCODE target, BOOL fOnlyRedirectFromPrestub)
if (fOnlyRedirectFromPrestub && !IsPointingToPrestub(expected))
return FALSE;
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
g_IBCLogger.LogMethodPrecodeWriteAccess(GetMethodDesc());
PrecodeType precodeType = GetType();
diff --git a/src/coreclr/src/vm/prestub.cpp b/src/coreclr/src/vm/prestub.cpp
index a61a9ed2584dc..040ca8662f7f3 100644
--- a/src/coreclr/src/vm/prestub.cpp
+++ b/src/coreclr/src/vm/prestub.cpp
@@ -326,6 +326,10 @@ PCODE MethodDesc::PrepareCode(PrepareCodeConfig* pConfig)
{
STANDARD_VM_CONTRACT;
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
// If other kinds of code need multi-versioning we could add more cases here,
// but for now generation of all other code/stubs occurs in other code paths
_ASSERTE(IsIL() || IsNoMetadata());
@@ -1915,6 +1919,10 @@ extern "C" PCODE STDCALL PreStubWorker(TransitionBlock* pTransitionBlock, Method
ETWOnStartup(PrestubWorker_V1, PrestubWorkerEnd_V1);
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
MAKE_CURRENT_THREAD_AVAILABLE();
// Attempt to check what GC mode we are running under.
diff --git a/src/coreclr/src/vm/threads.cpp b/src/coreclr/src/vm/threads.cpp
index 3d5512413d5e0..1c1cba7d102aa 100644
--- a/src/coreclr/src/vm/threads.cpp
+++ b/src/coreclr/src/vm/threads.cpp
@@ -707,6 +707,11 @@ Thread* SetupThread()
}
#endif
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ // New threads start with JIT write access disabled.
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(false);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
// Normally, HasStarted is called from the thread's entrypoint to introduce it to
// the runtime. But sometimes that thread is used for DLL_THREAD_ATTACH notifications
// that call into managed code. In that case, a call to SetupThread here must
@@ -1097,9 +1102,14 @@ PCODE AdjustWriteBarrierIP(PCODE controlPc)
return (PCODE)JIT_PatchedCodeStart + (controlPc - (PCODE)s_barrierCopy);
}
-#endif // FEATURE_WRITEBARRIER_COPY
-
extern "C" void *JIT_WriteBarrier_Loc;
+#ifdef TARGET_ARM64
+extern "C" void (*JIT_WriteBarrier_Table)();
+extern "C" void *JIT_WriteBarrier_Loc = 0;
+extern "C" void *JIT_WriteBarrier_Table_Loc = 0;
+#endif // TARGET_ARM64
+
+#endif // FEATURE_WRITEBARRIER_COPY
#ifndef TARGET_UNIX
// g_TlsIndex is only used by the DAC. Disable optimizations around it to prevent it from getting optimized out.
@@ -1137,6 +1147,10 @@ void InitThreadManager()
COMPlusThrowWin32();
}
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
memcpy(s_barrierCopy, (BYTE*)JIT_PatchedCodeStart, (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart);
// Store the JIT_WriteBarrier copy location to a global variable so that helpers
@@ -1144,6 +1158,12 @@ void InitThreadManager()
JIT_WriteBarrier_Loc = GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier);
SetJitHelperFunction(CORINFO_HELP_ASSIGN_REF, GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier));
+
+#ifdef TARGET_ARM64
+ // Store the JIT_WriteBarrier_Table copy location to a global variable so that it can be updated.
+ JIT_WriteBarrier_Table_Loc = GetWriteBarrierCodeLocation((void*)&JIT_WriteBarrier_Table);
+#endif // TARGET_ARM64
+
#else // FEATURE_WRITEBARRIER_COPY
// I am using virtual protect to cover the entire range that this code falls in.
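
The unchanged `AdjustWriteBarrierIP` context above states the invariant the copy depends on: the relocated barrier is a byte-for-byte image of the original range, so a program counter inside the copy maps back to the canonical `JIT_PatchedCodeStart` range by offset arithmetic alone. Restated as a self-contained sketch:

    #include <cstdint>

    // Translate a PC that fell inside the relocated barrier copy back into
    // the canonical patched-code range (offsets are preserved by memcpy).
    inline std::uintptr_t AdjustBarrierIP(std::uintptr_t controlPc,
                                          std::uintptr_t copyStart,
                                          std::uintptr_t patchedCodeStart)
    {
        return patchedCodeStart + (controlPc - copyStart);
    }
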
diff --git a/src/coreclr/src/vm/virtualcallstub.cpp b/src/coreclr/src/vm/virtualcallstub.cpp
index 078b4eeadaa23..f0aa1db4accf2 100644
--- a/src/coreclr/src/vm/virtualcallstub.cpp
+++ b/src/coreclr/src/vm/virtualcallstub.cpp
@@ -1724,6 +1724,10 @@ PCODE VirtualCallStubManager::ResolveWorker(StubCallSite* pCallSite,
PRECONDITION(IsProtectedByGCFrame(protectedObj));
} CONTRACTL_END;
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
MethodTable* objectType = (*protectedObj)->GetMethodTable();
CONSISTENCY_CHECK(CheckPointer(objectType));
@@ -2976,6 +2980,10 @@ LookupHolder *VirtualCallStubManager::GenerateLookupStub(PCODE addrOfResolver, s
POSTCONDITION(CheckPointer(RETVAL));
} CONTRACT_END;
+#if defined(HOST_OSX) && defined(HOST_ARM64)
+ auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
+#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+
//allocate from the requisite heap and copy the template over it.
LookupHolder * holder = (LookupHolder*) (void*) lookup_heap->AllocAlignedMem(sizeof(LookupHolder), CODE_SIZE_ALIGN);
diff --git a/src/coreclr/tryrun.cmake b/src/coreclr/tryrun.cmake
index a70a7b6936bab..4b23122b91c2b 100644
--- a/src/coreclr/tryrun.cmake
+++ b/src/coreclr/tryrun.cmake
@@ -18,9 +18,55 @@ elseif(EXISTS ${CROSS_ROOTFS}/bin/freebsd-version)
elseif(EXISTS ${CROSS_ROOTFS}/usr/platform/i86pc)
set(ILLUMOS 1)
set(CLR_CMAKE_TARGET_OS SunOS)
+elseif(EXISTS /System/Library/CoreServices)
+ set(DARWIN 1)
endif()
-if(TARGET_ARCH_NAME MATCHES "^(armel|arm|arm64|x86)$" OR FREEBSD OR ILLUMOS)
+if(DARWIN)
+ if(TARGET_ARCH_NAME MATCHES "^(arm64|x64)$")
+ set_cache_value(FILE_OPS_CHECK_FERROR_OF_PREVIOUS_CALL_EXITCODE 1)
+ set_cache_value(GETPWUID_R_SETS_ERRNO_EXITCODE 1)
+ set_cache_value(HAS_POSIX_SEMAPHORES_EXITCODE 1)
+ set_cache_value(HAVE_BROKEN_FIFO_KEVENT_EXITCODE 1)
+ set_cache_value(HAVE_BROKEN_FIFO_SELECT_EXITCODE 1)
+ set_cache_value(HAVE_CLOCK_MONOTONIC_COARSE_EXITCODE 1)
+ set_cache_value(HAVE_CLOCK_MONOTONIC_EXITCODE 0)
+ set_cache_value(HAVE_CLOCK_THREAD_CPUTIME_EXITCODE 0)
+ set_cache_value(HAVE_CLOCK_GETTIME_NSEC_NP_EXITCODE 0)
+ set_cache_value(HAVE_COMPATIBLE_ACOS_EXITCODE 0)
+ set_cache_value(HAVE_COMPATIBLE_ASIN_EXITCODE 0)
+ set_cache_value(HAVE_COMPATIBLE_ATAN2_EXITCODE 0)
+ set_cache_value(HAVE_COMPATIBLE_EXP_EXITCODE 1)
+ set_cache_value(HAVE_COMPATIBLE_ILOGB0_EXITCODE 0)
+ set_cache_value(HAVE_COMPATIBLE_ILOGBNAN_EXITCODE 1)
+ set_cache_value(HAVE_COMPATIBLE_LOG10_EXITCODE 0)
+ set_cache_value(HAVE_COMPATIBLE_LOG_EXITCODE 0)
+ set_cache_value(HAVE_COMPATIBLE_POW_EXITCODE 0)
+ set_cache_value(HAVE_FUNCTIONAL_PTHREAD_ROBUST_MUTEXES_EXITCODE 1)
+ set_cache_value(HAVE_LARGE_SNPRINTF_SUPPORT_EXITCODE 0)
+ set_cache_value(HAVE_MMAP_DEV_ZERO_EXITCODE 1)
+ set_cache_value(HAVE_PROCFS_CTL_EXITCODE 1)
+ set_cache_value(HAVE_PROCFS_MAPS_EXITCODE 1)
+ set_cache_value(HAVE_PROCFS_STATUS_EXITCODE 1)
+ set_cache_value(HAVE_PROCFS_STAT_EXITCODE 1)
+ set_cache_value(HAVE_SCHED_GETCPU_EXITCODE 1)
+ set_cache_value(HAVE_SCHED_GET_PRIORITY_EXITCODE 0)
+ set_cache_value(HAVE_VALID_NEGATIVE_INF_POW_EXITCODE 0)
+ set_cache_value(HAVE_VALID_POSITIVE_INF_POW_EXITCODE 0)
+ set_cache_value(HAVE_WORKING_CLOCK_GETTIME_EXITCODE 0)
+ set_cache_value(HAVE_WORKING_GETTIMEOFDAY_EXITCODE 0)
+ set_cache_value(MMAP_ANON_IGNORES_PROTECTION_EXITCODE 1)
+ set_cache_value(ONE_SHARED_MAPPING_PER_FILEREGION_PER_PROCESS_EXITCODE 1)
+ set_cache_value(PTHREAD_CREATE_MODIFIES_ERRNO_EXITCODE 1)
+ set_cache_value(REALPATH_SUPPORTS_NONEXISTENT_FILES_EXITCODE 1)
+ set_cache_value(SEM_INIT_MODIFIES_ERRNO_EXITCODE 1)
+ set_cache_value(SSCANF_CANNOT_HANDLE_MISSING_EXPONENT_EXITCODE 1)
+ set_cache_value(SSCANF_SUPPORT_ll_EXITCODE 0)
+ set_cache_value(UNGETC_NOT_RETURN_EOF_EXITCODE 1)
+ else()
+ message(FATAL_ERROR "Arch is ${TARGET_ARCH_NAME}. Only arm64 or x64 is supported for OSX cross build!")
+ endif()
+elseif(TARGET_ARCH_NAME MATCHES "^(armel|arm|arm64|x86)$" OR FREEBSD OR ILLUMOS)
set_cache_value(FILE_OPS_CHECK_FERROR_OF_PREVIOUS_CALL_EXITCODE 1)
set_cache_value(GETPWUID_R_SETS_ERRNO_EXITCODE 0)
set_cache_value(HAS_POSIX_SEMAPHORES_EXITCODE 0)
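
Each `*_EXITCODE` value above pre-seeds the result of a CMake `try_run` probe that cannot execute when cross-compiling for Darwin: 0 records that the probe binary would have exited successfully, 1 that it would have failed. For instance, the `HAVE_CLOCK_GETTIME_NSEC_NP_EXITCODE 0` entry corresponds to a probe of roughly this shape (a sketch; the actual probe source lives in the PAL configure checks):

    #include <time.h>

    // Exits 0 (success) when clock_gettime_nsec_np works, matching the
    // pre-seeded cache value for Darwin targets.
    int main(void)
    {
        return clock_gettime_nsec_np(CLOCK_UPTIME_RAW) != 0 ? 0 : 1;
    }
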
diff --git a/src/installer/corehost/build.sh b/src/installer/corehost/build.sh
index bd789c6f34f34..100808e9c1f45 100755
--- a/src/installer/corehost/build.sh
+++ b/src/installer/corehost/build.sh
@@ -110,4 +110,4 @@ setup_dirs
check_prereqs
# Build the installer native components.
-build_native "$__BuildArch" "$__scriptpath" "$__scriptpath" "$__IntermediatesDir" "installer component"
+build_native "$__TargetOS" "$__BuildArch" "$__scriptpath" "$__scriptpath" "$__IntermediatesDir" "$__CMakeArgs" "installer component"
diff --git a/src/libraries/Native/build-native.sh b/src/libraries/Native/build-native.sh
index fb8b183403eb5..440ddd643bb1b 100755
--- a/src/libraries/Native/build-native.sh
+++ b/src/libraries/Native/build-native.sh
@@ -69,7 +69,7 @@ else
__CMakeArgs="-DFEATURE_DISTRO_AGNOSTIC_SSL=$__PortableBuild $__CMakeArgs"
__CMakeArgs="-DCMAKE_STATIC_LIB_LINK=$__StaticLibLink $__CMakeArgs"
- if [[ "$__BuildArch" != x86 && "$__BuildArch" != x64 ]]; then
+ if [[ "$__BuildArch" != x86 && "$__BuildArch" != x64 && "$__BuildArch" != "$__HostArch" ]]; then
__CrossBuild=1
echo "Set CrossBuild for $__BuildArch build"
fi
@@ -157,4 +157,4 @@ setup_dirs
check_prereqs
# Build the corefx native components.
-build_native "$__BuildArch" "$__nativeroot" "$__nativeroot" "$__IntermediatesDir" "native libraries component"
+build_native "$__TargetOS" "$__BuildArch" "$__nativeroot" "$__nativeroot" "$__IntermediatesDir" "$__CMakeArgs" "native libraries component"
diff --git a/src/tests/build.sh b/src/tests/build.sh
index 8f151ef7d87c9..7a0dd87ea78d8 100755
--- a/src/tests/build.sh
+++ b/src/tests/build.sh
@@ -290,7 +290,7 @@ build_Tests()
fi
if [[ "$__SkipNative" != 1 && "$__BuildArch" != "wasm" ]]; then
- build_native "$__BuildArch" "$__TestDir" "$__TryRunDir" "$__NativeTestIntermediatesDir" "CoreCLR test component"
+ build_native "$__TargetOS" "$__BuildArch" "$__TestDir" "$__TryRunDir" "$__NativeTestIntermediatesDir" "CoreCLR test component"
if [[ "$?" -ne 0 ]]; then
echo "${__ErrMsgPrefix}${__MsgPrefix}Error: native test build failed. Refer to the build log files for details (above)"
diff --git a/src/tests/run.sh b/src/tests/run.sh
index 3ad281a5840b2..879d3f4646832 100755
--- a/src/tests/run.sh
+++ b/src/tests/run.sh
@@ -87,7 +87,7 @@ function check_cpu_architecture {
armv7l)
__arch=arm
;;
- aarch64)
+ aarch64|arm64)
__arch=arm64
;;
*)