diff --git a/docs/design/coreclr/botr/clr-abi.md b/docs/design/coreclr/botr/clr-abi.md
index af53e558712b5c..906bdbe3f47086 100644
--- a/docs/design/coreclr/botr/clr-abi.md
+++ b/docs/design/coreclr/botr/clr-abi.md
@@ -537,6 +537,22 @@ The extra state created by the JIT for synchronized methods (lock taken flag) mu
EnC is supported for adding and editing generic methods and methods on generic types and generic methods on non-generic types.
+# Portable entrypoints
+
+On platforms that allow dynamic code generation, the runtime abstracts away execution strategies for dynamically loaded methods by allocating [`Precode`](method-descriptor.md#precode)s. A `Precode` is a small code fragment used as a temporary method entrypoint until the actual method code is acquired. `Precode`s are also used as part of the execution path for methods that do not have regular JITed or AOT-compiled code, for example, stubs or interpreted methods. `Precode`s allow native code to use the same native code calling convention irrespective of the execution strategy used by the target method.
+
+On platforms that do not allow dynamic code generation (Wasm), the runtime abstracts away execution strategies by allocating portable entrypoints for dynamically loaded methods. The `PortableEntryPoint` is a data structure that enables an efficient transition to the desired execution strategy for the target method. When the runtime is configured to use portable entrypoints, the managed calling convention is modified as follows:
+
+- The native code to call is obtained by dereferencing the entrypoint.
+
+- The entrypoint address is passed as an extra, final hidden argument. This hidden argument must be present in the signatures of all methods; it is unused by the code of JITed or AOT-compiled methods.
+
+Pseudo code for a call with portable entrypoints:
+
+> `(*(void**)pfn)(arg0, arg1, ..., argN, pfn)`
+
+Currently, portable entrypoints are used only for Wasm with the interpreter. Note that portable entrypoints are unnecessary for Wasm with native AOT, since native AOT does not support dynamic loading.
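+
+To make the convention concrete, here is a minimal C++ sketch of a call through a portable entrypoint, assuming a hypothetical two-argument method returning `int`; the names `PortableCode2` and `CallViaPortableEntryPoint` are illustrative, not runtime APIs:
+
+```c++
+// The target's native code signature: note the extra last hidden argument
+// carrying the entrypoint address.
+typedef int (*PortableCode2)(int arg0, int arg1, void* entryPoint);
+
+int CallViaPortableEntryPoint(void* pfn, int arg0, int arg1)
+{
+    // The native code to call is obtained by dereferencing the entrypoint.
+    PortableCode2 target = *(PortableCode2*)pfn;
+    // The entrypoint itself is passed as the extra last hidden argument.
+    return target(arg0, arg1, pfn);
+}
+```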
+
# System V x86_64 support
This section relates mostly to calling conventions on System V systems (such as Ubuntu Linux and Mac OS X).
diff --git a/eng/OSArch.props b/eng/OSArch.props
index 22fb8b577fca90..4f30cb2e71d5be 100644
--- a/eng/OSArch.props
+++ b/eng/OSArch.props
@@ -33,6 +33,10 @@
$(TargetArchitecture)
+
+ true
+
+
<_ImportedOSArchProps>true
diff --git a/src/coreclr/clr.featuredefines.props b/src/coreclr/clr.featuredefines.props
index 81fb995c84a793..cde73ea5bc3bca 100644
--- a/src/coreclr/clr.featuredefines.props
+++ b/src/coreclr/clr.featuredefines.props
@@ -3,6 +3,7 @@
true
true
true
+ false
true
@@ -10,6 +11,10 @@
true
+
+ true
+
+
true
true
@@ -43,6 +48,7 @@
$(DefineConstants);FEATURE_TYPEEQUIVALENCE
$(DefineConstants);FEATURE_EH_FUNCLETS
$(DefineConstants);FEATURE_INTERPRETER
+ $(DefineConstants);FEATURE_PORTABLE_ENTRYPOINTS
$(DefineConstants);PROFILING_SUPPORTED
diff --git a/src/coreclr/clrdefinitions.cmake b/src/coreclr/clrdefinitions.cmake
index 5dba165212f0d4..610a7fef055698 100644
--- a/src/coreclr/clrdefinitions.cmake
+++ b/src/coreclr/clrdefinitions.cmake
@@ -118,6 +118,9 @@ if (CLR_CMAKE_TARGET_WIN32 AND (CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_
endif (CLR_CMAKE_TARGET_WIN32 AND (CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_I386 OR CLR_CMAKE_TARGET_ARCH_ARM64))
add_compile_definitions($<${FEATURE_INTERPRETER}:FEATURE_INTERPRETER>)
+if (FEATURE_PORTABLE_ENTRYPOINTS)
+ add_compile_definitions(FEATURE_PORTABLE_ENTRYPOINTS)
+endif()
if (CLR_CMAKE_TARGET_WIN32)
add_definitions(-DFEATURE_ISYM_READER)
@@ -160,7 +163,9 @@ add_definitions(-DFEATURE_READYTORUN)
set(FEATURE_READYTORUN 1)
-add_compile_definitions(FEATURE_REJIT)
+if(FEATURE_REJIT)
+ add_compile_definitions(FEATURE_REJIT)
+endif()
if (CLR_CMAKE_HOST_UNIX AND CLR_CMAKE_TARGET_UNIX)
add_definitions(-DFEATURE_REMOTE_PROC_MEM)
@@ -174,7 +179,11 @@ if (NOT CLR_CMAKE_HOST_ANDROID)
add_definitions(-DFEATURE_SVR_GC)
endif(NOT CLR_CMAKE_HOST_ANDROID)
add_definitions(-DFEATURE_SYMDIFF)
-add_compile_definitions(FEATURE_TIERED_COMPILATION)
+
+if (FEATURE_TIERED_COMPILATION)
+ add_compile_definitions(FEATURE_TIERED_COMPILATION)
+endif(FEATURE_TIERED_COMPILATION)
+
add_compile_definitions(FEATURE_PGO)
if (CLR_CMAKE_TARGET_ARCH_AMD64)
# Enable the AMD64 Unix struct passing JIT-EE interface for all AMD64 platforms, to enable altjit.
diff --git a/src/coreclr/clrfeatures.cmake b/src/coreclr/clrfeatures.cmake
index 3b4fb203ee2c51..1dbc058acc6f03 100644
--- a/src/coreclr/clrfeatures.cmake
+++ b/src/coreclr/clrfeatures.cmake
@@ -1,5 +1,7 @@
if (NOT CLR_CMAKE_TARGET_ARCH_WASM AND NOT CLR_CMAKE_TARGET_IOS AND NOT CLR_CMAKE_TARGET_TVOS AND NOT CLR_CMAKE_TARGET_MACCATALYST)
set(FEATURE_JIT 1)
+ set(FEATURE_TIERED_COMPILATION 1)
+ set(FEATURE_REJIT 1)
endif()
if (CLR_CMAKE_TARGET_ARCH_WASM OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS OR CLR_CMAKE_TARGET_MACCATALYST)
@@ -39,6 +41,7 @@ if(NOT DEFINED FEATURE_INTERPRETER)
set(FEATURE_INTERPRETER 0)
elseif(CLR_CMAKE_TARGET_ARCH_WASM)
set(FEATURE_INTERPRETER 1)
+ set(FEATURE_PORTABLE_ENTRYPOINTS 1)
else()
if(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64)
set(FEATURE_INTERPRETER $,1,0>)
diff --git a/src/coreclr/interpreter/compiler.cpp b/src/coreclr/interpreter/compiler.cpp
index c93116c145ce14..93564ee61edaff 100644
--- a/src/coreclr/interpreter/compiler.cpp
+++ b/src/coreclr/interpreter/compiler.cpp
@@ -2233,7 +2233,7 @@ int32_t InterpCompiler::GetDataForHelperFtn(CorInfoHelpFunc ftn)
static_assert(sizeof(InterpHelperData) == sizeof(int32_t), "InterpHelperData must be the same size as an int32_t");
- InterpHelperData result;
+ InterpHelperData result{};
result.accessType = ftnLookup.accessType;
int32_t dataItemIndex = GetDataItemIndex(ftnLookup.addr);
result.addressDataItemIndex = dataItemIndex;
@@ -6577,8 +6577,8 @@ void InterpCompiler::PrintPointer(void* pointer)
void InterpCompiler::PrintHelperFtn(int32_t _data)
{
- InterpHelperData data;
- memcpy(&data, &_data, sizeof(int32_t));
+ InterpHelperData data{};
+ memcpy(&data, &_data, sizeof(_data));
void *helperAddr = GetDataItemAtIndex(data.addressDataItemIndex);
PrintPointer(helperAddr);
diff --git a/src/coreclr/interpreter/interpretershared.h b/src/coreclr/interpreter/interpretershared.h
index aaa90045dd9f29..137077d6add124 100644
--- a/src/coreclr/interpreter/interpretershared.h
+++ b/src/coreclr/interpreter/interpretershared.h
@@ -30,7 +30,8 @@ struct InterpMethod
InterpMethod *self;
#endif
CORINFO_METHOD_HANDLE methodHnd;
- int32_t argsSize, allocaSize;
+ int32_t argsSize;
+ int32_t allocaSize;
void** pDataItems;
// This stub is used for calling the interpreted method from JITted/AOTed code
CallStubHeader *pCallStub;
diff --git a/src/coreclr/vm/CMakeLists.txt b/src/coreclr/vm/CMakeLists.txt
index 37f1a76d4f73a3..53faa6b87aeee0 100644
--- a/src/coreclr/vm/CMakeLists.txt
+++ b/src/coreclr/vm/CMakeLists.txt
@@ -112,6 +112,7 @@ set(VM_SOURCES_DAC_AND_WKS_COMMON
peimage.cpp
perfmap.cpp
pgo.cpp
+ precode_portable.cpp
precode.cpp
prestub.cpp
readytorunstandalonemethodmetadata.cpp
@@ -215,6 +216,7 @@ set(VM_HEADERS_DAC_AND_WKS_COMMON
peimagelayout.inl
perfmap.h
pgo.h
+ precode_portable.hpp
precode.h
rejit.h
rejit.inl
diff --git a/src/coreclr/vm/appdomain.hpp b/src/coreclr/vm/appdomain.hpp
index 8a42c4d7cd213f..1f02ebbaac5a97 100644
--- a/src/coreclr/vm/appdomain.hpp
+++ b/src/coreclr/vm/appdomain.hpp
@@ -1549,8 +1549,6 @@ class AppDomain final
#endif
-#if defined(FEATURE_TIERED_COMPILATION)
-
public:
TieredCompilationManager * GetTieredCompilationManager()
{
@@ -1561,8 +1559,6 @@ class AppDomain final
private:
TieredCompilationManager m_tieredCompilationManager;
-#endif
-
friend struct cdac_data;
}; // class AppDomain
diff --git a/src/coreclr/vm/callhelpers.cpp b/src/coreclr/vm/callhelpers.cpp
index 4bf742d5078440..daf5df18ee75fc 100644
--- a/src/coreclr/vm/callhelpers.cpp
+++ b/src/coreclr/vm/callhelpers.cpp
@@ -180,11 +180,11 @@ void CopyReturnedFpStructFromRegisters(void* dest, UINT64 returnRegs[2], FpStruc
#endif // TARGET_RISCV64 || TARGET_LOONGARCH64
// Helper for VM->managed calls with simple signatures.
-void * DispatchCallSimple(
- SIZE_T *pSrc,
- DWORD numStackSlotsToCopy,
- PCODE pTargetAddress,
- DWORD dwDispatchCallSimpleFlags)
+void* DispatchCallSimple(
+ SIZE_T *pSrc,
+ DWORD numStackSlotsToCopy,
+ PCODE pTargetAddress,
+ DWORD dwDispatchCallSimpleFlags)
{
CONTRACTL
{
diff --git a/src/coreclr/vm/callhelpers.h b/src/coreclr/vm/callhelpers.h
index 12b4a8da1af464..91a249cbc7660b 100644
--- a/src/coreclr/vm/callhelpers.h
+++ b/src/coreclr/vm/callhelpers.h
@@ -74,11 +74,11 @@ void CallDescrWorkerWithHandler(
BOOL fCriticalCall = FALSE);
// Helper for VM->managed calls with simple signatures.
-void * DispatchCallSimple(
- SIZE_T *pSrc,
- DWORD numStackSlotsToCopy,
- PCODE pTargetAddress,
- DWORD dwDispatchCallSimpleFlags);
+void* DispatchCallSimple(
+ SIZE_T *pSrc,
+ DWORD numStackSlotsToCopy,
+ PCODE pTargetAddress,
+ DWORD dwDispatchCallSimpleFlags);
#if defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
// Copy structs returned according to floating-point calling convention from 'returnRegs' containing struct fields
diff --git a/src/coreclr/vm/cdacplatformmetadata.cpp b/src/coreclr/vm/cdacplatformmetadata.cpp
index e616d10c82afde..2434315a506dd7 100644
--- a/src/coreclr/vm/cdacplatformmetadata.cpp
+++ b/src/coreclr/vm/cdacplatformmetadata.cpp
@@ -20,7 +20,9 @@ void CDacPlatformMetadata::Init()
void CDacPlatformMetadata::InitPrecodes()
{
+#ifndef FEATURE_PORTABLE_ENTRYPOINTS
PrecodeMachineDescriptor::Init(&(&g_cdacPlatformMetadata)->precode);
+#endif // !FEATURE_PORTABLE_ENTRYPOINTS
}
#endif // !DACCESS_COMPILE
diff --git a/src/coreclr/vm/ceemain.cpp b/src/coreclr/vm/ceemain.cpp
index 4be75dc9158969..3515db9792064a 100644
--- a/src/coreclr/vm/ceemain.cpp
+++ b/src/coreclr/vm/ceemain.cpp
@@ -670,8 +670,10 @@ void EEStartupHelper()
JITInlineTrackingMap::StaticInitialize();
MethodDescBackpatchInfoTracker::StaticInitialize();
CodeVersionManager::StaticInitialize();
+#ifdef FEATURE_TIERED_COMPILATION
TieredCompilationManager::StaticInitialize();
CallCountingManager::StaticInitialize();
+#endif // FEATURE_TIERED_COMPILATION
OnStackReplacementManager::StaticInitialize();
MethodTable::InitMethodDataCache();
@@ -804,9 +806,11 @@ void EEStartupHelper()
CoreLibBinder::Startup();
StubLinkerCPU::Init();
+#ifndef FEATURE_PORTABLE_ENTRYPOINTS
StubPrecode::StaticInitialize();
FixupPrecode::StaticInitialize();
CDacPlatformMetadata::InitPrecodes();
+#endif // !FEATURE_PORTABLE_ENTRYPOINTS
InitializeGarbageCollector();
diff --git a/src/coreclr/vm/codeversion.cpp b/src/coreclr/vm/codeversion.cpp
index 9a2af2a97f79d0..9db4ea522b0fbc 100644
--- a/src/coreclr/vm/codeversion.cpp
+++ b/src/coreclr/vm/codeversion.cpp
@@ -322,11 +322,23 @@ MethodDescVersioningState* NativeCodeVersion::GetMethodDescVersioningState()
}
#endif
+bool NativeCodeVersion::IsFinalTier() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
#ifdef FEATURE_TIERED_COMPILATION
+ OptimizationTier tier = GetOptimizationTier();
+ return tier == OptimizationTier1 || tier == OptimizationTierOptimized;
+#else // !FEATURE_TIERED_COMPILATION
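+ // Without tiered compilation there is only a single tier, so every code version is final.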
+ return true;
+#endif // FEATURE_TIERED_COMPILATION
+}
+#ifdef FEATURE_TIERED_COMPILATION
NativeCodeVersion::OptimizationTier NativeCodeVersion::GetOptimizationTier() const
{
LIMITED_METHOD_DAC_CONTRACT;
+
if (m_storageKind == StorageKind::Explicit)
{
return AsNode()->GetOptimizationTier();
@@ -337,13 +349,6 @@ NativeCodeVersion::OptimizationTier NativeCodeVersion::GetOptimizationTier() con
}
}
-bool NativeCodeVersion::IsFinalTier() const
-{
- LIMITED_METHOD_DAC_CONTRACT;
- OptimizationTier tier = GetOptimizationTier();
- return tier == OptimizationTier1 || tier == OptimizationTierOptimized;
-}
-
#ifndef DACCESS_COMPILE
void NativeCodeVersion::SetOptimizationTier(OptimizationTier tier)
{
diff --git a/src/coreclr/vm/codeversion.h b/src/coreclr/vm/codeversion.h
index 7735a1039d43c6..84019d9e346aca 100644
--- a/src/coreclr/vm/codeversion.h
+++ b/src/coreclr/vm/codeversion.h
@@ -71,6 +71,8 @@ class NativeCodeVersion
BOOL SetNativeCodeInterlocked(PCODE pCode, PCODE pExpected = 0);
#endif
+ bool IsFinalTier() const;
+
// NOTE: Don't change existing values to avoid breaking changes in event tracing
enum OptimizationTier
{
@@ -83,7 +85,7 @@ class NativeCodeVersion
};
#ifdef FEATURE_TIERED_COMPILATION
OptimizationTier GetOptimizationTier() const;
- bool IsFinalTier() const;
+
#ifndef DACCESS_COMPILE
void SetOptimizationTier(OptimizationTier tier);
#endif
diff --git a/src/coreclr/vm/eeconfig.h b/src/coreclr/vm/eeconfig.h
index 2300be8fba075e..9c008ae7647cc2 100644
--- a/src/coreclr/vm/eeconfig.h
+++ b/src/coreclr/vm/eeconfig.h
@@ -87,7 +87,7 @@ class EEConfig
DWORD TieredCompilation_CallCountingDelayMs() const { LIMITED_METHOD_CONTRACT; return tieredCompilation_CallCountingDelayMs; }
bool TieredCompilation_UseCallCountingStubs() const { LIMITED_METHOD_CONTRACT; return fTieredCompilation_UseCallCountingStubs; }
DWORD TieredCompilation_DeleteCallCountingStubsAfter() const { LIMITED_METHOD_CONTRACT; return tieredCompilation_DeleteCallCountingStubsAfter; }
-#endif
+#endif // FEATURE_TIERED_COMPILATION
#if defined(FEATURE_PGO)
bool TieredPGO(void) const { LIMITED_METHOD_CONTRACT; return fTieredPGO; }
diff --git a/src/coreclr/vm/interpexec.cpp b/src/coreclr/vm/interpexec.cpp
index f0b5a81f641295..bc504bab76272e 100644
--- a/src/coreclr/vm/interpexec.cpp
+++ b/src/coreclr/vm/interpexec.cpp
@@ -232,23 +232,38 @@ static OBJECTREF CreateMultiDimArray(MethodTable* arrayClass, int8_t* stack, int
#define LOCAL_VAR(offset,type) (*LOCAL_VAR_ADDR(offset, type))
#define NULL_CHECK(o) do { if ((o) == NULL) { COMPlusThrow(kNullReferenceException); } } while (0)
-template <typename THelper> static THelper GetPossiblyIndirectHelper(const InterpMethod *pMethod, int32_t _data)
+template <typename THelper> static THelper GetPossiblyIndirectHelper(const InterpMethod* pMethod, int32_t _data, MethodDesc** pILTargetMethod = NULL)
{
- InterpHelperData data;
- memcpy(&data, &_data, sizeof(int32_t));
+ InterpHelperData data{};
+ memcpy(&data, &_data, sizeof(_data));
- void *addr = pMethod->pDataItems[data.addressDataItemIndex];
- switch (data.accessType) {
+ void* addr = pMethod->pDataItems[data.addressDataItemIndex];
+ switch (data.accessType)
+ {
case IAT_VALUE:
- return (THelper)addr;
+ break;
case IAT_PVALUE:
- return *(THelper *)addr;
+ addr = *(void**)addr;
+ break;
case IAT_PPVALUE:
- return **(THelper **)addr;
+ addr = **(void***)addr;
+ break;
default:
COMPlusThrowHR(COR_E_EXECUTIONENGINE);
- return (THelper)nullptr;
+ break;
}
+
+#ifdef FEATURE_PORTABLE_ENTRYPOINTS
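+ // The resolved address may be a portable entrypoint rather than native code: unwrap it, or report the target MethodDesc so the caller can interpret it instead.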
+ if (!PortableEntryPoint::IsNativeEntryPoint((TADDR)addr))
+ {
+ _ASSERTE(pILTargetMethod != NULL);
+ *pILTargetMethod = PortableEntryPoint::GetMethodDesc((TADDR)addr);
+ return NULL; // Return null to interpret this entrypoint
+ }
+ addr = PortableEntryPoint::GetActualCode((TADDR)addr);
+#endif // FEATURE_PORTABLE_ENTRYPOINTS
+
+ return (THelper)addr;
}
// At present our behavior for float to int conversions is to perform a saturating conversion down to either 32 or 64 bits
diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp
index 77f65cb7178463..b2d2b11543c2f9 100644
--- a/src/coreclr/vm/jitinterface.cpp
+++ b/src/coreclr/vm/jitinterface.cpp
@@ -6546,10 +6546,12 @@ DWORD CEEInfo::getMethodAttribsInternal (CORINFO_METHOD_HANDLE ftn)
result |= CORINFO_FLG_DELEGATE_INVOKE;
}
+#ifdef FEATURE_TIERED_COMPILATION
if (!g_pConfig->TieredCompilation_QuickJitForLoops())
{
result |= CORINFO_FLG_DISABLE_TIER0_FOR_LOOPS;
}
+#endif // FEATURE_TIERED_COMPILATION
return result;
}
@@ -13351,10 +13353,20 @@ PCODE UnsafeJitFunction(PrepareCodeConfig* config,
{
sizeOfILCode = interpreterJitInfo.getMethodInfoInternal()->ILCodeSize;
+#ifdef FEATURE_PORTABLE_ENTRYPOINTS
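+ // Attach the interpreter byte code to the method's portable entrypoint and return the entrypoint as the code address.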
+ PCODE portableEntryPoint = ftn->GetPortableEntryPoint();
+ _ASSERTE(portableEntryPoint != NULL);
+ PortableEntryPoint::SetInterpreterData(PCODEToPINSTR(portableEntryPoint), ret);
+ ret = portableEntryPoint;
+
+#else // !FEATURE_PORTABLE_ENTRYPOINTS
AllocMemTracker amt;
InterpreterPrecode* pPrecode = Precode::AllocateInterpreterPrecode(ret, ftn->GetLoaderAllocator(), &amt);
amt.SuppressRelease();
ret = PINSTRToPCODE(pPrecode->GetEntryPoint());
+
+#endif // FEATURE_PORTABLE_ENTRYPOINTS
+
*isInterpreterCode = true;
*isTier0 = interpreterJitInfo.getJitFlagsInternal()->IsSet(CORJIT_FLAGS::CORJIT_FLAG_TIER0);
}
@@ -14569,6 +14581,7 @@ CORINFO_METHOD_HANDLE CEEJitInfo::getAsyncResumptionStub()
numArgs++;
#endif
+#ifdef FEATURE_TIERED_COMPILATION
// Resumption stubs are uniquely coupled to the code version (since the
// continuation is), so we need to make sure we always keep calling the
// same version here.
@@ -14583,11 +14596,12 @@ CORINFO_METHOD_HANDLE CEEJitInfo::getAsyncResumptionStub()
// so through information stored in the continuation).
_ASSERTE(m_pPatchpointInfoFromRuntime != NULL);
pCode->EmitLDC((DWORD_PTR)m_pPatchpointInfoFromRuntime->GetTier0EntryPoint());
-#else
+#else // !FEATURE_ON_STACK_REPLACEMENT
_ASSERTE(!"Unexpected optimization tier with OSR disabled");
-#endif
+#endif // FEATURE_ON_STACK_REPLACEMENT
}
else
+#endif // FEATURE_TIERED_COMPILATION
{
{
m_finalCodeAddressSlot = (PCODE*)amTracker.Track(m_pMethodBeingCompiled->GetLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(PCODE))));
@@ -14725,7 +14739,8 @@ CORINFO_METHOD_HANDLE CEEJitInfo::getAsyncResumptionStub()
amTracker.SuppressRelease();
- const char* optimizationTierName = nullptr;
+ const char* optimizationTierName = "UnknownTier";
+#ifdef FEATURE_TIERED_COMPILATION
switch (ncv.GetOptimizationTier())
{
case NativeCodeVersion::OptimizationTier0: optimizationTierName = "Tier0"; break;
@@ -14734,8 +14749,9 @@ CORINFO_METHOD_HANDLE CEEJitInfo::getAsyncResumptionStub()
case NativeCodeVersion::OptimizationTierOptimized: optimizationTierName = "Optimized"; break;
case NativeCodeVersion::OptimizationTier0Instrumented: optimizationTierName = "Tier0Instrumented"; break;
case NativeCodeVersion::OptimizationTier1Instrumented: optimizationTierName = "Tier1Instrumented"; break;
- default: optimizationTierName = "UnknownTier"; break;
+ default: break;
}
+#endif // FEATURE_TIERED_COMPILATION
char name[256];
int numWritten = sprintf_s(name, ARRAY_SIZE(name), "IL_STUB_AsyncResume_%s_%s", m_pMethodBeingCompiled->GetName(), optimizationTierName);
diff --git a/src/coreclr/vm/loaderallocator.cpp b/src/coreclr/vm/loaderallocator.cpp
index 58740770b2b644..f02cd07af6ee6e 100644
--- a/src/coreclr/vm/loaderallocator.cpp
+++ b/src/coreclr/vm/loaderallocator.cpp
@@ -1219,9 +1219,11 @@ void LoaderAllocator::Init(BYTE *pExecutableHeapMemory)
&s_stubPrecodeHeapConfig);
#endif // defined(FEATURE_STUBPRECODE_DYNAMIC_HELPERS) && defined(FEATURE_READYTORUN)
+#ifdef HAS_FIXUP_PRECODE
m_pFixupPrecodeHeap = new (&m_FixupPrecodeHeapInstance) InterleavedLoaderHeap(&m_fixupPrecodeRangeList,
false /* fUnlocked */,
&s_fixupStubPrecodeHeapConfig);
+#endif // HAS_FIXUP_PRECODE
// Initialize the EE marshaling data to NULL.
m_pMarshalingData = NULL;
@@ -1242,7 +1244,7 @@ void LoaderAllocator::Init(BYTE *pExecutableHeapMemory)
{
m_callCountingManager = new CallCountingManager();
}
-#endif
+#endif // FEATURE_TIERED_COMPILATION
}
diff --git a/src/coreclr/vm/method.cpp b/src/coreclr/vm/method.cpp
index 769a5a0d720c95..32bd8222e8577f 100644
--- a/src/coreclr/vm/method.cpp
+++ b/src/coreclr/vm/method.cpp
@@ -2107,6 +2107,10 @@ PCODE MethodDesc::TryGetMultiCallableAddrOfCode(CORINFO_ACCESS_FLAGS accessFlags
_ASSERTE((accessFlags & ~CORINFO_ACCESS_LDFTN) == 0);
}
+#ifdef FEATURE_PORTABLE_ENTRYPOINTS
+ return GetPortableEntryPoint();
+
+#else // !FEATURE_PORTABLE_ENTRYPOINTS
if (RequiresStableEntryPoint() && !HasStableEntryPoint())
GetOrCreatePrecode();
@@ -2168,6 +2172,7 @@ PCODE MethodDesc::TryGetMultiCallableAddrOfCode(CORINFO_ACCESS_FLAGS accessFlags
//
return GetTemporaryEntryPoint();
}
+#endif // !FEATURE_PORTABLE_ENTRYPOINTS
}
//*******************************************************************************
@@ -2202,13 +2207,18 @@ PCODE MethodDesc::GetCallTarget(OBJECTREF* pThisObj, TypeHandle ownerType)
MethodDesc* NonVirtualEntry2MethodDesc(PCODE entryPoint)
{
- CONTRACTL {
+ CONTRACTL
+ {
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END
+#ifdef FEATURE_PORTABLE_ENTRYPOINTS
+ return PortableEntryPoint::GetMethodDesc(PCODEToPINSTR(entryPoint));
+
+#else // FEATURE_PORTABLE_ENTRYPOINTS
RangeSection* pRS = ExecutionManager::FindCodeRange(entryPoint, ExecutionManager::GetScanFlags());
if (pRS == NULL)
{
@@ -2242,6 +2252,7 @@ MethodDesc* NonVirtualEntry2MethodDesc(PCODE entryPoint)
// We should never get here
_ASSERTE(!"NonVirtualEntry2MethodDesc failed");
return NULL;
+#endif // FEATURE_PORTABLE_ENTRYPOINTS
}
#ifndef DACCESS_COMPILE
@@ -2636,12 +2647,16 @@ MethodDesc* MethodDesc::GetMethodDescFromPrecode(PCODE addr, BOOL fSpeculative /
MethodDesc* pMD = NULL;
+#ifdef FEATURE_PORTABLE_ENTRYPOINTS
+ pMD = PortableEntryPoint::GetMethodDesc(PCODEToPINSTR(addr));
+
+#else // !FEATURE_PORTABLE_ENTRYPOINTS
PTR_Precode pPrecode = Precode::GetPrecodeFromEntryPoint(addr, fSpeculative);
_ASSERTE(fSpeculative || (pPrecode != NULL));
if (pPrecode != NULL)
- {
pMD = pPrecode->GetMethodDesc(fSpeculative);
- }
+
+#endif // FEATURE_PORTABLE_ENTRYPOINTS
RETURN(pMD);
}
@@ -2735,12 +2750,24 @@ void MethodDesc::EnsureTemporaryEntryPointCore(AllocMemTracker *pamTracker)
PTR_PCODE pSlot = GetAddrOfSlot();
AllocMemTracker amt;
- AllocMemTracker *pamTrackerPrecode = pamTracker != NULL ? pamTracker : &amt;
+ AllocMemTracker* pamTrackerPrecode = pamTracker != NULL ? pamTracker : &amt;
+
+ PCODE entryPoint;
+#ifdef FEATURE_PORTABLE_ENTRYPOINTS
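+ // Portable entrypoints are plain data, so the temporary entrypoint can be allocated from the regular high-frequency heap.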
+ PortableEntryPoint* portableEntryPoint = (PortableEntryPoint*)pamTrackerPrecode->Track(
+ GetLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(S_SIZE_T{ sizeof(PortableEntryPoint) }));
+ portableEntryPoint->Init(this);
+ entryPoint = (PCODE)portableEntryPoint;
+
+#else // !FEATURE_PORTABLE_ENTRYPOINTS
Precode* pPrecode = Precode::Allocate(GetPrecodeType(), this, GetLoaderAllocator(), pamTrackerPrecode);
+ entryPoint = pPrecode->GetEntryPoint();
+
+#endif // FEATURE_PORTABLE_ENTRYPOINTS
IfFailThrow(EnsureCodeDataExists(pamTracker));
- if (InterlockedCompareExchangeT(&m_codeData->TemporaryEntryPoint, pPrecode->GetEntryPoint(), (PCODE)NULL) == (PCODE)NULL)
+ if (InterlockedCompareExchangeT(&m_codeData->TemporaryEntryPoint, entryPoint, (PCODE)NULL) == (PCODE)NULL)
amt.SuppressRelease(); // We only need to suppress the release if we are working with a MethodDesc which is not newly allocated
PCODE tempEntryPoint = m_codeData->TemporaryEntryPoint;
@@ -2754,6 +2781,28 @@ void MethodDesc::EnsureTemporaryEntryPointCore(AllocMemTracker *pamTracker)
}
}
+#ifdef FEATURE_PORTABLE_ENTRYPOINTS
+void MethodDesc::EnsurePortableEntryPoint()
+{
+ WRAPPER_NO_CONTRACT;
+
+ // The portable entry point is currently the same as the
+ // temporary entry point.
+ EnsureTemporaryEntryPoint();
+
+ SetStableEntryPointInterlocked(GetPortableEntryPoint());
+}
+
+PCODE MethodDesc::GetPortableEntryPoint()
+{
+ WRAPPER_NO_CONTRACT;
+
+ // The portable entry point is currently the same as the
+ // temporary entry point.
+ return GetTemporaryEntryPoint();
+}
+#endif // FEATURE_PORTABLE_ENTRYPOINTS
+
//*******************************************************************************
void MethodDescChunk::DetermineAndSetIsEligibleForTieredCompilation()
{
@@ -2827,12 +2876,14 @@ Precode* MethodDesc::GetOrCreatePrecode()
#ifdef _DEBUG
PTR_PCODE pSlot = GetAddrOfSlot();
+ _ASSERTE(*pSlot != (PCODE)NULL);
+ _ASSERTE(*pSlot == tempEntry);
+#ifndef FEATURE_PORTABLE_ENTRYPOINTS
PrecodeType requiredType = GetPrecodeType();
PrecodeType availableType = Precode::GetPrecodeFromEntryPoint(tempEntry)->GetType();
_ASSERTE(requiredType == availableType);
- _ASSERTE(*pSlot != (PCODE)NULL);
- _ASSERTE(*pSlot == tempEntry);
-#endif
+#endif // !FEATURE_PORTABLE_ENTRYPOINTS
+#endif // _DEBUG
// Set the flags atomically
InterlockedUpdateFlags3(enum_flag3_HasStableEntryPoint | enum_flag3_HasPrecode, TRUE);
@@ -2845,10 +2896,14 @@ void MethodDesc::MarkPrecodeAsStableEntrypoint()
#if _DEBUG
PCODE tempEntry = GetTemporaryEntryPointIfExists();
_ASSERTE(tempEntry != (PCODE)NULL);
+#ifdef FEATURE_PORTABLE_ENTRYPOINTS
+ _ASSERTE(PortableEntryPoint::GetMethodDesc(PCODEToPINSTR(tempEntry)) == this);
+#else // !FEATURE_PORTABLE_ENTRYPOINTS
PrecodeType requiredType = GetPrecodeType();
PrecodeType availableType = Precode::GetPrecodeFromEntryPoint(tempEntry)->GetType();
_ASSERTE(requiredType == availableType);
-#endif
+#endif // FEATURE_PORTABLE_ENTRYPOINTS
+#endif // _DEBUG
_ASSERTE(!HasPrecode());
_ASSERTE(RequiresStableEntryPoint());
diff --git a/src/coreclr/vm/method.hpp b/src/coreclr/vm/method.hpp
index dc138254d93965..9bd1f5889e6fa3 100644
--- a/src/coreclr/vm/method.hpp
+++ b/src/coreclr/vm/method.hpp
@@ -1574,6 +1574,13 @@ class MethodDesc
// OR must be set to point to the same AllocMemTracker that controls allocation of the MethodDesc
void EnsureTemporaryEntryPointCore(AllocMemTracker *pamTracker);
+#ifdef FEATURE_PORTABLE_ENTRYPOINTS
+ // Ensure that the portable entrypoint is allocated, and the slot is filled
+ void EnsurePortableEntryPoint();
+
+ PCODE GetPortableEntryPoint();
+#endif // FEATURE_PORTABLE_ENTRYPOINTS
+
//*******************************************************************************
// Returns the address of the native code.
PCODE GetNativeCode();
@@ -3221,7 +3228,11 @@ class PInvokeMethodDesc : public MethodDesc
{
LIMITED_METHOD_DAC_CONTRACT;
+#ifdef HAS_PINVOKE_IMPORT_PRECODE
return m_pImportThunkGlue;
+#else
+ return &m_ImportThunkGlue;
+#endif // HAS_PINVOKE_IMPORT_PRECODE
}
LPVOID GetPInvokeTarget()
@@ -3241,7 +3252,7 @@ class PInvokeMethodDesc : public MethodDesc
_ASSERTE(IsPInvoke());
- return (GetPInvokeTarget() == GetPInvokeImportThunkGlue()->GetEntrypoint());
+ return (GetPInvokeTarget() == GetPInvokeImportThunkGlue()->GetEntryPoint());
}
#endif // !DACCESS_COMPILE
diff --git a/src/coreclr/vm/methodtablebuilder.cpp b/src/coreclr/vm/methodtablebuilder.cpp
index 1e8b5c87893c2c..f6b43b82d9d245 100644
--- a/src/coreclr/vm/methodtablebuilder.cpp
+++ b/src/coreclr/vm/methodtablebuilder.cpp
@@ -4311,7 +4311,7 @@ VOID MethodTableBuilder::InitializeFieldDescs(FieldDesc *pFieldDescList,
fIsByValue = FALSE; // we're going to treat it as the underlying type now
goto GOT_ELEMENT_TYPE;
}
-
+
// #FieldDescTypeMorph IF it is an enum, strip it down to its underlying type
if (!fIsStatic && pByValueClass->IsEnum())
{
@@ -6169,7 +6169,7 @@ MethodTableBuilder::InitMethodDesc(
pNewNMD->SetIsEarlyBound();
}
- pNewNMD->m_pPInvokeTarget = pNewNMD->GetPInvokeImportThunkGlue()->GetEntrypoint();
+ pNewNMD->m_pPInvokeTarget = pNewNMD->GetPInvokeImportThunkGlue()->GetEntryPoint();
}
break;
diff --git a/src/coreclr/vm/precode.cpp b/src/coreclr/vm/precode.cpp
index d5d8cffdec6487..eb682e07d17004 100644
--- a/src/coreclr/vm/precode.cpp
+++ b/src/coreclr/vm/precode.cpp
@@ -7,6 +7,7 @@
// Stub that runs before the actual native code
//
+#ifndef FEATURE_PORTABLE_ENTRYPOINTS
#include "common.h"
#include "dllimportcallback.h"
@@ -134,9 +135,6 @@ MethodDesc* Precode::GetMethodDesc(BOOL fSpeculative /*= FALSE*/)
TADDR pMD = (TADDR)NULL;
PrecodeType precodeType = GetType();
-#ifdef TARGET_WASM
- pMD = *(TADDR*)(m_data + OFFSETOF_PRECODE_MD);
-#else
switch (precodeType)
{
case PRECODE_STUB:
@@ -169,7 +167,6 @@ MethodDesc* Precode::GetMethodDesc(BOOL fSpeculative /*= FALSE*/)
default:
break;
}
-#endif // TARGET_WASM
if (pMD == (TADDR)NULL)
{
@@ -322,11 +319,8 @@ void Precode::Init(Precode* pPrecodeRX, PrecodeType t, MethodDesc* pMD, LoaderAl
{
LIMITED_METHOD_CONTRACT;
-#ifdef TARGET_WASM
- m_data[OFFSETOF_PRECODE_TYPE] = t;
- *(TADDR*)(m_data + OFFSETOF_PRECODE_MD) = (TADDR)pMD;
-#else
- switch (t) {
+ switch (t)
+ {
case PRECODE_STUB:
((StubPrecode*)this)->Init((StubPrecode*)pPrecodeRX, (TADDR)pMD, pLoaderAllocator);
break;
@@ -349,7 +343,6 @@ void Precode::Init(Precode* pPrecodeRX, PrecodeType t, MethodDesc* pMD, LoaderAl
UnexpectedPrecodeType("Precode::Init", t);
break;
}
-#endif
_ASSERTE(IsValidType(GetType()));
}
@@ -571,8 +564,6 @@ void StubPrecode::StaticInitialize()
}
#undef ENUM_PAGE_SIZE
-#elif defined(TARGET_WASM)
- // StubPrecode is not implemented on WASM
#else
_ASSERTE((SIZE_T)((BYTE*)StubPrecodeCode_End - (BYTE*)StubPrecodeCode) <= StubPrecode::CodeSize);
#endif
@@ -587,7 +578,6 @@ void StubPrecode::StaticInitialize()
void StubPrecode::GenerateCodePage(uint8_t* pageBase, uint8_t* pageBaseRX, size_t pageSize)
{
-#ifndef TARGET_WASM
int totalCodeSize = (int)(pageSize / StubPrecode::CodeSize) * StubPrecode::CodeSize;
#ifdef TARGET_X86
for (int i = 0; i < totalCodeSize; i += StubPrecode::CodeSize)
@@ -610,7 +600,6 @@ void StubPrecode::GenerateCodePage(uint8_t* pageBase, uint8_t* pageBaseRX, size_
_ASSERTE(StubPrecode::IsStubPrecodeByASM_DAC((PCODE)(pageBaseRX + i)));
}
#endif // _DEBUG
-#endif // TARGET_WASM
}
BOOL StubPrecode::IsStubPrecodeByASM(PCODE addr)
@@ -725,8 +714,6 @@ void FixupPrecode::StaticInitialize()
// This should fail if the template is used on a platform which doesn't support the supported page size for templates
ThrowHR(COR_E_EXECUTIONENGINE);
}
-#elif defined(TARGET_WASM)
- // FixupPrecode is not implemented on WASM
#else
_ASSERTE((SIZE_T)((BYTE*)FixupPrecodeCode_End - (BYTE*)FixupPrecodeCode) <= FixupPrecode::CodeSize);
#endif
@@ -740,7 +727,6 @@ void FixupPrecode::StaticInitialize()
void FixupPrecode::GenerateDataPage(uint8_t* pageBase, size_t pageSize)
{
-#ifndef TARGET_WASM
// Fill in the data page such that the target of the fixup precode starts as initialized to point
// to the start of the precode itself, so that before the memory for the precode is initialized,
// the precode is in a state where it will loop forever.
@@ -759,15 +745,12 @@ void FixupPrecode::GenerateDataPage(uint8_t* pageBase, size_t pageSize)
PCODE* ppTargetSlot = (PCODE*)(pageBase + i + offsetof(FixupPrecodeData, Target));
*ppTargetSlot = ((Precode*)(pageBase - pageSize + i))->GetEntryPoint();
}
-#endif // !TARGET_WASM
}
void FixupPrecode::GenerateCodePage(uint8_t* pageBase, uint8_t* pageBaseRX, size_t pageSize)
{
-#ifndef TARGET_WASM
int totalCodeSize = (int)((pageSize / FixupPrecode::CodeSize) * FixupPrecode::CodeSize);
#ifdef TARGET_X86
-
for (int i = 0; i < totalCodeSize; i += FixupPrecode::CodeSize)
{
memcpy(pageBase + i, (const void*)FixupPrecodeCode, FixupPrecode::CodeSize);
@@ -790,7 +773,6 @@ void FixupPrecode::GenerateCodePage(uint8_t* pageBase, uint8_t* pageBaseRX, size
_ASSERTE(FixupPrecode::IsFixupPrecodeByASM_DAC((PCODE)(pageBaseRX + i)));
}
#endif // _DEBUG
-#endif // !TARGET_WASM
}
BOOL FixupPrecode::IsFixupPrecodeByASM(PCODE addr)
@@ -967,3 +949,5 @@ BOOL StubPrecode::IsStubPrecodeByASM(PCODE addr)
return TRUE;
}
+
+#endif // !FEATURE_PORTABLE_ENTRYPOINTS
diff --git a/src/coreclr/vm/precode.h b/src/coreclr/vm/precode.h
index a42539a264edf7..1118faf86dfa05 100644
--- a/src/coreclr/vm/precode.h
+++ b/src/coreclr/vm/precode.h
@@ -9,6 +9,12 @@
#ifndef __PRECODE_H__
#define __PRECODE_H__
+#ifdef FEATURE_PORTABLE_ENTRYPOINTS
+
+#include "precode_portable.hpp"
+
+#else // !FEATURE_PORTABLE_ENTRYPOINTS
+
#define PRECODE_ALIGNMENT sizeof(void*)
#if defined(TARGET_AMD64)
@@ -17,8 +23,6 @@
#elif defined(TARGET_X86)
-EXTERN_C VOID STDCALL PrecodeRemotingThunk();
-
#define SIZEOF_PRECODE_BASE 8
#elif defined(TARGET_ARM64)
@@ -37,12 +41,6 @@ EXTERN_C VOID STDCALL PrecodeRemotingThunk();
#define SIZEOF_PRECODE_BASE CODE_SIZE_ALIGN
-#elif defined(TARGET_WASM)
-
-// on wasm we have "fake" precode, with precode type and MethodDesc information stored
-#define SIZEOF_PRECODE_BASE 2*sizeof(void*)
-#define OFFSETOF_PRECODE_TYPE 0
-#define OFFSETOF_PRECODE_MD 4
#endif // TARGET_AMD64
#ifndef DACCESS_COMPILE
@@ -88,7 +86,7 @@ typedef DPTR(class UMEntryThunk) PTR_UMEntryThunk;
struct InterpreterPrecode;
typedef DPTR(InterpreterPrecode) PTR_InterpreterPrecode;
-#endif
+#endif // FEATURE_INTERPRETER
// Regular precode
struct StubPrecode
@@ -106,8 +104,6 @@ struct StubPrecode
static const SIZE_T CodeSize = 24;
#elif defined(TARGET_RISCV64)
static const SIZE_T CodeSize = 24;
-#elif defined(TARGET_WASM)
- static const SIZE_T CodeSize = 3*sizeof(void*);
#endif // TARGET_AMD64
BYTE m_code[CodeSize];
@@ -219,7 +215,7 @@ struct PInvokeImportPrecode : StubPrecode
void Init(PInvokeImportPrecode* pPrecodeRX, MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);
- LPVOID GetEntrypoint()
+ LPVOID GetEntryPoint()
{
LIMITED_METHOD_CONTRACT;
return (LPVOID)PINSTRToPCODE(dac_cast<TADDR>(this));
@@ -254,7 +250,7 @@ struct ThisPtrRetBufPrecode : StubPrecode
return dac_cast(StubPrecode::GetData()->SecretParam);
}
- LPVOID GetEntrypoint()
+ LPVOID GetEntryPoint()
{
LIMITED_METHOD_CONTRACT;
return (LPVOID)PINSTRToPCODE(dac_cast<TADDR>(this));
@@ -399,9 +395,6 @@ struct FixupPrecode
#elif defined(TARGET_RISCV64)
static const SIZE_T CodeSize = 32;
static const int FixupCodeOffset = 10;
-#elif defined(TARGET_WASM)
- static const SIZE_T CodeSize = 2*sizeof(void*);
- static const int FixupCodeOffset = 0;
#endif // TARGET_AMD64
BYTE m_code[CodeSize];
@@ -548,10 +541,6 @@ inline BYTE StubPrecode::GetType()
LIMITED_METHOD_DAC_CONTRACT;
TADDR type = GetData()->Type;
-#ifdef TARGET_WASM
- return (BYTE)type;
-#endif
-
// There are a limited number of valid bit patterns here. Restrict to those, so that the
// speculative variant of GetPrecodeFromEntryPoint is more robust. Type is stored as a TADDR
// so that a single byte matching is not enough to cause a false match.
@@ -649,9 +638,7 @@ class Precode {
{
LIMITED_METHOD_CONTRACT;
SUPPORTS_DAC;
-#ifdef TARGET_WASM // WASM-TODO: we will not need this once we have real precode on Wasm
- return (PrecodeType)m_data[OFFSETOF_PRECODE_TYPE];
-#endif
+
PrecodeType basicPrecodeType = PRECODE_INVALID;
if (StubPrecode::IsStubPrecodeByASM(PINSTRToPCODE(dac_cast(this))))
{
@@ -768,10 +755,6 @@ class Precode {
fSpeculative = TRUE;
#endif
-#ifdef TARGET_WASM // WASM-TODO: we will not need this once we have real precode on Wasm
- return (PTR_Precode)addr;
-#endif
-
TADDR pInstr = PCODEToPINSTR(addr);
// Always do consistency check in debug
@@ -883,4 +866,6 @@ extern InterleavedLoaderHeapConfig s_stubPrecodeHeapConfig;
extern InterleavedLoaderHeapConfig s_fixupStubPrecodeHeapConfig;
#endif
+#endif // FEATURE_PORTABLE_ENTRYPOINTS
+
#endif // __PRECODE_H__
diff --git a/src/coreclr/vm/precode_portable.cpp b/src/coreclr/vm/precode_portable.cpp
new file mode 100644
index 00000000000000..317add07b551a5
--- /dev/null
+++ b/src/coreclr/vm/precode_portable.cpp
@@ -0,0 +1,237 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+//
+
+#ifdef FEATURE_PORTABLE_ENTRYPOINTS
+
+#include "common.h"
+#include "precode_portable.hpp"
+
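+// Pointer-sized canary stored in debug builds so that ToPortableEntryPoint can assert an address really refers to a PortableEntryPoint.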
+#ifdef HOST_64BIT
+ #define CANARY_VALUE 0x1234567812345678
+#else // HOST_64BIT
+ #define CANARY_VALUE 0x12345678
+#endif // HOST_64BIT
+
+bool PortableEntryPoint::IsNativeEntryPoint(TADDR addr)
+{
+ STANDARD_VM_CONTRACT;
+
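+ // Only interpreted methods currently use portable entrypoints (see clr-abi.md), so no entrypoint refers to native code yet.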
+ return false;
+}
+
+void* PortableEntryPoint::GetActualCode(TADDR addr)
+{
+ STANDARD_VM_CONTRACT;
+
+ PortableEntryPoint* portableEntryPoint = ToPortableEntryPoint(addr);
+ _ASSERTE(portableEntryPoint->_pActualCode != NULL);
+ return portableEntryPoint->_pActualCode;
+}
+
+MethodDesc* PortableEntryPoint::GetMethodDesc(TADDR addr)
+{
+ STANDARD_VM_CONTRACT;
+
+ PortableEntryPoint* portableEntryPoint = ToPortableEntryPoint(addr);
+ _ASSERTE(portableEntryPoint->_pMD != NULL);
+ return portableEntryPoint->_pMD;
+}
+
+void* PortableEntryPoint::GetInterpreterData(TADDR addr)
+{
+ STANDARD_VM_CONTRACT;
+
+ PortableEntryPoint* portableEntryPoint = ToPortableEntryPoint(addr);
+ _ASSERTE(portableEntryPoint->_pInterpreterData != NULL);
+ return portableEntryPoint->_pInterpreterData;
+}
+
+void PortableEntryPoint::SetInterpreterData(TADDR addr, PCODE interpreterData)
+{
+ STANDARD_VM_CONTRACT;
+
+ PortableEntryPoint* portableEntryPoint = ToPortableEntryPoint(addr);
+ _ASSERTE(portableEntryPoint->_pInterpreterData == NULL);
+ portableEntryPoint->_pInterpreterData = (void*)PCODEToPINSTR(interpreterData);
+}
+
+PortableEntryPoint* PortableEntryPoint::ToPortableEntryPoint(TADDR addr)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(addr != NULL);
+
+ PortableEntryPoint* portableEntryPoint = (PortableEntryPoint*)addr;
+ _ASSERTE(portableEntryPoint->_canary == CANARY_VALUE);
+ return portableEntryPoint;
+}
+
+void PortableEntryPoint::Init(MethodDesc* pMD)
+{
+ LIMITED_METHOD_CONTRACT;
+ _pActualCode = NULL;
+ _pMD = pMD;
+ _pInterpreterData = NULL;
+ INDEBUG(_canary = CANARY_VALUE);
+}
+
+InterleavedLoaderHeapConfig s_stubPrecodeHeapConfig;
+
+void StubPrecode::Init(StubPrecode* pPrecodeRX, TADDR secretParam, LoaderAllocator *pLoaderAllocator, TADDR type, TADDR target)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"StubPrecode::Init is not supported with Portable EntryPoints");
+}
+
+BYTE StubPrecode::GetType()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"StubPrecode::GetType is not supported with Portable EntryPoints");
+ return 0;
+}
+
+void StubPrecode::SetTargetUnconditional(TADDR target)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"StubPrecode::SetTargetUnconditional is not supported with Portable EntryPoints");
+}
+
+TADDR StubPrecode::GetSecretParam() const
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"StubPrecode::GetSecretParam is not supported with Portable EntryPoints");
+ return (TADDR)NULL;
+}
+
+MethodDesc* StubPrecode::GetMethodDesc()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"StubPrecode::GetMethodDesc is not supported with Portable EntryPoints");
+ return NULL;
+}
+
+PCODE* FixupPrecode::GetTargetSlot()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"FixupPrecode::GetTargetSlot is not supported with Portable EntryPoints");
+ return NULL;
+}
+
+MethodDesc* FixupPrecode::GetMethodDesc()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"FixupPrecode::GetMethodDesc is not supported with Portable EntryPoints");
+ return NULL;
+}
+
+Precode* Precode::Allocate(PrecodeType t, MethodDesc* pMD,
+ LoaderAllocator *pLoaderAllocator, AllocMemTracker *pamTracker)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"Precode::Allocate is not supported with Portable EntryPoints");
+ return NULL;
+}
+
+Precode* Precode::GetPrecodeFromEntryPoint(PCODE addr, BOOL fSpeculative)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"Precode::GetPrecodeFromEntryPoint is not supported with Portable EntryPoints");
+ return NULL;
+}
+
+PrecodeType Precode::GetType()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"Precode::GetType is not supported with Portable EntryPoints");
+ return (PrecodeType)0;
+}
+
+UMEntryThunk* Precode::AsUMEntryThunk()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"Precode::AsUMEntryThunk is not supported with Portable EntryPoints");
+ return NULL;
+}
+
+StubPrecode* Precode::AsStubPrecode()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"Precode::AsStubPrecode is not supported with Portable EntryPoints");
+ return NULL;
+}
+
+MethodDesc* Precode::GetMethodDesc(BOOL fSpeculative)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"Precode::GetMethodDesc is not supported with Portable EntryPoints");
+ return NULL;
+}
+
+PCODE Precode::GetEntryPoint()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"Precode::GetEntryPoint is not supported with Portable EntryPoints");
+ return (PCODE)NULL;
+}
+
+BOOL Precode::IsPointingToNativeCode(PCODE pNativeCode)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"Precode::IsPointingToNativeCode is not supported with Portable EntryPoints");
+ return FALSE;
+}
+
+void Precode::Reset()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"Precode::Reset is not supported with Portable EntryPoints");
+}
+
+PCODE Precode::GetTarget()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"Precode::GetTarget is not supported with Portable EntryPoints");
+ return (PCODE)NULL;
+}
+
+void Precode::ResetTargetInterlocked()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"Precode::ResetTargetInterlocked is not supported with Portable EntryPoints");
+}
+
+BOOL Precode::SetTargetInterlocked(PCODE target, BOOL fOnlyRedirectFromPrestub)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"Precode::SetTargetInterlocked is not supported with Portable EntryPoints");
+ return FALSE;
+}
+
+BOOL Precode::IsPointingToPrestub()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"Precode::IsPointingToPrestub is not supported with Portable EntryPoints");
+ return FALSE;
+}
+
+BOOL Precode::IsPointingToPrestub(PCODE target)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"Precode::IsPointingToPrestub is not supported with Portable EntryPoints");
+ return FALSE;
+}
+
+void FlushCacheForDynamicMappedStub(void* code, SIZE_T size)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"FlushCacheForDynamicMappedStub is not supported with Portable EntryPoints");
+}
+
+BOOL DoesSlotCallPrestub(PCODE pCode)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"DoesSlotCallPrestub is not supported with Portable EntryPoints");
+ return FALSE;
+}
+
+#endif // FEATURE_PORTABLE_ENTRYPOINTS
diff --git a/src/coreclr/vm/precode_portable.hpp b/src/coreclr/vm/precode_portable.hpp
new file mode 100644
index 00000000000000..07aec0eff4f5c6
--- /dev/null
+++ b/src/coreclr/vm/precode_portable.hpp
@@ -0,0 +1,119 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+//
+
+#ifndef __PRECODE_PORTABLE_H__
+#define __PRECODE_PORTABLE_H__
+
+#ifndef FEATURE_PORTABLE_ENTRYPOINTS
+#error Requires FEATURE_PORTABLE_ENTRYPOINTS to be set
+#endif // !FEATURE_PORTABLE_ENTRYPOINTS
+
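+// A portable entrypoint is a plain data structure (no executable code) used as a
+// method entrypoint on platforms that cannot generate code at runtime. The actual
+// code pointer is stored as the first field so that a caller can dispatch by
+// dereferencing the entrypoint and passing the entrypoint itself as an extra
+// trailing argument (see "Portable entrypoints" in docs/design/coreclr/botr/clr-abi.md).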
+class PortableEntryPoint final
+{
+public: // static
+ static bool IsNativeEntryPoint(TADDR addr);
+ static void* GetActualCode(TADDR addr);
+ static MethodDesc* GetMethodDesc(TADDR addr);
+ static void* GetInterpreterData(TADDR addr);
+ static void SetInterpreterData(TADDR addr, PCODE interpreterData);
+
+private: // static
+ static PortableEntryPoint* ToPortableEntryPoint(TADDR addr);
+
+private:
+ void* _pActualCode;
+ MethodDesc* _pMD;
+ void* _pInterpreterData;
+
+ // We keep the canary value last to ensure a stable ABI across build flavors
+ INDEBUG(size_t _canary);
+
+public:
+ void Init(MethodDesc* pMD);
+};
+
+extern InterleavedLoaderHeapConfig s_stubPrecodeHeapConfig;
+
+enum PrecodeType
+{
+ PRECODE_INVALID = -100,
+ PRECODE_STUB,
+ PRECODE_UMENTRY_THUNK,
+ PRECODE_FIXUP,
+ PRECODE_PINVOKE_IMPORT,
+ PRECODE_INTERPRETER,
+};
+
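+// The precode types below are compile-time shims that keep shared code paths
+// building when portable entrypoints are enabled; every member asserts if it
+// is actually reached at runtime.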
+class StubPrecode
+{
+public: // static
+ static const BYTE Type = PRECODE_STUB;
+
+public:
+ void Init(StubPrecode* pPrecodeRX, TADDR secretParam, LoaderAllocator *pLoaderAllocator = NULL, TADDR type = StubPrecode::Type, TADDR target = 0);
+
+ BYTE GetType();
+
+ void SetTargetUnconditional(TADDR target);
+
+ TADDR GetSecretParam() const;
+
+ MethodDesc* GetMethodDesc();
+};
+
+typedef DPTR(StubPrecode) PTR_StubPrecode;
+
+class FixupPrecode final
+{
+public: // static
+ static const int FixupCodeOffset = 0;
+
+public:
+ PCODE* GetTargetSlot();
+
+ MethodDesc* GetMethodDesc();
+};
+
+class UMEntryThunk;
+
+class Precode
+{
+public: // static
+ static Precode* Allocate(PrecodeType t, MethodDesc* pMD,
+ LoaderAllocator *pLoaderAllocator, AllocMemTracker *pamTracker);
+
+ static Precode* GetPrecodeFromEntryPoint(PCODE addr, BOOL fSpeculative = FALSE);
+
+public:
+ PrecodeType GetType();
+
+ UMEntryThunk* AsUMEntryThunk();
+
+ StubPrecode* AsStubPrecode();
+
+ MethodDesc* GetMethodDesc(BOOL fSpeculative = FALSE);
+
+ PCODE GetEntryPoint();
+
+ BOOL IsPointingToNativeCode(PCODE pNativeCode);
+
+ void Reset();
+
+ PCODE GetTarget();
+
+ void ResetTargetInterlocked();
+
+ BOOL SetTargetInterlocked(PCODE target, BOOL fOnlyRedirectFromPrestub = TRUE);
+
+ BOOL IsPointingToPrestub();
+
+ BOOL IsPointingToPrestub(PCODE target);
+};
+
+void FlushCacheForDynamicMappedStub(void* code, SIZE_T size);
+BOOL DoesSlotCallPrestub(PCODE pCode);
+
+class PrecodeMachineDescriptor { };
+
+#endif // __PRECODE_PORTABLE_H__
diff --git a/src/coreclr/vm/prestub.cpp b/src/coreclr/vm/prestub.cpp
index 8ce695d3d50410..0ef2ed8ff2fb29 100644
--- a/src/coreclr/vm/prestub.cpp
+++ b/src/coreclr/vm/prestub.cpp
@@ -994,8 +994,15 @@ PCODE MethodDesc::JitCompileCodeLocked(PrepareCodeConfig* pConfig, COR_ILMETHOD_
#ifdef FEATURE_INTERPRETER
if (*pIsInterpreterCode)
{
+ InterpByteCodeStart* interpreterCode;
+#ifdef FEATURE_PORTABLE_ENTRYPOINTS
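+ // With portable entrypoints, pCode is a portable entrypoint whose interpreter data holds the byte code.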
+ interpreterCode = (InterpByteCodeStart*)PortableEntryPoint::GetInterpreterData(PCODEToPINSTR(pCode));
+
+#else // !FEATURE_PORTABLE_ENTRYPOINTS
InterpreterPrecode* pPrecode = InterpreterPrecode::FromEntryPoint(pCode);
- InterpByteCodeStart* interpreterCode = dac_cast<PTR_InterpByteCodeStart>(pPrecode->GetData()->ByteCodeAddr);
+ interpreterCode = dac_cast<PTR_InterpByteCodeStart>(pPrecode->GetData()->ByteCodeAddr);
+#endif // FEATURE_PORTABLE_ENTRYPOINTS
+
pConfig->GetMethodDesc()->SetInterpreterCode(interpreterCode);
}
#endif // FEATURE_INTERPRETER
diff --git a/src/coreclr/vm/tieredcompilation.cpp b/src/coreclr/vm/tieredcompilation.cpp
index 8f1b36eeac43ec..5a66f26ef328b3 100644
--- a/src/coreclr/vm/tieredcompilation.cpp
+++ b/src/coreclr/vm/tieredcompilation.cpp
@@ -130,6 +130,16 @@ NativeCodeVersion::OptimizationTier TieredCompilationManager::GetInitialOptimiza
#endif
}
+bool TieredCompilationManager::IsTieringDelayActive()
+{
+ LIMITED_METHOD_CONTRACT;
+#if defined(FEATURE_TIERED_COMPILATION)
+ return m_methodsPendingCountingForTier1 != nullptr;
+#else
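+ // Without tiered compilation there is no tiering delay.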
+ return false;
+#endif // FEATURE_TIERED_COMPILATION
+}
+
#if defined(FEATURE_TIERED_COMPILATION) && !defined(DACCESS_COMPILE)
void TieredCompilationManager::HandleCallCountingForFirstCall(MethodDesc* pMethodDesc)
@@ -574,12 +584,6 @@ void TieredCompilationManager::BackgroundWorkerStart()
}
}
-bool TieredCompilationManager::IsTieringDelayActive()
-{
- LIMITED_METHOD_CONTRACT;
- return m_methodsPendingCountingForTier1 != nullptr;
-}
-
bool TieredCompilationManager::TryDeactivateTieringDelay()
{
CONTRACTL
diff --git a/src/coreclr/vm/tieredcompilation.h b/src/coreclr/vm/tieredcompilation.h
index b39ef788a72e12..775240e8193a2c 100644
--- a/src/coreclr/vm/tieredcompilation.h
+++ b/src/coreclr/vm/tieredcompilation.h
@@ -37,6 +37,8 @@ class TieredCompilationManager
public:
static NativeCodeVersion::OptimizationTier GetInitialOptimizationTier(PTR_MethodDesc pMethodDesc);
+ bool IsTieringDelayActive();
+
#ifdef FEATURE_TIERED_COMPILATION
public:
@@ -66,7 +68,6 @@ class TieredCompilationManager
bool TryDeactivateTieringDelay();
public:
- bool IsTieringDelayActive();
void AsyncCompleteCallCounting();
private:
@@ -76,7 +77,7 @@ class TieredCompilationManager
private:
void OptimizeMethod(NativeCodeVersion nativeCodeVersion);
HRESULT DeoptimizeMethodHelper(Module* pModule, mdMethodDef methodDef);
-
+
NativeCodeVersion GetNextMethodToOptimize();
BOOL CompileCodeVersion(NativeCodeVersion nativeCodeVersion);
void ActivateCodeVersion(NativeCodeVersion nativeCodeVersion);
diff --git a/src/coreclr/vm/util.hpp b/src/coreclr/vm/util.hpp
index 61a948dfb6e232..087a7398012a72 100644
--- a/src/coreclr/vm/util.hpp
+++ b/src/coreclr/vm/util.hpp
@@ -427,11 +427,11 @@ extern LockOwner g_lockTrustMeIAmThreadSafe;
class EEThreadId
{
private:
- void *m_FiberPtrId;
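+ // Sentinel representing "no thread recorded"; replaces the previous NULL sentinel.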
+ static SIZE_T const UNKNOWN_ID = INVALID_POINTER_CD;
+ SIZE_T m_FiberPtrId;
public:
#ifdef _DEBUG
- EEThreadId()
- : m_FiberPtrId(NULL)
+ EEThreadId() : m_FiberPtrId(UNKNOWN_ID)
{
LIMITED_METHOD_CONTRACT;
}
@@ -441,28 +441,27 @@ class EEThreadId
{
WRAPPER_NO_CONTRACT;
- m_FiberPtrId = ClrTeb::GetFiberPtrId();
+ m_FiberPtrId = (SIZE_T)ClrTeb::GetFiberPtrId();
}
bool IsCurrentThread() const
{
WRAPPER_NO_CONTRACT;
- return (m_FiberPtrId == ClrTeb::GetFiberPtrId());
+ return (m_FiberPtrId == (SIZE_T)ClrTeb::GetFiberPtrId());
}
-
#ifdef _DEBUG
bool IsUnknown() const
{
LIMITED_METHOD_CONTRACT;
- return m_FiberPtrId == NULL;
+ return m_FiberPtrId == UNKNOWN_ID;
}
#endif
void Clear()
{
LIMITED_METHOD_CONTRACT;
- m_FiberPtrId = NULL;
+ m_FiberPtrId = UNKNOWN_ID;
}
};
diff --git a/src/coreclr/vm/wasm/cgencpu.h b/src/coreclr/vm/wasm/cgencpu.h
index 975f892a17f1f1..e36a95297127b6 100644
--- a/src/coreclr/vm/wasm/cgencpu.h
+++ b/src/coreclr/vm/wasm/cgencpu.h
@@ -14,12 +14,6 @@
#define CODE_SIZE_ALIGN 4
#define LOG2SLOT LOG2_PTRSIZE
-// looks like this is mandatory for now
-#define HAS_PINVOKE_IMPORT_PRECODE 1
-#define HAS_FIXUP_PRECODE 1
-// ThisPtrRetBufPrecode one is necessary for closed delegates over static methods with return buffer
-#define HAS_THISPTR_RETBUF_PRECODE 1
-
#define BACK_TO_BACK_JUMP_ALLOCATE_SIZE 8 // # bytes to allocate for a back to back jump instruction
//**********************************************************************